Diffstat (limited to 'testpar')
-rw-r--r--   testpar/t_cache.c         2745
-rw-r--r--   testpar/t_cache_image.c    679
-rw-r--r--   testpar/t_pflush1.c        113
-rw-r--r--   testpar/t_pread.c           73
-rw-r--r--   testpar/t_shapesame.c     1706
-rw-r--r--   testpar/t_span_tree.c      704
6 files changed, 3006 insertions, 3014 deletions
diff --git a/testpar/t_cache.c b/testpar/t_cache.c
index 700e993..5768e35 100644
--- a/testpar/t_cache.c
+++ b/testpar/t_cache.c
@@ -16,12 +16,11 @@
*
*/
-#include "h5test.h"
#include "testpar.h"
-#define H5AC_FRIEND /*suppress error about including H5ACpkg */
-#define H5C_FRIEND /*suppress error about including H5Cpkg */
-#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#define H5AC_FRIEND /*suppress error about including H5ACpkg */
+#define H5C_FRIEND /*suppress error about including H5Cpkg */
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
#include "H5ACpkg.h"
#include "H5Cpkg.h"
@@ -33,9 +32,9 @@
#define BASE_ADDR (haddr_t)1024
-int nerrors = 0;
-int failures = 0;
-hbool_t verbose = TRUE; /* used to control error messages */
+int nerrors = 0;
+int failures = 0;
+hbool_t verbose = TRUE; /* used to control error messages */
#define NFILENAME 2
#define PARATESTFILE filenames[0]
@@ -45,23 +44,23 @@ const char *FILENAME[NFILENAME]={"CacheTestDummy", NULL};
#endif /* !PATH_MAX */
char filenames[NFILENAME][PATH_MAX];
hid_t fapl; /* file access property list */
-haddr_t max_addr = 0; /* used to store the end of
- * the address space used by
- * the data array (see below).
- */
-hbool_t callbacks_verbose = FALSE; /* flag used to control whether
- * the callback functions are in
- * verbose mode.
- */
+haddr_t max_addr = 0; /* used to store the end of
+ * the address space used by
+ * the data array (see below).
+ */
+hbool_t callbacks_verbose = FALSE; /* flag used to control whether
+ * the callback functions are in
+ * verbose mode.
+ */
-int world_mpi_size = -1;
-int world_mpi_rank = -1;
-int world_server_mpi_rank = -1;
-MPI_Comm world_mpi_comm = MPI_COMM_NULL;
-int file_mpi_size = -1;
-int file_mpi_rank = -1;
-MPI_Comm file_mpi_comm = MPI_COMM_NULL;
+int world_mpi_size = -1;
+int world_mpi_rank = -1;
+int world_server_mpi_rank = -1;
+MPI_Comm world_mpi_comm = MPI_COMM_NULL;
+int file_mpi_size = -1;
+int file_mpi_rank = -1;
+MPI_Comm file_mpi_comm = MPI_COMM_NULL;
/* the following globals are used to maintain rudementary statistics
@@ -74,67 +73,67 @@ long datum_destroys = 0;
long datum_flushes = 0;
long datum_pinned_flushes = 0;
long datum_loads = 0;
-long global_pins = 0;
-long global_dirty_pins = 0;
-long local_pins = 0;
+long global_pins = 0;
+long global_dirty_pins = 0;
+long local_pins = 0;
/* the following fields are used by the server process only */
-int total_reads = 0;
+int total_reads = 0;
int total_writes = 0;
/*****************************************************************************
* struct datum
*
- * Instances of struct datum are used to store information on entries
- * that may be loaded into the cache. The individual fields are
- * discussed below:
+ * Instances of struct datum are used to store information on entries
+ * that may be loaded into the cache. The individual fields are
+ * discussed below:
*
- * header: Instance of H5C_cache_entry_t used by the for its data.
- * This field is only used on the file processes, not on the
- * server process.
+ * header: Instance of H5C_cache_entry_t used by the for its data.
+ * This field is only used on the file processes, not on the
+ * server process.
*
- * This field MUST be the first entry in this structure.
+ * This field MUST be the first entry in this structure.
*
- * base_addr: Base address of the entry.
+ * base_addr: Base address of the entry.
*
- * len: Length of the entry.
+ * len: Length of the entry.
*
- * local_len: Length of the entry according to the cache. This
- * value must be positive, and may not be larger than len.
+ * local_len: Length of the entry according to the cache. This
+ * value must be positive, and may not be larger than len.
*
- * The field exists to allow us change the sizes of entries
- * in the cache without upsetting the server. This value
- * is only used locally, and is never sent to the server.
+ * The field exists to allow us change the sizes of entries
+ * in the cache without upsetting the server. This value
+ * is only used locally, and is never sent to the server.
*
- * ver: Version number of the entry. This number is initialize
- * to zero, and incremented each time the entry is modified.
+ * ver: Version number of the entry. This number is initialize
+ * to zero, and incremented each time the entry is modified.
*
- * dirty: Boolean flag indicating whether the entry is dirty.
+ * dirty: Boolean flag indicating whether the entry is dirty.
*
- * For current purposes, an entry is clean until it is
- * modified, and dirty until written to the server (cache
- * on process 0) or until it is marked clean (all other
- * caches).
+ * For current purposes, an entry is clean until it is
+ * modified, and dirty until written to the server (cache
+ * on process 0) or until it is marked clean (all other
+ * caches).
*
- * valid: Boolean flag indicating whether the entry contains
- * valid data. Attempts to read an entry whose valid
- * flag is not set should trigger an error.
+ * valid: Boolean flag indicating whether the entry contains
+ * valid data. Attempts to read an entry whose valid
+ * flag is not set should trigger an error.
*
- * locked: Boolean flag that is set to true iff the entry is in
- * the cache and locked.
+ * locked: Boolean flag that is set to true iff the entry is in
+ * the cache and locked.
*
- * global_pinned: Boolean flag that is set to true iff the entry has
- * been pinned collectively in all caches. Since writes must
- * be collective across all processes, only entries pinned
- * in this fashion may be marked dirty.
+ * global_pinned: Boolean flag that is set to true iff the entry has
+ * been pinned collectively in all caches. Since writes must
+ * be collective across all processes, only entries pinned
+ * in this fashion may be marked dirty.
*
- * local_pinned: Boolean flag that is set to true iff the entry
- * has been pinned in the local cache, but probably not all
- * caches. Such pins will typically not be consistant across
- * processes, and thus cannot be marked as dirty unless they
- * happen to overlap some collective operation.
+ * local_pinned: Boolean flag that is set to true iff the entry
+ * has been pinned in the local cache, but probably not all
+ * caches. Such pins will typically not be consistant across
+ * processes, and thus cannot be marked as dirty unless they
+ * happen to overlap some collective operation.
*
* cleared: Boolean flag that is set to true whenever the entry is
* dirty, and is cleared via a call to datum_notify with the
@@ -143,61 +142,61 @@ int total_writes = 0;
* flushed: Boolean flag that is set to true whenever the entry is
* dirty, and is flushed by the metadata cache.
*
- * reads: Integer field used to maintain a count of the number of
- * times this entry has been read from the server since
- * the last time the read and write counts were reset.
+ * reads: Integer field used to maintain a count of the number of
+ * times this entry has been read from the server since
+ * the last time the read and write counts were reset.
*
- * writes: Integer field used to maintain a count of the number of
- * times this entry has been written to the server since
- * the last time the read and write counts were reset.
+ * writes: Integer field used to maintain a count of the number of
+ * times this entry has been written to the server since
+ * the last time the read and write counts were reset.
*
- * index: Index of this instance of datum in the data_index[] array
- * discussed below.
+ * index: Index of this instance of datum in the data_index[] array
+ * discussed below.
*
- * aux_ptr: Pointer to the instance of H5AC_aux_t associated with the
- * instance of the metadata cache within which this entry
- * resides. This field was added to allow us to pass this
- * value to the notify callback from the serialize callback.
- * It should be NULL when not in use.
+ * aux_ptr: Pointer to the instance of H5AC_aux_t associated with the
+ * instance of the metadata cache within which this entry
+ * resides. This field was added to allow us to pass this
+ * value to the notify callback from the serialize callback.
+ * It should be NULL when not in use.
*
*****************************************************************************/
struct datum
{
- H5C_cache_entry_t header;
- haddr_t base_addr;
- size_t len;
- size_t local_len;
- int ver;
- hbool_t dirty;
- hbool_t valid;
- hbool_t locked;
- hbool_t global_pinned;
- hbool_t local_pinned;
- hbool_t cleared;
+ H5C_cache_entry_t header;
+ haddr_t base_addr;
+ size_t len;
+ size_t local_len;
+ int ver;
+ hbool_t dirty;
+ hbool_t valid;
+ hbool_t locked;
+ hbool_t global_pinned;
+ hbool_t local_pinned;
+ hbool_t cleared;
hbool_t flushed;
- int reads;
- int writes;
- int index;
+ int reads;
+ int writes;
+ int index;
struct H5AC_aux_t * aux_ptr;
};
/*****************************************************************************
* data array
*
- * The data array is an array of instances of datum of size
- * NUM_DATA_ENTRIES that is used to track the particulars of all
- * the entries that may be loaded into the cache.
+ * The data array is an array of instances of datum of size
+ * NUM_DATA_ENTRIES that is used to track the particulars of all
+ * the entries that may be loaded into the cache.
*
- * It exists on all processes, although the master copy is maintained
- * by the server process. If the cache is performing correctly, all
- * versions should be effectively identical. By that I mean that
- * the data received from the server should always match that in
- * the local version of the data array.
+ * It exists on all processes, although the master copy is maintained
+ * by the server process. If the cache is performing correctly, all
+ * versions should be effectively identical. By that I mean that
+ * the data received from the server should always match that in
+ * the local version of the data array.
*
*****************************************************************************/
-#define NUM_DATA_ENTRIES 100000
+#define NUM_DATA_ENTRIES 100000
struct datum data[NUM_DATA_ENTRIES];
@@ -216,10 +215,10 @@ struct datum data[NUM_DATA_ENTRIES];
* Further, this value must be consistant across all processes.
*/
-#define STD_VIRT_NUM_DATA_ENTRIES NUM_DATA_ENTRIES
-#define EXPRESS_VIRT_NUM_DATA_ENTRIES (NUM_DATA_ENTRIES / 10)
+#define STD_VIRT_NUM_DATA_ENTRIES NUM_DATA_ENTRIES
+#define EXPRESS_VIRT_NUM_DATA_ENTRIES (NUM_DATA_ENTRIES / 10)
/* Use a smaller test size to avoid creating huge MPE logfiles. */
-#define MPE_VIRT_NUM_DATA_ENTIES (NUM_DATA_ENTRIES / 100)
+#define MPE_VIRT_NUM_DATA_ENTIES (NUM_DATA_ENTRIES / 100)
int virt_num_data_entries = NUM_DATA_ENTRIES;
@@ -227,14 +226,14 @@ int virt_num_data_entries = NUM_DATA_ENTRIES;
/*****************************************************************************
* data_index array
*
- * The data_index array is an array of integer used to maintain a list
- * of instances of datum in the data array in increasing base_addr order.
+ * The data_index array is an array of integer used to maintain a list
+ * of instances of datum in the data array in increasing base_addr order.
*
- * This array is necessary, as move operations can swap the values
- * of the base_addr fields of two instances of datum. Without this
- * array, we would no longer be able to use a binary search on a sorted
- * list to find the indexes of instances of datum given the values of
- * their base_addr fields.
+ * This array is necessary, as move operations can swap the values
+ * of the base_addr fields of two instances of datum. Without this
+ * array, we would no longer be able to use a binary search on a sorted
+ * list to find the indexes of instances of datum given the values of
+ * their base_addr fields.
*
*****************************************************************************/
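Since the hunks shown here include only the documentation for data_index[], a minimal sketch of the binary search it makes possible may help. This is an illustration built from the comments above; it is not the body of addr_to_datum_index() (documented later in this diff), whose implementation is unchanged and therefore not shown. The helper name is hypothetical.

/* Sketch only: a binary search over data_index[], which stays sorted by
 * base_addr even after move operations swap the base_addr values of two
 * instances of datum.  Not copied from addr_to_datum_index().
 */
static int
lookup_datum_index(haddr_t base_addr)
{
    int lo = 0;
    int hi = NUM_DATA_ENTRIES - 1;

    while ( lo <= hi ) {

        int mid = lo + ((hi - lo) / 2);
        haddr_t mid_addr = data[data_index[mid]].base_addr;

        if ( mid_addr == base_addr )
            return(data_index[mid]);        /* index of the target datum */
        else if ( mid_addr < base_addr )
            lo = mid + 1;
        else
            hi = mid - 1;
    }

    return(-1);                             /* documented failure value */
}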
@@ -243,99 +242,99 @@ int data_index[NUM_DATA_ENTRIES];
/*****************************************************************************
* The following two #defines are used to control code that is in turn used
- * to force "POSIX" semantics on the server process used to simulate metadata
- * reads and writes. Without some such mechanism, the test code contains
+ * to force "POSIX" semantics on the server process used to simulate metadata
+ * reads and writes. Without some such mechanism, the test code contains
* race conditions that will frequently cause spurious failures.
*
* When set to TRUE, DO_WRITE_REQ_ACK forces the server to send an ack after
- * each write request, and the client to wait until the ack is received
+ * each write request, and the client to wait until the ack is received
* before proceeding. This was my first solution to the problem, and at
* first glance, it would seem to have a lot of unnecessary overhead.
*
* In an attempt to reduce the overhead, I implemented a second solution
- * in which no acks are sent after writes. Instead, the metadata cache is
- * provided with a callback function to call after each sequence of writes.
- * This callback simply causes the client to send the server process a
+ * in which no acks are sent after writes. Instead, the metadata cache is
+ * provided with a callback function to call after each sequence of writes.
+ * This callback simply causes the client to send the server process a
* "sync" message and and await an ack in reply.
*
- * Strangely, at least on Phoenix, the first solution runs faster by a
- * rather large margin. However, I can imagine this changing with
+ * Strangely, at least on Phoenix, the first solution runs faster by a
+ * rather large margin. However, I can imagine this changing with
* different OS's and MPI implementatins.
*
- * Thus I have left code supporting the second solution in place.
+ * Thus I have left code supporting the second solution in place.
*
- * Note that while one of these two #defines must be set to TRUE, there
- * should never be any need to set both of them to TRUE (although the
+ * Note that while one of these two #defines must be set to TRUE, there
+ * should never be any need to set both of them to TRUE (although the
* tests will still function with this setting).
*****************************************************************************/
-#define DO_WRITE_REQ_ACK TRUE
-#define DO_SYNC_AFTER_WRITE FALSE
+#define DO_WRITE_REQ_ACK TRUE
+#define DO_SYNC_AFTER_WRITE FALSE
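To make the two synchronization strategies concrete, here is a hedged client-side sketch that uses only the message codes and the send_mssg()/recv_mssg() helpers appearing elsewhere in this diff. The wrapper function and its control flow are assumptions for illustration, not code from t_cache.c.

/* Illustration only: how one client write might interact with the server
 * under each setting.  mssg_ptr is assumed to hold a fully composed
 * WRITE_REQ_CODE message (src, dest, base_addr, len, ver, magic).
 */
static hbool_t
push_one_write(struct mssg_t *mssg_ptr)
{
    hbool_t success;

    success = send_mssg(mssg_ptr, FALSE);

#if DO_WRITE_REQ_ACK
    /* Strategy 1: block until the server acknowledges this write. */
    if ( success ) {

        struct mssg_t ack;

        success = ( recv_mssg(&ack, WRITE_REQ_ACK_CODE) &&
                    ( ack.req == WRITE_REQ_ACK_CODE ) &&
                    ( ack.magic == MSSG_MAGIC ) );
    }
#else /* DO_SYNC_AFTER_WRITE */
    /* Strategy 2: no per-write ack.  The metadata cache's post-write
     * callback later calls do_sync(), which exchanges SYNC_REQ_CODE /
     * SYNC_ACK_CODE messages with the server after each sequence of writes.
     */
#endif

    return(success);
}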
/*****************************************************************************
* struct mssg
*
- * The mssg structure is used as a generic container for messages to
- * and from the server. Not all fields are used in all cases.
+ * The mssg structure is used as a generic container for messages to
+ * and from the server. Not all fields are used in all cases.
*
- * req: Integer field containing the type of the message.
+ * req: Integer field containing the type of the message.
*
- * src: World communicator MPI rank of the sending process.
+ * src: World communicator MPI rank of the sending process.
*
- * dest: World communicator MPI rank of the destination process.
+ * dest: World communicator MPI rank of the destination process.
*
- * mssg_num: Serial number assigned to the message by the sender.
+ * mssg_num: Serial number assigned to the message by the sender.
*
- * base_addr: Base address of a datum. Not used in all mssgs.
+ * base_addr: Base address of a datum. Not used in all mssgs.
*
- * len: Length of a datum (in bytes). Not used in all mssgs.
+ * len: Length of a datum (in bytes). Not used in all mssgs.
*
- * ver: Version number of a datum. Not used in all mssgs.
+ * ver: Version number of a datum. Not used in all mssgs.
*
- * count: Reported number of total/entry reads/writes. Not used
- * in all mssgs.
+ * count: Reported number of total/entry reads/writes. Not used
+ * in all mssgs.
*
- * magic: Magic number for error detection. Must be set to
- * MSSG_MAGIC.
+ * magic: Magic number for error detection. Must be set to
+ * MSSG_MAGIC.
*
*****************************************************************************/
-#define WRITE_REQ_CODE 0
-#define WRITE_REQ_ACK_CODE 1
-#define READ_REQ_CODE 2
-#define READ_REQ_REPLY_CODE 3
-#define SYNC_REQ_CODE 4
-#define SYNC_ACK_CODE 5
-#define REQ_TTL_WRITES_CODE 6
-#define REQ_TTL_WRITES_RPLY_CODE 7
-#define REQ_TTL_READS_CODE 8
-#define REQ_TTL_READS_RPLY_CODE 9
-#define REQ_ENTRY_WRITES_CODE 10
-#define REQ_ENTRY_WRITES_RPLY_CODE 11
-#define REQ_ENTRY_READS_CODE 12
-#define REQ_ENTRY_READS_RPLY_CODE 13
-#define REQ_RW_COUNT_RESET_CODE 14
-#define REQ_RW_COUNT_RESET_RPLY_CODE 15
-#define DONE_REQ_CODE 16
-#define MAX_REQ_CODE 16
-
-#define MSSG_MAGIC 0x1248
+#define WRITE_REQ_CODE 0
+#define WRITE_REQ_ACK_CODE 1
+#define READ_REQ_CODE 2
+#define READ_REQ_REPLY_CODE 3
+#define SYNC_REQ_CODE 4
+#define SYNC_ACK_CODE 5
+#define REQ_TTL_WRITES_CODE 6
+#define REQ_TTL_WRITES_RPLY_CODE 7
+#define REQ_TTL_READS_CODE 8
+#define REQ_TTL_READS_RPLY_CODE 9
+#define REQ_ENTRY_WRITES_CODE 10
+#define REQ_ENTRY_WRITES_RPLY_CODE 11
+#define REQ_ENTRY_READS_CODE 12
+#define REQ_ENTRY_READS_RPLY_CODE 13
+#define REQ_RW_COUNT_RESET_CODE 14
+#define REQ_RW_COUNT_RESET_RPLY_CODE 15
+#define DONE_REQ_CODE 16
+#define MAX_REQ_CODE 16
+
+#define MSSG_MAGIC 0x1248
struct mssg_t
{
- int req;
- int src;
- int dest;
- long int mssg_num;
- haddr_t base_addr;
- unsigned len;
- int ver;
- unsigned count;
- unsigned magic;
+ int req;
+ int src;
+ int dest;
+ long int mssg_num;
+ haddr_t base_addr;
+ unsigned len;
+ int ver;
+ unsigned count;
+ unsigned magic;
};
-MPI_Datatype mpi_mssg_t; /* for MPI derived type created from mssg */
+MPI_Datatype mpi_mssg_t; /* for MPI derived type created from mssg */
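The mpi_mssg_t derived type is built by setup_derived_types() (documented further down in this diff; its body is outside the hunks shown). One plausible construction with MPI_Type_create_struct() is sketched below; the field-to-MPI-type mapping, in particular MPI_UNSIGNED_LONG_LONG for haddr_t and MPI_LONG for long int, is an assumption.

/* Sketch of a possible mpi_mssg_t construction; not copied from
 * setup_derived_types().
 */
static hbool_t
build_mpi_mssg_type(void)
{
    struct mssg_t sample;
    MPI_Datatype  field_types[9] = { MPI_INT, MPI_INT, MPI_INT, MPI_LONG,
                                     MPI_UNSIGNED_LONG_LONG, MPI_UNSIGNED,
                                     MPI_INT, MPI_UNSIGNED, MPI_UNSIGNED };
    int           block_lens[9]  = { 1, 1, 1, 1, 1, 1, 1, 1, 1 };
    MPI_Aint      displs[9];
    MPI_Aint      base;
    int           i;

    /* compute each field's displacement from the start of the struct */
    MPI_Get_address(&sample,           &base);
    MPI_Get_address(&sample.req,       &(displs[0]));
    MPI_Get_address(&sample.src,       &(displs[1]));
    MPI_Get_address(&sample.dest,      &(displs[2]));
    MPI_Get_address(&sample.mssg_num,  &(displs[3]));
    MPI_Get_address(&sample.base_addr, &(displs[4]));
    MPI_Get_address(&sample.len,       &(displs[5]));
    MPI_Get_address(&sample.ver,       &(displs[6]));
    MPI_Get_address(&sample.count,     &(displs[7]));
    MPI_Get_address(&sample.magic,     &(displs[8]));

    for ( i = 0; i < 9; i++ )
        displs[i] -= base;

    if ( MPI_Type_create_struct(9, block_lens, displs, field_types,
                                &mpi_mssg_t) != MPI_SUCCESS )
        return(FALSE);

    return( (hbool_t)(MPI_Type_commit(&mpi_mssg_t) == MPI_SUCCESS) );
}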
/*****************************************************************************/
@@ -409,24 +408,24 @@ static herr_t datum_notify(H5C_notify_action_t action, void *thing);
static herr_t datum_free_icr(void * thing);
/* Masquerade as object header entries to the cache */
-#define DATUM_ENTRY_TYPE H5AC_OHDR_ID
+#define DATUM_ENTRY_TYPE H5AC_OHDR_ID
-#define NUMBER_OF_ENTRY_TYPES 1
+#define NUMBER_OF_ENTRY_TYPES 1
/* Note the use of the H5AC__CLASS_SKIP_READS and H5AC__CLASS_SKIP_WRITES
* flags. As a result of these flags, the metadata cache does no file I/O
* on metadata of the datum type.
*
- * Instead, this test uses a server process to keep track of who has
+ * Instead, this test uses a server process to keep track of who has
* written and read what, and to verify that there are no messages from
* the past / future.
*
- * In the callbacks for the version 2 cache, this activity was hidden in
+ * In the callbacks for the version 2 cache, this activity was hidden in
* the load and flush callbacks. However, now we handle this function in
* notify callbacks for the after load and after flush events.
*
- * JRM -- 1/13/15
+ * JRM -- 1/13/15
*/
const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
{
@@ -474,9 +473,9 @@ static void pin_protected_entry(int32_t idx, hbool_t global);
static void move_entry(H5F_t * file_ptr, int32_t old_idx, int32_t new_idx);
static hbool_t reset_server_counts(void);
static void resize_entry(int32_t idx, size_t new_size);
-static hbool_t setup_cache_for_test(hid_t * fid_ptr,
+static hbool_t setup_cache_for_test(hid_t * fid_ptr,
H5F_t ** file_ptr_ptr,
- H5C_t ** cache_ptr_ptr,
+ H5C_t ** cache_ptr_ptr,
int metadata_write_strategy);
static void setup_rand(void);
static hbool_t take_down_cache(hid_t fid, H5C_t * cache_ptr);
@@ -550,17 +549,17 @@ print_stats(void)
/*****************************************************************************
*
- * Function: reset_stats()
+ * Function: reset_stats()
*
- * Purpose: Reset the rudementary stats maintained by t_cache.
+ * Purpose: Reset the rudementary stats maintained by t_cache.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 4/17/06
+ * Programmer: JRM -- 4/17/06
*
* Modifications:
*
- * None.
+ * None.
*
*****************************************************************************/
@@ -573,9 +572,9 @@ reset_stats(void)
datum_flushes = 0;
datum_pinned_flushes = 0;
datum_loads = 0;
- global_pins = 0;
- global_dirty_pins = 0;
- local_pins = 0;
+ global_pins = 0;
+ global_dirty_pins = 0;
+ local_pins = 0;
return;
@@ -588,20 +587,20 @@ reset_stats(void)
/*****************************************************************************
*
- * Function: set_up_file_communicator()
+ * Function: set_up_file_communicator()
*
- * Purpose: Create the MPI communicator used to open a HDF5 file with.
- * In passing, also initialize the file_mpi... globals.
+ * Purpose: Create the MPI communicator used to open a HDF5 file with.
+ * In passing, also initialize the file_mpi... globals.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 11/16/05
+ * Programmer: JRM -- 11/16/05
*
* Modifications:
*
- * None.
+ * None.
*
*****************************************************************************/
@@ -623,7 +622,7 @@ set_up_file_communicator(void)
nerrors++;
success = FALSE;
- if ( verbose ) {
+ if ( verbose ) {
fprintf(stdout,
"%d:%s: MPI_Comm_group() failed with error %d.\n",
world_mpi_rank, FUNC, mpi_result);
@@ -740,16 +739,16 @@ set_up_file_communicator(void)
/*****************************************************************************
*
- * Function: addr_to_datum_index()
+ * Function: addr_to_datum_index()
*
- * Purpose: Given the base address of a datum, find and return its index
- * in the data array.
+ * Purpose: Given the base address of a datum, find and return its index
+ * in the data array.
*
- * Return: Success: index of target datum.
+ * Return: Success: index of target datum.
*
- * Failure: -1.
+ * Failure: -1.
*
- * Programmer: JRM -- 12/20/05
+ * Programmer: JRM -- 12/20/05
*
*****************************************************************************/
static int
@@ -787,16 +786,16 @@ addr_to_datum_index(haddr_t base_addr)
/*****************************************************************************
*
- * Function: init_data()
+ * Function: init_data()
*
- * Purpose: Initialize the data array, from which cache entries are
- * loaded.
+ * Purpose: Initialize the data array, from which cache entries are
+ * loaded.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 12/20/05
+ * Programmer: JRM -- 12/20/05
*
*****************************************************************************/
static void
@@ -831,14 +830,14 @@ init_data(void)
data[i].dirty = FALSE;
data[i].valid = FALSE;
data[i].locked = FALSE;
- data[i].global_pinned = FALSE;
- data[i].local_pinned = FALSE;
- data[i].cleared = FALSE;
+ data[i].global_pinned = FALSE;
+ data[i].local_pinned = FALSE;
+ data[i].cleared = FALSE;
data[i].flushed = FALSE;
data[i].reads = 0;
data[i].writes = 0;
- data[i].index = i;
- data[i].aux_ptr = NULL;
+ data[i].index = i;
+ data[i].aux_ptr = NULL;
data_index[i] = i;
@@ -862,22 +861,22 @@ init_data(void)
/*****************************************************************************
*
- * Function: do_express_test()
+ * Function: do_express_test()
*
- * Purpose: Do an MPI_Allreduce to obtain the maximum value returned
- * by GetTestExpress() across all processes. Return this
- * value.
+ * Purpose: Do an MPI_Allreduce to obtain the maximum value returned
+ * by GetTestExpress() across all processes. Return this
+ * value.
*
- * Envirmoment variables can be different across different
- * processes. This function ensures that all processes agree
- * on whether to do an express test.
+ * Envirmoment variables can be different across different
+ * processes. This function ensures that all processes agree
+ * on whether to do an express test.
*
- * Return: Success: Maximum of the values returned by
- * GetTestExpress() across all processes.
+ * Return: Success: Maximum of the values returned by
+ * GetTestExpress() across all processes.
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: JRM -- 4/25/06
+ * Programmer: JRM -- 4/25/06
*
*****************************************************************************/
static int
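The Allreduce-maximum pattern described above (and used again by get_max_nerrors() later in this diff) can be sketched as follows; the helper name and error handling are assumed, since the real function bodies are not part of these hunks.

/* Illustration of the Allreduce-max pattern; the real do_express_test() /
 * get_max_nerrors() bodies are not shown in this diff.
 */
static int
all_reduce_max(int local_value)
{
    int max_value = -1;

    if ( MPI_Allreduce((void *)(&local_value), (void *)(&max_value), 1,
                       MPI_INT, MPI_MAX, world_mpi_comm) != MPI_SUCCESS ) {

        return(-1);                 /* documented failure value */
    }

    return(max_value);
}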
@@ -913,19 +912,19 @@ do_express_test(void)
/*****************************************************************************
*
- * Function: do_sync()
+ * Function: do_sync()
*
- * Purpose: Ensure that all messages sent by this process have been
- * processed before proceeding.
+ * Purpose: Ensure that all messages sent by this process have been
+ * processed before proceeding.
*
- * Do this by exchanging sync req / sync ack messages with
- * the server.
+ * Do this by exchanging sync req / sync ack messages with
+ * the server.
*
- * Do nothing if nerrors is greater than zero.
+ * Do nothing if nerrors is greater than zero.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 5/10/06
+ * Programmer: JRM -- 5/10/06
*
*****************************************************************************/
static void
@@ -937,7 +936,7 @@ do_sync(void)
if ( nerrors <= 0 ) {
/* compose the message */
- mssg.req = SYNC_REQ_CODE;
+ mssg.req = SYNC_REQ_CODE;
mssg.src = world_mpi_rank;
mssg.dest = world_server_mpi_rank;
mssg.mssg_num = -1; /* set by send function */
@@ -947,10 +946,10 @@ do_sync(void)
mssg.count = 0;
mssg.magic = MSSG_MAGIC;
- if ( ! send_mssg(&mssg, FALSE) ) {
+ if ( ! send_mssg(&mssg, FALSE) ) {
- nerrors++;
- if ( verbose ) {
+ nerrors++;
+ if ( verbose ) {
HDfprintf(stdout, "%d:%s: send_mssg() failed.\n",
world_mpi_rank, FUNC);
}
@@ -959,24 +958,24 @@ do_sync(void)
if ( nerrors <= 0 ) {
- if ( ! recv_mssg(&mssg, SYNC_ACK_CODE) ) {
+ if ( ! recv_mssg(&mssg, SYNC_ACK_CODE) ) {
nerrors++;
if ( verbose ) {
HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n",
world_mpi_rank, FUNC);
}
- } else if ( ( mssg.req != SYNC_ACK_CODE ) ||
+ } else if ( ( mssg.req != SYNC_ACK_CODE ) ||
( mssg.src != world_server_mpi_rank ) ||
( mssg.dest != world_mpi_rank ) ||
- ( mssg.magic != MSSG_MAGIC ) ) {
+ ( mssg.magic != MSSG_MAGIC ) ) {
nerrors++;
- if ( verbose ) {
+ if ( verbose ) {
HDfprintf(stdout, "%d:%s: Bad data in sync ack.\n",
world_mpi_rank, FUNC);
}
- }
+ }
}
return;
@@ -986,17 +985,17 @@ do_sync(void)
/*****************************************************************************
*
- * Function: get_max_nerrors()
+ * Function: get_max_nerrors()
*
- * Purpose: Do an MPI_Allreduce to obtain the maximum value of nerrors
- * across all processes. Return this value.
+ * Purpose: Do an MPI_Allreduce to obtain the maximum value of nerrors
+ * across all processes. Return this value.
*
- * Return: Success: Maximum of the nerrors global variables across
- * all processes.
+ * Return: Success: Maximum of the nerrors global variables across
+ * all processes.
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: JRM -- 1/3/06
+ * Programmer: JRM -- 1/3/06
*
*****************************************************************************/
static int
@@ -1033,29 +1032,29 @@ get_max_nerrors(void)
/*****************************************************************************
*
- * Function: recv_mssg()
+ * Function: recv_mssg()
*
- * Purpose: Receive a message from any process in the provided instance
- * of struct mssg.
+ * Purpose: Receive a message from any process in the provided instance
+ * of struct mssg.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 12/22/05
+ * Programmer: JRM -- 12/22/05
*
* Modifications:
*
- * JRM -- 5/10/06
- * Added mssg_tag_offset parameter and supporting code.
+ * JRM -- 5/10/06
+ * Added mssg_tag_offset parameter and supporting code.
*
*****************************************************************************/
-#define CACHE_TEST_TAG 99 /* different from any used by the library */
+#define CACHE_TEST_TAG 99 /* different from any used by the library */
static hbool_t
recv_mssg(struct mssg_t *mssg_ptr,
- int mssg_tag_offset)
+ int mssg_tag_offset)
{
hbool_t success = TRUE;
int mssg_tag = CACHE_TEST_TAG;
@@ -1117,28 +1116,28 @@ recv_mssg(struct mssg_t *mssg_ptr,
/*****************************************************************************
*
- * Function: send_mssg()
+ * Function: send_mssg()
*
- * Purpose: Send the provided instance of mssg to the indicated target.
+ * Purpose: Send the provided instance of mssg to the indicated target.
*
- * Note that all source and destination ranks are in the
- * global communicator.
+ * Note that all source and destination ranks are in the
+ * global communicator.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 12/22/05
+ * Programmer: JRM -- 12/22/05
*
* Modifications:
*
- * JRM -- 5/10/06
- * Added the add_req_to_tag parameter and supporting code.
+ * JRM -- 5/10/06
+ * Added the add_req_to_tag parameter and supporting code.
*
*****************************************************************************/
static hbool_t
send_mssg(struct mssg_t *mssg_ptr,
- hbool_t add_req_to_tag)
+ hbool_t add_req_to_tag)
{
hbool_t success = TRUE;
int mssg_tag = CACHE_TEST_TAG;
@@ -1166,10 +1165,10 @@ send_mssg(struct mssg_t *mssg_ptr,
mssg_ptr->mssg_num = mssg_num++;
- if ( add_req_to_tag ) {
+ if ( add_req_to_tag ) {
- mssg_tag += mssg_ptr->req;
- }
+ mssg_tag += mssg_ptr->req;
+ }
result = MPI_Send((void *)mssg_ptr, 1, mpi_mssg_t,
mssg_ptr->dest, mssg_tag, world_mpi_comm);
@@ -1189,19 +1188,19 @@ send_mssg(struct mssg_t *mssg_ptr,
} /* send_mssg() */
-
+
/*****************************************************************************
*
- * Function: setup_derived_types()
+ * Function: setup_derived_types()
*
- * Purpose: Set up the derived types used by the test bed. At present,
- * only the mpi_mssg derived type is needed.
+ * Purpose: Set up the derived types used by the test bed. At present,
+ * only the mpi_mssg derived type is needed.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 12/22/05
+ * Programmer: JRM -- 12/22/05
*
*****************************************************************************/
static hbool_t
@@ -1278,19 +1277,19 @@ setup_derived_types(void)
} /* setup_derived_types */
-
+
/*****************************************************************************
*
- * Function: takedown_derived_types()
+ * Function: takedown_derived_types()
*
- * Purpose: take down the derived types used by the test bed. At present,
- * only the mpi_mssg derived type is needed.
+ * Purpose: take down the derived types used by the test bed. At present,
+ * only the mpi_mssg derived type is needed.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 12/22/05
+ * Programmer: JRM -- 12/22/05
*
*****************************************************************************/
static hbool_t
@@ -1322,16 +1321,16 @@ takedown_derived_types(void)
/*****************************************************************************
*
- * Function: reset_server_counters()
+ * Function: reset_server_counters()
*
- * Purpose: Reset the counters maintained by the server, doing a
- * sanity check in passing.
+ * Purpose: Reset the counters maintained by the server, doing a
+ * sanity check in passing.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 5/5/10
+ * Programmer: JRM -- 5/5/10
*
*****************************************************************************/
static hbool_t
@@ -1363,7 +1362,7 @@ reset_server_counters(void)
nerrors++;
if ( verbose ) {
HDfprintf(stdout, "%d:%s: actual/total reads mismatch (%ld/%ld).\n",
- world_mpi_rank, FUNC,
+ world_mpi_rank, FUNC,
actual_total_reads, total_reads);
}
}
@@ -1374,7 +1373,7 @@ reset_server_counters(void)
nerrors++;
if ( verbose ) {
HDfprintf(stdout, "%d:%s: actual/total writes mismatch (%ld/%ld).\n",
- world_mpi_rank, FUNC,
+ world_mpi_rank, FUNC,
actual_total_writes, total_writes);
}
}
@@ -1389,25 +1388,25 @@ reset_server_counters(void)
/*****************************************************************************
*
- * Function: server_main()
+ * Function: server_main()
*
- * Purpose: Main function for the server process. This process exists
- * to provide an independant view of the data array.
+ * Purpose: Main function for the server process. This process exists
+ * to provide an independant view of the data array.
*
- * The function handles request from the other processes in
- * the test until the count of done messages received equals
- * the number of client processes.
+ * The function handles request from the other processes in
+ * the test until the count of done messages received equals
+ * the number of client processes.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 12/22/05
+ * Programmer: JRM -- 12/22/05
*
* Modifications:
*
- * JRM -- 5/10/06
- * Updated for sync message.
+ * JRM -- 5/10/06
+ * Updated for sync message.
*
*****************************************************************************/
static hbool_t
@@ -1437,98 +1436,98 @@ server_main(void)
switch ( mssg.req )
{
- case WRITE_REQ_CODE:
- success = serve_write_request(&mssg);
- break;
+ case WRITE_REQ_CODE:
+ success = serve_write_request(&mssg);
+ break;
- case WRITE_REQ_ACK_CODE:
+ case WRITE_REQ_ACK_CODE:
success = FALSE;
if(verbose)
HDfprintf(stdout, "%s: Received write ack?!?.\n", FUNC);
- break;
+ break;
- case READ_REQ_CODE:
+ case READ_REQ_CODE:
success = serve_read_request(&mssg);
- break;
+ break;
- case READ_REQ_REPLY_CODE:
+ case READ_REQ_REPLY_CODE:
success = FALSE;
if(verbose)
HDfprintf(stdout, "%s: Received read req reply?!?.\n", FUNC);
- break;
+ break;
- case SYNC_REQ_CODE:
+ case SYNC_REQ_CODE:
success = serve_sync_request(&mssg);
- break;
+ break;
- case SYNC_ACK_CODE:
+ case SYNC_ACK_CODE:
success = FALSE;
if(verbose)
HDfprintf(stdout, "%s: Received sync ack?!?.\n", FUNC);
- break;
+ break;
- case REQ_TTL_WRITES_CODE:
- success = serve_total_writes_request(&mssg);
- break;
+ case REQ_TTL_WRITES_CODE:
+ success = serve_total_writes_request(&mssg);
+ break;
- case REQ_TTL_WRITES_RPLY_CODE:
+ case REQ_TTL_WRITES_RPLY_CODE:
success = FALSE;
if(verbose)
HDfprintf(stdout, "%s: Received total writes reply?!?.\n", FUNC);
- break;
+ break;
- case REQ_TTL_READS_CODE:
- success = serve_total_reads_request(&mssg);
- break;
+ case REQ_TTL_READS_CODE:
+ success = serve_total_reads_request(&mssg);
+ break;
- case REQ_TTL_READS_RPLY_CODE:
+ case REQ_TTL_READS_RPLY_CODE:
success = FALSE;
if(verbose)
HDfprintf(stdout, "%s: Received total reads reply?!?.\n", FUNC);
- break;
+ break;
- case REQ_ENTRY_WRITES_CODE:
- success = serve_entry_writes_request(&mssg);
- break;
+ case REQ_ENTRY_WRITES_CODE:
+ success = serve_entry_writes_request(&mssg);
+ break;
- case REQ_ENTRY_WRITES_RPLY_CODE:
+ case REQ_ENTRY_WRITES_RPLY_CODE:
success = FALSE;
if(verbose)
HDfprintf(stdout, "%s: Received entry writes reply?!?.\n", FUNC);
- break;
+ break;
- case REQ_ENTRY_READS_CODE:
- success = serve_entry_reads_request(&mssg);
- break;
+ case REQ_ENTRY_READS_CODE:
+ success = serve_entry_reads_request(&mssg);
+ break;
- case REQ_ENTRY_READS_RPLY_CODE:
+ case REQ_ENTRY_READS_RPLY_CODE:
success = FALSE;
if(verbose)
HDfprintf(stdout, "%s: Received entry reads reply?!?.\n", FUNC);
- break;
+ break;
- case REQ_RW_COUNT_RESET_CODE:
- success = serve_rw_count_reset_request(&mssg);
- break;
+ case REQ_RW_COUNT_RESET_CODE:
+ success = serve_rw_count_reset_request(&mssg);
+ break;
- case REQ_RW_COUNT_RESET_RPLY_CODE:
+ case REQ_RW_COUNT_RESET_RPLY_CODE:
success = FALSE;
if(verbose)
HDfprintf(stdout, "%s: Received RW count reset reply?!?.\n", FUNC);
- break;
+ break;
- case DONE_REQ_CODE:
- done_count++;
- if(done_count >= file_mpi_size)
- done = TRUE;
- break;
+ case DONE_REQ_CODE:
+ done_count++;
+ if(done_count >= file_mpi_size)
+ done = TRUE;
+ break;
- default:
+ default:
nerrors++;
success = FALSE;
if(verbose)
- HDfprintf(stdout, "%d:%s: Unknown request code.\n", world_mpi_rank, FUNC);
- break;
+ HDfprintf(stdout, "%d:%s: Unknown request code.\n", world_mpi_rank, FUNC);
+ break;
}
}
}
@@ -1537,23 +1536,23 @@ server_main(void)
} /* server_main() */
-
+
/*****************************************************************************
*
- * Function: serve_read_request()
+ * Function: serve_read_request()
*
- * Purpose: Serve a read request.
+ * Purpose: Serve a read request.
*
- * The function accepts a pointer to an instance of struct
- * mssg_t as input. If all sanity checks pass, it sends
- * a copy of the indicated datum from the data array to
- * the requesting process.
+ * The function accepts a pointer to an instance of struct
+ * mssg_t as input. If all sanity checks pass, it sends
+ * a copy of the indicated datum from the data array to
+ * the requesting process.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 12/22/05
+ * Programmer: JRM -- 12/22/05
*
*****************************************************************************/
static hbool_t
@@ -1609,8 +1608,8 @@ serve_read_request(struct mssg_t * mssg_ptr)
"%d:%s: proc %d read invalid entry. idx/base_addr = %d/%a.\n",
world_mpi_rank, FUNC,
mssg_ptr->src,
- target_index,
- data[target_index].base_addr);
+ target_index,
+ data[target_index].base_addr);
}
} else {
@@ -1622,11 +1621,11 @@ serve_read_request(struct mssg_t * mssg_ptr)
reply.base_addr = data[target_index].base_addr;
reply.len = data[target_index].len;
reply.ver = data[target_index].ver;
- reply.count = 0;
+ reply.count = 0;
reply.magic = MSSG_MAGIC;
- /* and update the counters */
- total_reads++;
+ /* and update the counters */
+ total_reads++;
(data[target_index].reads)++;
}
}
@@ -1641,7 +1640,7 @@ serve_read_request(struct mssg_t * mssg_ptr)
if ( success ) {
HDfprintf(stdout, "%d read 0x%llx. len = %d. ver = %d.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
(long long)(data[target_index].base_addr),
(int)(data[target_index].len),
(int)(data[target_index].ver));
@@ -1649,38 +1648,38 @@ serve_read_request(struct mssg_t * mssg_ptr)
} else {
HDfprintf(stdout, "%d read 0x%llx FAILED. len = %d. ver = %d.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
(long long)(data[target_index].base_addr),
(int)(data[target_index].len),
(int)(data[target_index].ver));
}
- }
+ }
return(success);
} /* serve_read_request() */
-
+
/*****************************************************************************
*
- * Function: serve_sync_request()
+ * Function: serve_sync_request()
*
- * Purpose: Serve a sync request.
+ * Purpose: Serve a sync request.
*
- * The function accepts a pointer to an instance of struct
- * mssg_t as input. If all sanity checks pass, it sends a
- * sync ack to the requesting process.
+ * The function accepts a pointer to an instance of struct
+ * mssg_t as input. If all sanity checks pass, it sends a
+ * sync ack to the requesting process.
*
- * This service exist to allow the sending process to ensure
- * that all previous messages have been processed before
- * proceeding.
+ * This service exist to allow the sending process to ensure
+ * that all previous messages have been processed before
+ * proceeding.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 5/10/06
+ * Programmer: JRM -- 5/10/06
*
*****************************************************************************/
static hbool_t
@@ -1712,7 +1711,7 @@ serve_sync_request(struct mssg_t * mssg_ptr)
reply.base_addr = 0;
reply.len = 0;
reply.ver = 0;
- reply.count = 0;
+ reply.count = 0;
reply.magic = MSSG_MAGIC;
}
@@ -1732,29 +1731,29 @@ serve_sync_request(struct mssg_t * mssg_ptr)
HDfprintf(stdout, "%d sync FAILED.\n", (int)(mssg_ptr->src));
}
- }
+ }
return(success);
} /* serve_sync_request() */
-
+
/*****************************************************************************
*
- * Function: serve_write_request()
+ * Function: serve_write_request()
*
- * Purpose: Serve a write request.
+ * Purpose: Serve a write request.
*
- * The function accepts a pointer to an instance of struct
- * mssg_t as input. If all sanity checks pass, it updates
- * the version number of the target data array entry as
- * specified in the message.
+ * The function accepts a pointer to an instance of struct
+ * mssg_t as input. If all sanity checks pass, it updates
+ * the version number of the target data array entry as
+ * specified in the message.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 12/21/05
+ * Programmer: JRM -- 12/21/05
*
*****************************************************************************/
static hbool_t
@@ -1812,7 +1811,7 @@ serve_write_request(struct mssg_t * mssg_ptr)
new_ver_num = mssg_ptr->ver;
/* this check should catch duplicate writes */
- if ( new_ver_num <= data[target_index].ver ) {
+ if ( new_ver_num <= data[target_index].ver ) {
nerrors++;
success = FALSE;
@@ -1826,12 +1825,12 @@ serve_write_request(struct mssg_t * mssg_ptr)
if ( success ) {
- /* process the write */
+ /* process the write */
data[target_index].ver = new_ver_num;
data[target_index].valid = TRUE;
/* and update the counters */
- total_writes++;
+ total_writes++;
(data[target_index].writes)++;
#if DO_WRITE_REQ_ACK
@@ -1847,7 +1846,7 @@ serve_write_request(struct mssg_t * mssg_ptr)
reply.count = 0;
reply.magic = MSSG_MAGIC;
- /* and send it */
+ /* and send it */
success = send_mssg(&reply, TRUE);
#endif /* DO_WRITE_REQ_ACK */
@@ -1859,7 +1858,7 @@ serve_write_request(struct mssg_t * mssg_ptr)
if ( success ) {
HDfprintf(stdout, "%d write 0x%llx. len = %d. ver = %d.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
(long long)(data[target_index].base_addr),
(int)(data[target_index].len),
(int)(data[target_index].ver));
@@ -1867,36 +1866,36 @@ serve_write_request(struct mssg_t * mssg_ptr)
} else {
HDfprintf(stdout, "%d write 0x%llx FAILED. len = %d. ver = %d.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
(long long)(data[target_index].base_addr),
(int)(data[target_index].len),
(int)(data[target_index].ver));
}
- }
+ }
return(success);
} /* serve_write_request() */
-
+
/*****************************************************************************
*
- * Function: serve_total_writes_request()
+ * Function: serve_total_writes_request()
*
- * Purpose: Serve a request for the total number of writes recorded since
- * the last reset.
+ * Purpose: Serve a request for the total number of writes recorded since
+ * the last reset.
*
- * The function accepts a pointer to an instance of struct
- * mssg_t as input. If all sanity checks pass, it sends
- * the current value of the total_writes global variable to
- * the requesting process.
+ * The function accepts a pointer to an instance of struct
+ * mssg_t as input. If all sanity checks pass, it sends
+ * the current value of the total_writes global variable to
+ * the requesting process.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 5/5/10
+ * Programmer: JRM -- 5/5/10
*
*****************************************************************************/
static hbool_t
@@ -1942,40 +1941,40 @@ serve_total_writes_request(struct mssg_t * mssg_ptr)
if ( success ) {
HDfprintf(stdout, "%d request total writes %ld.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
total_writes);
} else {
HDfprintf(stdout, "%d request total writes %ld -- FAILED.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
total_writes);
}
- }
+ }
return(success);
} /* serve_total_writes_request() */
-
+
/*****************************************************************************
*
- * Function: serve_total_reads_request()
+ * Function: serve_total_reads_request()
*
- * Purpose: Serve a request for the total number of reads recorded since
- * the last reset.
+ * Purpose: Serve a request for the total number of reads recorded since
+ * the last reset.
*
- * The function accepts a pointer to an instance of struct
- * mssg_t as input. If all sanity checks pass, it sends
- * the current value of the total_reads global variable to
- * the requesting process.
+ * The function accepts a pointer to an instance of struct
+ * mssg_t as input. If all sanity checks pass, it sends
+ * the current value of the total_reads global variable to
+ * the requesting process.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 5/5/10
+ * Programmer: JRM -- 5/5/10
*
*****************************************************************************/
static hbool_t
@@ -2021,40 +2020,40 @@ serve_total_reads_request(struct mssg_t * mssg_ptr)
if ( success ) {
HDfprintf(stdout, "%d request total reads %ld.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
total_reads);
} else {
HDfprintf(stdout, "%d request total reads %ld -- FAILED.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
total_reads);
}
- }
+ }
return(success);
} /* serve_total_reads_request() */
-
+
/*****************************************************************************
*
- * Function: serve_entry_writes_request()
+ * Function: serve_entry_writes_request()
*
- * Purpose: Serve an entry writes request.
+ * Purpose: Serve an entry writes request.
*
- * The function accepts a pointer to an instance of struct
- * mssg_t as input. If all sanity checks pass, it sends
- * the number of times that the indicated datum has been
- * written since the last counter reset to the requesting
- * process.
+ * The function accepts a pointer to an instance of struct
+ * mssg_t as input. If all sanity checks pass, it sends
+ * the number of times that the indicated datum has been
+ * written since the last counter reset to the requesting
+ * process.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 5/5/10
+ * Programmer: JRM -- 5/5/10
*
*****************************************************************************/
static hbool_t
@@ -2101,7 +2100,7 @@ serve_entry_writes_request(struct mssg_t * mssg_ptr)
reply.base_addr = target_addr;
reply.len = 0;
reply.ver = 0;
- reply.count = data[target_index].writes;
+ reply.count = data[target_index].writes;
reply.magic = MSSG_MAGIC;
}
}
@@ -2116,42 +2115,42 @@ serve_entry_writes_request(struct mssg_t * mssg_ptr)
if ( success ) {
HDfprintf(stdout, "%d request entry 0x%llx writes = %ld.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
(long long)(data[target_index].base_addr),
(long)(data[target_index].writes));
} else {
HDfprintf(stdout, "%d request entry 0x%llx writes = %ld FAILED.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
(long long)(data[target_index].base_addr),
(long)(data[target_index].writes));
}
- }
+ }
return(success);
} /* serve_entry_writes_request() */
-
+
/*****************************************************************************
*
- * Function: serve_entry_reads_request()
+ * Function: serve_entry_reads_request()
*
- * Purpose: Serve an entry reads request.
+ * Purpose: Serve an entry reads request.
*
- * The function accepts a pointer to an instance of struct
- * mssg_t as input. If all sanity checks pass, it sends
- * the number of times that the indicated datum has been
- * read since the last counter reset to the requesting
- * process.
+ * The function accepts a pointer to an instance of struct
+ * mssg_t as input. If all sanity checks pass, it sends
+ * the number of times that the indicated datum has been
+ * read since the last counter reset to the requesting
+ * process.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 5/5/10
+ * Programmer: JRM -- 5/5/10
*
*****************************************************************************/
static hbool_t
@@ -2198,7 +2197,7 @@ serve_entry_reads_request(struct mssg_t * mssg_ptr)
reply.base_addr = target_addr;
reply.len = 0;
reply.ver = 0;
- reply.count = (long)(data[target_index].reads);
+ reply.count = (long)(data[target_index].reads);
reply.magic = MSSG_MAGIC;
}
}
@@ -2213,41 +2212,41 @@ serve_entry_reads_request(struct mssg_t * mssg_ptr)
if ( success ) {
HDfprintf(stdout, "%d request entry 0x%llx reads = %ld.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
(long long)(data[target_index].base_addr),
(long)(data[target_index].reads));
} else {
HDfprintf(stdout, "%d request entry 0x%llx reads = %ld FAILED.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
(long long)(data[target_index].base_addr),
(long)(data[target_index].reads));
}
- }
+ }
return(success);
} /* serve_entry_reads_request() */
-
+
/*****************************************************************************
*
- * Function: serve_rw_count_reset_request()
+ * Function: serve_rw_count_reset_request()
*
- * Purpose: Serve read/write count reset request.
+ * Purpose: Serve read/write count reset request.
*
- * The function accepts a pointer to an instance of struct
- * mssg_t as input. If all sanity checks pass, it resets the
- * read/write counters, and sends a confirmation message to
- * the calling process.
+ * The function accepts a pointer to an instance of struct
+ * mssg_t as input. If all sanity checks pass, it resets the
+ * read/write counters, and sends a confirmation message to
+ * the calling process.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 5/5/10
+ * Programmer: JRM -- 5/5/10
*
*****************************************************************************/
static hbool_t
@@ -2272,7 +2271,7 @@ serve_rw_count_reset_request(struct mssg_t * mssg_ptr)
if ( success ) {
success = reset_server_counters();
- }
+ }
if ( success ) {
@@ -2306,7 +2305,7 @@ serve_rw_count_reset_request(struct mssg_t * mssg_ptr)
(int)(mssg_ptr->src));
}
- }
+ }
return(success);
@@ -2317,15 +2316,15 @@ serve_rw_count_reset_request(struct mssg_t * mssg_ptr)
/**************************** Call back functions ****************************/
/*****************************************************************************/
-
+
/*-------------------------------------------------------------------------
- * Function: datum_get_initial_load_size
+ * Function: datum_get_initial_load_size
*
- * Purpose: Query the image size for an entry before deserializing it
+ * Purpose: Query the image size for an entry before deserializing it
*
- * Return: SUCCEED
+ * Return: SUCCEED
*
- * Programmer: Quincey Koziol
+ * Programmer: Quincey Koziol
* 5/18/10
*
*-------------------------------------------------------------------------
@@ -2355,9 +2354,9 @@ datum_get_initial_load_size(void *udata_ptr, size_t *image_len_ptr)
if ( callbacks_verbose ) {
HDfprintf(stdout,
- "%d: get_initial_load_size() idx = %d, addr = %ld, len = %d.\n",
+ "%d: get_initial_load_size() idx = %d, addr = %ld, len = %d.\n",
world_mpi_rank, idx, (long)addr, (int)entry_ptr->local_len);
- fflush(stdout);
+ fflush(stdout);
}
/* Set image length size */
@@ -2366,15 +2365,15 @@ datum_get_initial_load_size(void *udata_ptr, size_t *image_len_ptr)
return(SUCCEED);
} /* get_initial_load_size() */
-
+
/*-------------------------------------------------------------------------
- * Function: datum_deserialize
+ * Function: datum_deserialize
*
- * Purpose: deserialize the entry.
+ * Purpose: deserialize the entry.
*
- * Return: void * (pointer to the in core representation of the entry)
+ * Return: void * (pointer to the in core representation of the entry)
*
- * Programmer: John Mainzer
+ * Programmer: John Mainzer
* 9/20/07
*
*-------------------------------------------------------------------------
@@ -2409,10 +2408,10 @@ datum_deserialize(const void * image_ptr,
if ( callbacks_verbose ) {
HDfprintf(stdout,
- "%d: deserialize() idx = %d, addr = %ld, len = %d, is_dirty = %d.\n",
- world_mpi_rank, idx, (long)addr, (int)len,
- (int)(entry_ptr->header.is_dirty));
- fflush(stdout);
+ "%d: deserialize() idx = %d, addr = %ld, len = %d, is_dirty = %d.\n",
+ world_mpi_rank, idx, (long)addr, (int)len,
+ (int)(entry_ptr->header.is_dirty));
+ fflush(stdout);
}
*dirty_ptr = FALSE;
@@ -2427,18 +2426,18 @@ datum_deserialize(const void * image_ptr,
} /* deserialize() */
-
+
/*-------------------------------------------------------------------------
- * Function: datum_image_len
+ * Function: datum_image_len
*
- * Purpose: Return the real (and possibly reduced) length of the image.
- * The helper functions verify that the correct version of
- * deserialize is being called, and then call deserialize
- * proper.
+ * Purpose: Return the real (and possibly reduced) length of the image.
+ * The helper functions verify that the correct version of
+ * deserialize is being called, and then call deserialize
+ * proper.
*
- * Return: SUCCEED
+ * Return: SUCCEED
*
- * Programmer: John Mainzer
+ * Programmer: John Mainzer
* 9/19/07
*
*-------------------------------------------------------------------------
@@ -2465,10 +2464,10 @@ datum_image_len(const void *thing, size_t *image_len)
if(callbacks_verbose) {
HDfprintf(stdout,
- "%d: image_len() idx = %d, addr = %ld, len = %d.\n",
- world_mpi_rank, idx, (long)(entry_ptr->base_addr),
- (int)(entry_ptr->local_len));
- fflush(stdout);
+ "%d: image_len() idx = %d, addr = %ld, len = %d.\n",
+ world_mpi_rank, idx, (long)(entry_ptr->base_addr),
+ (int)(entry_ptr->local_len));
+ fflush(stdout);
}
HDassert( entry_ptr->header.addr == entry_ptr->base_addr );
@@ -2478,15 +2477,15 @@ datum_image_len(const void *thing, size_t *image_len)
return(SUCCEED);
} /* datum_image_len() */
-
+
/*-------------------------------------------------------------------------
- * Function: datum_serialize
+ * Function: datum_serialize
*
- * Purpose: Serialize the supplied entry.
+ * Purpose: Serialize the supplied entry.
*
- * Return: SUCCEED if successful, FAIL otherwise.
+ * Return: SUCCEED if successful, FAIL otherwise.
*
- * Programmer: John Mainzer
+ * Programmer: John Mainzer
* 10/30/07
*
*-------------------------------------------------------------------------
@@ -2511,11 +2510,11 @@ datum_serialize(const H5F_t *f,
HDassert( f );
HDassert( f->shared );
HDassert( f->shared->cache );
-
+
cache_ptr = f->shared->cache;
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- HDassert( cache_ptr->aux_ptr );
+ HDassert( cache_ptr->aux_ptr );
aux_ptr = (H5AC_aux_t *)(f->shared->cache->aux_ptr);
@@ -2534,9 +2533,9 @@ datum_serialize(const H5F_t *f,
if ( callbacks_verbose ) {
HDfprintf(stdout,
- "%d: serialize() idx = %d, addr = %ld, len = %d.\n",
- world_mpi_rank, idx, (long)entry_ptr->header.addr, (int)len);
- fflush(stdout);
+ "%d: serialize() idx = %d, addr = %ld, len = %d.\n",
+ world_mpi_rank, idx, (long)entry_ptr->header.addr, (int)len);
+ fflush(stdout);
}
HDassert( entry_ptr->header.addr == entry_ptr->base_addr );
@@ -2557,16 +2556,16 @@ datum_serialize(const H5F_t *f,
} /* datum_serialize() */
-
+
/*-------------------------------------------------------------------------
- * Function: datum_notify
+ * Function: datum_notify
*
- * Purpose: Do the communication with the server we used to do in the
- * flush and load callbacks in the version 2 cache.
+ * Purpose: Do the communication with the server we used to do in the
+ * flush and load callbacks in the version 2 cache.
*
- * Return: SUCCEED
+ * Return: SUCCEED
*
- * Programmer: John Mainzer
+ * Programmer: John Mainzer
* 1/12/15
*
*-------------------------------------------------------------------------
@@ -2596,7 +2595,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
HDfprintf(stdout,
"%d: notify() action = %d, idx = %d, addr = %ld.\n",
- world_mpi_rank, (int) action, idx,
+ world_mpi_rank, (int) action, idx,
(long)entry_ptr->header.addr);
fflush(stdout);
}
@@ -2685,62 +2684,62 @@ datum_notify(H5C_notify_action_t action, void *thing)
}
#if 0 /* This has been useful debugging code -- keep it for now. */
- if ( mssg.req != READ_REQ_REPLY_CODE ) {
+ if ( mssg.req != READ_REQ_REPLY_CODE ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: mssg.req != READ_REQ_REPLY_CODE.\n",
- world_mpi_rank, FUNC);
- HDfprintf(stdout, "%d:%s: mssg.req = %d.\n",
- world_mpi_rank, FUNC, (int)(mssg.req));
- }
+ world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: mssg.req = %d.\n",
+ world_mpi_rank, FUNC, (int)(mssg.req));
+ }
- if ( mssg.src != world_server_mpi_rank ) {
+ if ( mssg.src != world_server_mpi_rank ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: mssg.src != world_server_mpi_rank.\n",
- world_mpi_rank, FUNC);
- }
+ world_mpi_rank, FUNC);
+ }
- if ( mssg.dest != world_mpi_rank ) {
+ if ( mssg.dest != world_mpi_rank ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: mssg.dest != world_mpi_rank.\n",
- world_mpi_rank, FUNC);
+ world_mpi_rank, FUNC);
}
- if ( mssg.base_addr != entry_ptr->base_addr ) {
+ if ( mssg.base_addr != entry_ptr->base_addr ) {
- HDfprintf(stdout,
- "%d:%s: mssg.base_addr != entry_ptr->base_addr.\n",
- world_mpi_rank, FUNC);
- HDfprintf(stdout, "%d:%s: mssg.base_addr = %a.\n",
- world_mpi_rank, FUNC, mssg.base_addr);
- HDfprintf(stdout,
+ HDfprintf(stdout,
+ "%d:%s: mssg.base_addr != entry_ptr->base_addr.\n",
+ world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: mssg.base_addr = %a.\n",
+ world_mpi_rank, FUNC, mssg.base_addr);
+ HDfprintf(stdout,
"%d:%s: entry_ptr->base_addr = %a.\n",
- world_mpi_rank, FUNC,
+ world_mpi_rank, FUNC,
entry_ptr->base_addr);
}
- if ( mssg.len != entry_ptr->len ) {
+ if ( mssg.len != entry_ptr->len ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: mssg.len != entry_ptr->len.\n",
- world_mpi_rank, FUNC);
- HDfprintf(stdout, "%d:%s: mssg.len = %a.\n",
- world_mpi_rank, FUNC, mssg.len);
+ world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: mssg.len = %a.\n",
+ world_mpi_rank, FUNC, mssg.len);
}
- if ( mssg.ver < entry_ptr->ver ) {
+ if ( mssg.ver < entry_ptr->ver ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: mssg.ver < entry_ptr->ver.\n",
- world_mpi_rank, FUNC);
+ world_mpi_rank, FUNC);
}
- if ( mssg.magic != MSSG_MAGIC ) {
+ if ( mssg.magic != MSSG_MAGIC ) {
- HDfprintf(stdout, "%d:%s: mssg.magic != MSSG_MAGIC.\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: mssg.magic != MSSG_MAGIC.\n",
+ world_mpi_rank, FUNC);
}
#endif /* JRM */
@@ -2753,7 +2752,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
}
break;
- case H5C_NOTIFY_ACTION_AFTER_FLUSH:
+ case H5C_NOTIFY_ACTION_AFTER_FLUSH:
if ( callbacks_verbose ) {
HDfprintf(stdout,
@@ -2767,11 +2766,11 @@ datum_notify(H5C_notify_action_t action, void *thing)
aux_ptr = entry_ptr->aux_ptr;
entry_ptr->aux_ptr = NULL;
- HDassert(entry_ptr->header.is_dirty); /* JRM */
+ HDassert(entry_ptr->header.is_dirty); /* JRM */
- if ( ( file_mpi_rank != 0 ) &&
+ if ( ( file_mpi_rank != 0 ) &&
( entry_ptr->dirty ) &&
- ( aux_ptr->metadata_write_strategy ==
+ ( aux_ptr->metadata_write_strategy ==
H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY ) ) {
ret_value = FAIL;
@@ -2784,8 +2783,8 @@ datum_notify(H5C_notify_action_t action, void *thing)
if ( entry_ptr->header.is_dirty ) {
- was_dirty = TRUE; /* so we will receive the ack
- * if requested
+ was_dirty = TRUE; /* so we will receive the ack
+ * if requested
*/
/* compose the message */
@@ -2811,7 +2810,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
else
{
entry_ptr->dirty = FALSE;
- entry_ptr->flushed = TRUE;
+ entry_ptr->flushed = TRUE;
}
}
}
@@ -2839,7 +2838,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
nerrors++;
ret_value = FAIL;
if ( verbose ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: Bad data in write req ack.\n",
world_mpi_rank, FUNC);
}
@@ -2855,7 +2854,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
datum_pinned_flushes++;
HDassert(entry_ptr->global_pinned || entry_ptr->local_pinned);
}
- break;
+ break;
case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
if ( callbacks_verbose ) {
@@ -2950,33 +2949,33 @@ datum_notify(H5C_notify_action_t action, void *thing)
/* do nothing */
break;
- default:
+ default:
nerrors++;
ret_value = FAIL;
if ( verbose ) {
HDfprintf(stdout, "%d:%s: Unknown notify action.\n",
world_mpi_rank, FUNC);
}
- break;
+ break;
}
return(ret_value);
} /* datum_notify() */
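The #if 0 block above checks each field of the server's read reply one at a time. The same checks can be folded into a single predicate, as in the sketch below; the field names come from that debug block, while the struct mssg_t / struct datum type names are assumed from the rest of t_cache.c, so this is illustrative only:

    /* Sketch only: fold the field checks from the debug block above into
     * one predicate.  The mssg_t and datum definitions are assumed from
     * elsewhere in t_cache.c.
     */
    static hbool_t
    reply_is_sane(const struct mssg_t *mssg_ptr, const struct datum *entry_ptr)
    {
        return ( ( mssg_ptr->req       == READ_REQ_REPLY_CODE ) &&
                 ( mssg_ptr->src       == world_server_mpi_rank ) &&
                 ( mssg_ptr->dest      == world_mpi_rank ) &&
                 ( mssg_ptr->base_addr == entry_ptr->base_addr ) &&
                 ( mssg_ptr->len       == entry_ptr->len ) &&
                 ( mssg_ptr->ver       >= entry_ptr->ver ) &&
                 ( mssg_ptr->magic     == MSSG_MAGIC ) );
    }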
-
+
/*-------------------------------------------------------------------------
- * Function: datum_free_icr
+ * Function: datum_free_icr
*
- * Purpose: Nominally, this callback is supposed to free the
- * in core representation of the entry.
+ * Purpose: Nominally, this callback is supposed to free the
+ * in core representation of the entry.
*
- * In the context of this test bed, we use it to do
- * all the processing we used to do on a destroy.
+ * In the context of this test bed, we use it to do
+ * all the processing we used to do on a destroy.
*
- * Return: SUCCEED
+ * Return: SUCCEED
*
- * Programmer: John Mainzer
+ * Programmer: John Mainzer
* 9/19/07
*
*-------------------------------------------------------------------------
@@ -3001,9 +3000,9 @@ datum_free_icr(void * thing)
if ( callbacks_verbose ) {
HDfprintf(stdout,
- "%d: free_icr() idx = %d, dirty = %d.\n",
- world_mpi_rank, idx, (int)(entry_ptr->dirty));
- fflush(stdout);
+ "%d: free_icr() idx = %d, dirty = %d.\n",
+ world_mpi_rank, idx, (int)(entry_ptr->dirty));
+ fflush(stdout);
}
HDassert( entry_ptr->header.addr == entry_ptr->base_addr );
@@ -3020,7 +3019,7 @@ datum_free_icr(void * thing)
return(SUCCEED);
} /* datum_free_icr() */
-
+
/*****************************************************************************/
/************************** test utility functions ***************************/
/*****************************************************************************/
@@ -3029,9 +3028,9 @@ datum_free_icr(void * thing)
* Function: expunge_entry()
*
* Purpose: Expunge the entry indicated by the type and index, mark it
- * as clean, and don't increment its version number.
+ * as clean, and don't increment its version number.
*
- * Do nothing if nerrors is non-zero on entry.
+ * Do nothing if nerrors is non-zero on entry.
*
* Return: void
*
@@ -3062,36 +3061,36 @@ expunge_entry(H5F_t * file_ptr,
if ( nerrors == 0 ) {
result = H5AC_expunge_entry(file_ptr, (hid_t)-1, &(types[0]),
- entry_ptr->header.addr, H5AC__NO_FLAGS_SET);
+ entry_ptr->header.addr, H5AC__NO_FLAGS_SET);
if ( result < 0 ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Error in H5AC_expunge_entry().\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Error in H5AC_expunge_entry().\n",
+ world_mpi_rank, FUNC);
}
}
HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
- HDassert( ! ((entry_ptr->header).is_dirty) );
+ HDassert( ! ((entry_ptr->header).is_dirty) );
- result = H5C_get_entry_status(file_ptr, entry_ptr->base_addr,
- NULL, &in_cache, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ result = H5C_get_entry_status(file_ptr, entry_ptr->base_addr,
+ NULL, &in_cache, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
- if ( result < 0 ) {
+ if ( result < 0 ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Error in H5C_get_entry_status().\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Error in H5C_get_entry_status().\n",
+ world_mpi_rank, FUNC);
}
} else if ( in_cache ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Expunged entry still in cache?!?\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Expunged entry still in cache?!?\n",
+ world_mpi_rank, FUNC);
}
}
}
@@ -3100,14 +3099,14 @@ expunge_entry(H5F_t * file_ptr,
} /* expunge_entry() */
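The expunge path is a two-step pattern: H5AC_expunge_entry() drops the entry from the metadata cache, and H5C_get_entry_status() then confirms it is no longer resident. A trimmed sketch of that sequence, using only the call signatures visible in the hunk above (verbose error reporting omitted):

    hbool_t in_cache = FALSE;

    /* drop the entry from the metadata cache ... */
    if ( H5AC_expunge_entry(file_ptr, (hid_t)-1, &(types[0]),
                            entry_ptr->header.addr, H5AC__NO_FLAGS_SET) < 0 )
        nerrors++;

    /* ... then confirm that it really is gone */
    if ( H5C_get_entry_status(file_ptr, entry_ptr->base_addr, NULL, &in_cache,
                              NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0 )
        nerrors++;
    else if ( in_cache )
        nerrors++;              /* the expunged entry is still resident */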
-
+
/*****************************************************************************
* Function: insert_entry()
*
* Purpose: Insert the entry indicated by the type and index, mark it
- * as dirty, and increment its version number.
+ * as dirty, and increment its version number.
*
- * Do nothing if nerrors is non-zero on entry.
+ * Do nothing if nerrors is non-zero on entry.
*
* Return: void
*
@@ -3158,33 +3157,33 @@ insert_entry(H5C_t * cache_ptr,
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Error in H5AC_insert_entry().\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Error in H5AC_insert_entry().\n",
+ world_mpi_rank, FUNC);
}
}
if ( ! (entry_ptr->header.is_dirty) ) {
- /* it is possible that we just exceeded the dirty bytes
- * threshold, triggering a write of the newly inserted
- * entry. Test for this, and only flag an error if this
- * is not the case.
- */
+ /* it is possible that we just exceeded the dirty bytes
+ * threshold, triggering a write of the newly inserted
+ * entry. Test for this, and only flag an error if this
+ * is not the case.
+ */
- struct H5AC_aux_t * aux_ptr;
+ struct H5AC_aux_t * aux_ptr;
- aux_ptr = ((H5AC_aux_t *)(cache_ptr->aux_ptr));
+ aux_ptr = ((H5AC_aux_t *)(cache_ptr->aux_ptr));
- if ( ! ( ( aux_ptr != NULL ) &&
- ( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC ) &&
- ( aux_ptr->dirty_bytes == 0 ) ) ) {
+ if ( ! ( ( aux_ptr != NULL ) &&
+ ( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC ) &&
+ ( aux_ptr->dirty_bytes == 0 ) ) ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: data[%d].header.is_dirty = %d.\n",
- world_mpi_rank, FUNC, idx,
+ HDfprintf(stdout, "%d:%s: data[%d].header.is_dirty = %d.\n",
+ world_mpi_rank, FUNC, idx,
(int)(data[idx].header.is_dirty));
- }
+ }
}
}
@@ -3192,7 +3191,7 @@ insert_entry(H5C_t * cache_ptr,
HDassert( entry_ptr->header.is_pinned );
entry_ptr->global_pinned = TRUE;
- global_pins++;
+ global_pins++;
} else {
@@ -3209,7 +3208,7 @@ insert_entry(H5C_t * cache_ptr,
} /* insert_entry() */
-
+
/*****************************************************************************
* Function: local_pin_and_unpin_random_entries()
*
@@ -3228,8 +3227,8 @@ static void
local_pin_and_unpin_random_entries(H5F_t * file_ptr,
int min_idx,
int max_idx,
- int min_count,
- int max_count)
+ int min_count,
+ int max_count)
{
if ( nerrors == 0 ) {
@@ -3244,40 +3243,40 @@ local_pin_and_unpin_random_entries(H5F_t * file_ptr,
HDassert( min_idx < max_idx );
HDassert( max_idx < NUM_DATA_ENTRIES );
HDassert( max_idx < virt_num_data_entries );
- HDassert( 0 <= min_count );
- HDassert( min_count < max_count );
+ HDassert( 0 <= min_count );
+ HDassert( min_count < max_count );
- count = (HDrand() % (max_count - min_count)) + min_count;
+ count = (HDrand() % (max_count - min_count)) + min_count;
- HDassert( min_count <= count );
- HDassert( count <= max_count );
+ HDassert( min_count <= count );
+ HDassert( count <= max_count );
- for ( i = 0; i < count; i++ )
- {
+ for ( i = 0; i < count; i++ )
+ {
local_pin_random_entry(file_ptr, min_idx, max_idx);
- }
+ }
- count = (HDrand() % (max_count - min_count)) + min_count;
+ count = (HDrand() % (max_count - min_count)) + min_count;
- HDassert( min_count <= count );
- HDassert( count <= max_count );
+ HDassert( min_count <= count );
+ HDassert( count <= max_count );
i = 0;
- idx = 0;
+ idx = 0;
- while ( ( i < count ) && ( idx >= 0 ) )
- {
- via_unprotect = ( (((unsigned)i) & 0x0001) == 0 );
- idx = local_unpin_next_pinned_entry(file_ptr, idx, via_unprotect);
- i++;
- }
+ while ( ( i < count ) && ( idx >= 0 ) )
+ {
+ via_unprotect = ( (((unsigned)i) & 0x0001) == 0 );
+ idx = local_unpin_next_pinned_entry(file_ptr, idx, via_unprotect);
+ i++;
+ }
}
return;
} /* local_pin_and_unpin_random_entries() */
-
+
/*****************************************************************************
* Function: local_pin_random_entry()
*
@@ -3309,13 +3308,13 @@ local_pin_random_entry(H5F_t * file_ptr,
HDassert( max_idx < NUM_DATA_ENTRIES );
HDassert( max_idx < virt_num_data_entries );
- do
- {
- idx = (HDrand() % (max_idx - min_idx)) + min_idx;
+ do
+ {
+ idx = (HDrand() % (max_idx - min_idx)) + min_idx;
HDassert( min_idx <= idx );
HDassert( idx <= max_idx );
- }
- while ( data[idx].global_pinned || data[idx].local_pinned );
+ }
+ while ( data[idx].global_pinned || data[idx].local_pinned );
pin_entry(file_ptr, idx, FALSE, FALSE);
}
@@ -3324,7 +3323,7 @@ local_pin_random_entry(H5F_t * file_ptr,
} /* local_pin_random_entry() */
-
+
/*****************************************************************************
* Function: local_unpin_all_entries()
*
@@ -3340,7 +3339,7 @@ local_pin_random_entry(H5F_t * file_ptr,
*****************************************************************************/
static void
local_unpin_all_entries(H5F_t * file_ptr,
- hbool_t via_unprotect)
+ hbool_t via_unprotect)
{
if ( nerrors == 0 ) {
@@ -3349,25 +3348,25 @@ local_unpin_all_entries(H5F_t * file_ptr,
HDassert( file_ptr );
- idx = 0;
+ idx = 0;
- while ( idx >= 0 )
- {
- idx = local_unpin_next_pinned_entry(file_ptr,
- idx, via_unprotect);
- }
+ while ( idx >= 0 )
+ {
+ idx = local_unpin_next_pinned_entry(file_ptr,
+ idx, via_unprotect);
+ }
}
return;
} /* local_unpin_all_entries() */
-
+
/*****************************************************************************
* Function: local_unpin_next_pinned_entry()
*
* Purpose: Find the next locally pinned entry after the specified
- * starting point, and unpin it.
+ * starting point, and unpin it.
*
* Do nothing if nerrors is non-zero on entry.
*
@@ -3382,7 +3381,7 @@ local_unpin_all_entries(H5F_t * file_ptr,
static int
local_unpin_next_pinned_entry(H5F_t * file_ptr,
int start_idx,
- hbool_t via_unprotect)
+ hbool_t via_unprotect)
{
int i = 0;
int idx = -1;
@@ -3394,39 +3393,39 @@ local_unpin_next_pinned_entry(H5F_t * file_ptr,
HDassert( start_idx < NUM_DATA_ENTRIES );
HDassert( start_idx < virt_num_data_entries );
- idx = start_idx;
+ idx = start_idx;
- while ( ( i < virt_num_data_entries ) &&
- ( ! ( data[idx].local_pinned ) ) )
- {
- i++;
- idx++;
- if ( idx >= virt_num_data_entries ) {
- idx = 0;
- }
- }
+ while ( ( i < virt_num_data_entries ) &&
+ ( ! ( data[idx].local_pinned ) ) )
+ {
+ i++;
+ idx++;
+ if ( idx >= virt_num_data_entries ) {
+ idx = 0;
+ }
+ }
- if ( data[idx].local_pinned ) {
+ if ( data[idx].local_pinned ) {
- unpin_entry(file_ptr, idx, FALSE, FALSE, via_unprotect);
+ unpin_entry(file_ptr, idx, FALSE, FALSE, via_unprotect);
- } else {
+ } else {
- idx = -1;
- }
+ idx = -1;
+ }
}
return(idx);
} /* local_unpin_next_pinned_entry() */
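The scan above is a circular search: starting at start_idx it walks forward, wrapping at virt_num_data_entries, until it finds a locally pinned slot or has inspected every slot. The same logic over a plain flag array, as a standalone sketch (names here are illustrative, not from t_cache.c):

    /* Illustrative only: circular scan for the next set flag at or after
     * start, returning -1 if no flag is set.  Assumes n > 0; hbool_t is
     * HDF5's boolean type.
     */
    static int
    next_set_flag(const hbool_t *pinned, int n, int start)
    {
        int i;
        int idx = start;

        for ( i = 0; ( i < n ) && ( ! pinned[idx] ); i++ )
            idx = (idx + 1) % n;        /* wrap around the array */

        return ( pinned[idx] ? idx : -1 );
    }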
-
+
/*****************************************************************************
* Function: lock_and_unlock_random_entries()
*
* Purpose: Obtain a random number in the closed interval [min_count,
- * max_count]. Then protect and unprotect that number of
- * random entries.
+ * max_count]. Then protect and unprotect that number of
+ * random entries.
*
* Do nothing if nerrors is non-zero on entry.
*
@@ -3467,12 +3466,12 @@ lock_and_unlock_random_entries(H5F_t * file_ptr,
} /* lock_and_unlock_random_entries() */
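The header above promises a count in the closed interval [min_count, max_count]. Note that the HDrand() % (max_count - min_count) + min_count draw used in local_pin_and_unpin_random_entries() earlier in this diff yields values in [min_count, max_count - 1]; if the same idiom is used here, a draw that really covers the closed interval needs the +1 shown in this generic sketch (not code from this diff):

    /* Illustrative only: a draw that covers the closed interval [lo, hi].
     * Assumes lo <= hi; HDrand() is HDF5's wrapper around rand().
     */
    static int
    rand_in_closed_interval(int lo, int hi)
    {
        return ( lo + (HDrand() % (hi - lo + 1)) );
    }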
-
+
/*****************************************************************************
* Function: lock_and_unlock_random_entry()
*
* Purpose: Protect and then unprotect a random entry with index in
- * the data[] array in the closed interval [min_idx, max_idx].
+ * the data[] array in the closed interval [min_idx, max_idx].
*
* Do nothing if nerrors is non-zero on entry.
*
@@ -3502,15 +3501,15 @@ lock_and_unlock_random_entry(H5F_t * file_ptr,
HDassert( min_idx <= idx );
HDassert( idx <= max_idx );
- lock_entry(file_ptr, idx);
- unlock_entry(file_ptr, idx, H5AC__NO_FLAGS_SET);
+ lock_entry(file_ptr, idx);
+ unlock_entry(file_ptr, idx, H5AC__NO_FLAGS_SET);
}
return;
} /* lock_and_unlock_random_entry() */
-
+
/*****************************************************************************
* Function: lock_entry()
*
@@ -3525,9 +3524,9 @@ lock_and_unlock_random_entry(H5F_t * file_ptr,
*
* Modifications:
*
- * JRM -- 7/11/06
- * Modified asserts to handle the new local_len field in
- * datum.
+ * JRM -- 7/11/06
+ * Modified asserts to handle the new local_len field in
+ * datum.
*
*****************************************************************************/
static void
@@ -3544,30 +3543,30 @@ lock_entry(H5F_t * file_ptr,
entry_ptr = &(data[idx]);
- HDassert( ! (entry_ptr->locked) );
+ HDassert( ! (entry_ptr->locked) );
- cache_entry_ptr = (H5C_cache_entry_t *)H5AC_protect(file_ptr,
+ cache_entry_ptr = (H5C_cache_entry_t *)H5AC_protect(file_ptr,
H5AC_ind_read_dxpl_id,
&(types[0]), entry_ptr->base_addr,
- &entry_ptr->base_addr,
+ &entry_ptr->base_addr,
H5AC__NO_FLAGS_SET);
if ( ( cache_entry_ptr != (void *)(&(entry_ptr->header)) ) ||
( entry_ptr->header.type != &(types[0]) ) ||
( ( entry_ptr->len != entry_ptr->header.size ) &&
- ( entry_ptr->local_len != entry_ptr->header.size ) ) ||
+ ( entry_ptr->local_len != entry_ptr->header.size ) ) ||
( entry_ptr->base_addr != entry_ptr->header.addr ) ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: error in H5AC_protect().\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: error in H5AC_protect().\n",
+ world_mpi_rank, FUNC);
}
} else {
- entry_ptr->locked = TRUE;
+ entry_ptr->locked = TRUE;
- }
+ }
HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
}
@@ -3604,33 +3603,33 @@ mark_entry_dirty(int32_t idx)
entry_ptr = &(data[idx]);
HDassert ( entry_ptr->locked || entry_ptr->global_pinned );
- HDassert ( ! (entry_ptr->local_pinned) );
+ HDassert ( ! (entry_ptr->local_pinned) );
(entry_ptr->ver)++;
entry_ptr->dirty = TRUE;
- result = H5AC_mark_entry_dirty( (void *)entry_ptr);
+ result = H5AC_mark_entry_dirty( (void *)entry_ptr);
if ( result < 0 ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: error in H5AC_mark_entry_dirty().\n",
world_mpi_rank, FUNC);
}
}
- else if ( ! ( entry_ptr->locked ) )
- {
- global_dirty_pins++;
- }
+ else if ( ! ( entry_ptr->locked ) )
+ {
+ global_dirty_pins++;
+ }
}
return;
} /* mark_entry_dirty() */
-
+
/*****************************************************************************
* Function: pin_entry()
*
@@ -3647,8 +3646,8 @@ mark_entry_dirty(int32_t idx)
static void
pin_entry(H5F_t * file_ptr,
int32_t idx,
- hbool_t global,
- hbool_t dirty)
+ hbool_t global,
+ hbool_t dirty)
{
unsigned int flags = H5AC__PIN_ENTRY_FLAG;
struct datum * entry_ptr;
@@ -3661,35 +3660,35 @@ pin_entry(H5F_t * file_ptr,
entry_ptr = &(data[idx]);
- HDassert ( ! (entry_ptr->global_pinned) );
- HDassert ( ! (entry_ptr->local_pinned) );
- HDassert ( ! ( dirty && ( ! global ) ) );
+ HDassert ( ! (entry_ptr->global_pinned) );
+ HDassert ( ! (entry_ptr->local_pinned) );
+ HDassert ( ! ( dirty && ( ! global ) ) );
- lock_entry(file_ptr, idx);
+ lock_entry(file_ptr, idx);
- if ( dirty ) {
+ if ( dirty ) {
- flags |= H5AC__DIRTIED_FLAG;
- }
+ flags |= H5AC__DIRTIED_FLAG;
+ }
- unlock_entry(file_ptr, idx, flags);
+ unlock_entry(file_ptr, idx, flags);
HDassert( (entry_ptr->header).is_pinned );
- HDassert( ( ! dirty ) || ( (entry_ptr->header).is_dirty ) );
+ HDassert( ( ! dirty ) || ( (entry_ptr->header).is_dirty ) );
- if ( global ) {
+ if ( global ) {
- entry_ptr->global_pinned = TRUE;
+ entry_ptr->global_pinned = TRUE;
- global_pins++;
+ global_pins++;
- } else {
+ } else {
- entry_ptr->local_pinned = TRUE;
+ entry_ptr->local_pinned = TRUE;
- local_pins++;
+ local_pins++;
- }
+ }
}
return;
@@ -3697,14 +3696,14 @@ pin_entry(H5F_t * file_ptr,
} /* pin_entry() */
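As the body above shows, a pin in this test goes through the protect/unprotect path: lock_entry() protects the entry, and unlock_entry() releases it with H5AC__PIN_ENTRY_FLAG, adding H5AC__DIRTIED_FLAG when a dirty (global-only) pin is requested. The core of that flag handling, condensed from the hunks above:

    unsigned int flags = H5AC__PIN_ENTRY_FLAG;

    lock_entry(file_ptr, idx);              /* H5AC_protect() underneath */

    if ( dirty )
        flags |= H5AC__DIRTIED_FLAG;        /* dirty pins are global only */

    unlock_entry(file_ptr, idx, flags);     /* unprotect with the pin flag set */

    HDassert( (entry_ptr->header).is_pinned );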
#ifdef H5_METADATA_TRACE_FILE
-
+
/*****************************************************************************
* Function: pin_protected_entry()
*
* Purpose: Pin the already-protected entry indicated by idx, as either
- * a global or a local pin as directed by the global parameter.
+ * a global or a local pin as directed by the global parameter.
*
- * Do nothing if nerrors is non-zero on entry.
+ * Do nothing if nerrors is non-zero on entry.
*
* Return: void
*
@@ -3714,7 +3713,7 @@ pin_entry(H5F_t * file_ptr,
*****************************************************************************/
static void
pin_protected_entry(int32_t idx,
- hbool_t global)
+ hbool_t global)
{
herr_t result;
struct datum * entry_ptr;
@@ -3728,36 +3727,36 @@ pin_protected_entry(int32_t idx,
if ( nerrors == 0 ) {
- result = H5AC_pin_protected_entry((void *)entry_ptr);
+ result = H5AC_pin_protected_entry((void *)entry_ptr);
if ( ( result < 0 ) ||
( entry_ptr->header.type != &(types[0]) ) ||
( ( entry_ptr->len != entry_ptr->header.size ) &&
- ( entry_ptr->local_len != entry_ptr->header.size ) )||
+ ( entry_ptr->local_len != entry_ptr->header.size ) )||
( entry_ptr->base_addr != entry_ptr->header.addr ) ||
- ( ! ( (entry_ptr->header).is_pinned ) ) ) {
+ ( ! ( (entry_ptr->header).is_pinned ) ) ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: Error in H5AC_pin_protected entry().\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout,
+ "%d:%s: Error in H5AC_pin_protected entry().\n",
+ world_mpi_rank, FUNC);
}
}
if ( global ) {
- entry_ptr->global_pinned = TRUE;
+ entry_ptr->global_pinned = TRUE;
- global_pins++;
+ global_pins++;
- } else {
+ } else {
- entry_ptr->local_pinned = TRUE;
+ entry_ptr->local_pinned = TRUE;
- local_pins++;
+ local_pins++;
- }
+ }
HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
}
@@ -3767,16 +3766,16 @@ pin_protected_entry(int32_t idx,
} /* pin_protected_entry() */
#endif /* H5_METADATA_TRACE_FILE */
-
+
/*****************************************************************************
* Function: move_entry()
*
* Purpose: Move the entry indicated by old_idx to the entry indicated
- * by new_idx. Touch up the data array so that flush will
- * not choke.
+ * by new_idx. Touch up the data array so that flush will
+ * not choke.
*
- * Do nothing if nerrors isn't zero, or if old_idx equals
- * new_idx.
+ * Do nothing if nerrors isn't zero, or if old_idx equals
+ * new_idx.
*
* Return: void
*
@@ -3790,8 +3789,8 @@ move_entry(H5F_t * file_ptr,
int32_t new_idx)
{
herr_t result;
- int tmp;
- size_t tmp_len;
+ int tmp;
+ size_t tmp_len;
haddr_t old_addr = HADDR_UNDEF;
haddr_t new_addr = HADDR_UNDEF;
struct datum * old_entry_ptr;
@@ -3819,12 +3818,12 @@ move_entry(H5F_t * file_ptr,
/* Moving will mark the entry dirty if it is not already */
old_entry_ptr->dirty = TRUE;
- /* touch up versions, base_addrs, and data_index. Do this
- * now as it is possible that the rename will trigger a
+ /* touch up versions, base_addrs, and data_index. Do this
+ * now as it is possible that the rename will trigger a
* sync point.
*/
if(old_entry_ptr->ver < new_entry_ptr->ver)
- old_entry_ptr->ver = new_entry_ptr->ver;
+ old_entry_ptr->ver = new_entry_ptr->ver;
else
(old_entry_ptr->ver)++;
@@ -3838,11 +3837,11 @@ move_entry(H5F_t * file_ptr,
old_entry_ptr->index = new_entry_ptr->index;
new_entry_ptr->index = tmp;
- if(old_entry_ptr->local_len != new_entry_ptr->local_len) {
- tmp_len = old_entry_ptr->local_len;
- old_entry_ptr->local_len = new_entry_ptr->local_len;
- new_entry_ptr->local_len = tmp_len;
- } /* end if */
+ if(old_entry_ptr->local_len != new_entry_ptr->local_len) {
+ tmp_len = old_entry_ptr->local_len;
+ old_entry_ptr->local_len = new_entry_ptr->local_len;
+ new_entry_ptr->local_len = tmp_len;
+ } /* end if */
result = H5AC_move_entry(file_ptr, &(types[0]), old_addr, new_addr, H5AC_ind_read_dxpl_id);
@@ -3850,8 +3849,8 @@ move_entry(H5F_t * file_ptr,
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5AC_move_entry() failed.\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5AC_move_entry() failed.\n",
+ world_mpi_rank, FUNC);
}
} else {
@@ -3860,27 +3859,27 @@ move_entry(H5F_t * file_ptr,
if ( ! (old_entry_ptr->header.is_dirty) ) {
- /* it is possible that we just exceeded the dirty bytes
- * threshold, triggering a write of the newly inserted
- * entry. Test for this, and only flag an error if this
- * is not the case.
- */
+ /* it is possible that we just exceeded the dirty bytes
+ * threshold, triggering a write of the newly inserted
+ * entry. Test for this, and only flag an error if this
+ * is not the case.
+ */
- struct H5AC_aux_t * aux_ptr;
+ struct H5AC_aux_t * aux_ptr;
- aux_ptr = ((H5AC_aux_t *)(file_ptr->shared->cache->aux_ptr));
+ aux_ptr = ((H5AC_aux_t *)(file_ptr->shared->cache->aux_ptr));
- if ( ! ( ( aux_ptr != NULL ) &&
- ( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC ) &&
- ( aux_ptr->dirty_bytes == 0 ) ) ) {
+ if ( ! ( ( aux_ptr != NULL ) &&
+ ( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC ) &&
+ ( aux_ptr->dirty_bytes == 0 ) ) ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: data[%d].header.is_dirty = %d.\n",
- world_mpi_rank, FUNC, new_idx,
+ world_mpi_rank, FUNC, new_idx,
(int)(data[new_idx].header.is_dirty));
- }
+ }
}
} else {
@@ -3891,19 +3890,19 @@ move_entry(H5F_t * file_ptr,
} /* move_entry() */
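Note the ordering in the body above: the local bookkeeping (versions, base addresses, lengths, and data_index slots) is fixed up before H5AC_move_entry() is called, because the move may trigger a sync point that inspects it. A compressed sketch of the tail of that sequence, using only statements visible in these hunks:

    /* swap the local bookkeeping first ... */
    tmp                  = old_entry_ptr->index;
    old_entry_ptr->index = new_entry_ptr->index;
    new_entry_ptr->index = tmp;

    if ( old_entry_ptr->local_len != new_entry_ptr->local_len ) {
        tmp_len                  = old_entry_ptr->local_len;
        old_entry_ptr->local_len = new_entry_ptr->local_len;
        new_entry_ptr->local_len = tmp_len;
    }

    /* ... then let the metadata cache perform the move */
    if ( H5AC_move_entry(file_ptr, &(types[0]), old_addr, new_addr,
                         H5AC_ind_read_dxpl_id) < 0 )
        nerrors++;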
-
+
/*****************************************************************************
*
- * Function: reset_server_counts()
+ * Function: reset_server_counts()
*
- * Purpose: Send a message to the server process requesting it to reset
- * its counters. Await confirmation message.
+ * Purpose: Send a message to the server process requesting it to reset
+ * its counters. Await confirmation message.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 5/6/10
+ * Programmer: JRM -- 5/6/10
*
*****************************************************************************/
static hbool_t
@@ -3958,7 +3957,7 @@ reset_server_counts(void)
nerrors++;
success = FALSE;
if ( verbose ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: Bad data in req r/w counter reset reply.\n",
world_mpi_rank, FUNC);
}
@@ -3969,15 +3968,15 @@ reset_server_counts(void)
} /* reset_server_counts() */
-
+
/*****************************************************************************
* Function: resize_entry()
*
* Purpose: Resize the pinned entry indicated by idx to the new_size.
- * Note that new_size must be greater than 0, and must be
- * less than or equal to the original size of the entry.
+ * Note that new_size must be greater than 0, and must be
+ * less than or equal to the original size of the entry.
*
- * Do nothing if nerrors isn't zero.
+ * Do nothing if nerrors isn't zero.
*
* Return: void
*
@@ -3987,7 +3986,7 @@ reset_server_counts(void)
*****************************************************************************/
static void
resize_entry(int32_t idx,
- size_t new_size)
+ size_t new_size)
{
herr_t result;
struct datum * entry_ptr;
@@ -4001,21 +4000,21 @@ resize_entry(int32_t idx,
HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
HDassert( !(entry_ptr->locked) );
- HDassert( ( entry_ptr->global_pinned ) &&
- ( ! entry_ptr->local_pinned ) );
- HDassert( ( entry_ptr->header.size == entry_ptr->len ) ||
- ( entry_ptr->header.size == entry_ptr->local_len ) );
- HDassert( new_size > 0 );
- HDassert( new_size <= entry_ptr->len );
+ HDassert( ( entry_ptr->global_pinned ) &&
+ ( ! entry_ptr->local_pinned ) );
+ HDassert( ( entry_ptr->header.size == entry_ptr->len ) ||
+ ( entry_ptr->header.size == entry_ptr->local_len ) );
+ HDassert( new_size > 0 );
+ HDassert( new_size <= entry_ptr->len );
- result = H5AC_resize_entry((void *)entry_ptr, new_size);
+ result = H5AC_resize_entry((void *)entry_ptr, new_size);
if ( result < 0 ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5AC_resize_entry() failed.\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5AC_resize_entry() failed.\n",
+ world_mpi_rank, FUNC);
}
} else {
@@ -4025,7 +4024,7 @@ resize_entry(int32_t idx,
HDassert( entry_ptr->header.size == new_size );
entry_ptr->dirty = TRUE;
- entry_ptr->local_len = new_size;
+ entry_ptr->local_len = new_size;
/* touch up version. */
@@ -4037,24 +4036,24 @@ resize_entry(int32_t idx,
} /* resize_entry() */
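The resize path follows the same "call the cache, then mirror the result locally" shape: H5AC_resize_entry() changes the cached size, and on success the test marks the entry dirty, records the new size in local_len, and touches up the version. A condensed sketch of that sequence (the version increment stands in for the "touch up version" step noted above; its exact form is assumed):

    HDassert( ( new_size > 0 ) && ( new_size <= entry_ptr->len ) );

    if ( H5AC_resize_entry((void *)entry_ptr, new_size) < 0 ) {
        nerrors++;                        /* resize failed */
    } else {
        entry_ptr->dirty     = TRUE;      /* a resize dirties the entry */
        entry_ptr->local_len = new_size;  /* remember the local size */
        (entry_ptr->ver)++;               /* touch up the version */
    }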
-
+
/*****************************************************************************
*
- * Function: setup_cache_for_test()
+ * Function: setup_cache_for_test()
*
- * Purpose: Setup the parallel cache for a test, and return the file id
- * and a pointer to the cache's internal data structures.
+ * Purpose: Setup the parallel cache for a test, and return the file id
+ * and a pointer to the cache's internal data structures.
*
- * To do this, we must create a file, flush it (so that we
- * don't have to worry about entries in the metadata cache),
- * look up the address of the metadata cache, and then instruct
- * the cache to omit sanity checks on dxpl IDs.
+ * To do this, we must create a file, flush it (so that we
+ * don't have to worry about entries in the metadata cache),
+ * look up the address of the metadata cache, and then instruct
+ * the cache to omit sanity checks on dxpl IDs.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 1/4/06
+ * Programmer: JRM -- 1/4/06
*
*****************************************************************************/
static hbool_t
@@ -4081,13 +4080,13 @@ setup_cache_for_test(hid_t * fid_ptr,
if ( fid < 0 ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Fcreate() failed.\n",
+ HDfprintf(stdout, "%d:%s: H5Fcreate() failed.\n",
world_mpi_rank, FUNC);
}
} else if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
world_mpi_rank, FUNC);
}
} else {
@@ -4097,7 +4096,7 @@ setup_cache_for_test(hid_t * fid_ptr,
if ( file_ptr == NULL ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Can't get file_ptr.\n",
+ HDfprintf(stdout, "%d:%s: Can't get file_ptr.\n",
world_mpi_rank, FUNC);
}
} else {
@@ -4107,13 +4106,13 @@ setup_cache_for_test(hid_t * fid_ptr,
if ( cache_ptr == NULL ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Can't get cache_ptr.\n",
+ HDfprintf(stdout, "%d:%s: Can't get cache_ptr.\n",
world_mpi_rank, FUNC);
}
} else if ( cache_ptr->magic != H5C__H5C_T_MAGIC ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Bad cache_ptr magic.\n",
+ HDfprintf(stdout, "%d:%s: Bad cache_ptr magic.\n",
world_mpi_rank, FUNC);
}
} else {
@@ -4132,7 +4131,7 @@ setup_cache_for_test(hid_t * fid_ptr,
if ( H5AC_get_cache_auto_resize_config(cache_ptr, &config)
!= SUCCEED ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: H5AC_get_cache_auto_resize_config(1) failed.\n",
world_mpi_rank, FUNC);
@@ -4142,9 +4141,9 @@ setup_cache_for_test(hid_t * fid_ptr,
config.metadata_write_strategy = metadata_write_strategy;
if ( H5AC_set_cache_auto_resize_config(cache_ptr, &config)
- != SUCCEED ) {
+ != SUCCEED ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: H5AC_set_cache_auto_resize_config() failed.\n",
world_mpi_rank, FUNC);
@@ -4167,15 +4166,15 @@ setup_cache_for_test(hid_t * fid_ptr,
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: cache_ptr->aux_ptr == NULL.\n",
+ HDfprintf(stdout, "%d:%s: cache_ptr->aux_ptr == NULL.\n",
world_mpi_rank, FUNC);
}
- } else if ( ((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic !=
+ } else if ( ((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic !=
H5AC__H5AC_AUX_T_MAGIC ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: cache_ptr->aux_ptr->magic != H5AC__H5AC_AUX_T_MAGIC.\n",
world_mpi_rank, FUNC);
}
@@ -4184,14 +4183,14 @@ setup_cache_for_test(hid_t * fid_ptr,
nerrors++;
if ( verbose ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: bad cache_ptr->aux_ptr->metadata_write_strategy\n",
world_mpi_rank, FUNC);
}
}
}
- /* also verify that the expected metadata write strategy is reported
+ /* also verify that the expected metadata write strategy is reported
* when we get the current configuration.
*/
@@ -4202,25 +4201,25 @@ setup_cache_for_test(hid_t * fid_ptr,
if ( H5AC_get_cache_auto_resize_config(cache_ptr, &test_config)
!= SUCCEED ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: H5AC_get_cache_auto_resize_config(2) failed.\n",
world_mpi_rank, FUNC);
- } else if ( test_config.metadata_write_strategy !=
+ } else if ( test_config.metadata_write_strategy !=
metadata_write_strategy ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: unexpected metadata_write_strategy.\n",
world_mpi_rank, FUNC);
}
}
}
- /* allocate space for test entries -- do this before we set the
+ /* allocate space for test entries -- do this before we set the
* sync point done callback as it will dirty the superblock, requiring
* another flush. If the sync point done callback is set, this will
* cause a spurious failure.
@@ -4233,10 +4232,10 @@ setup_cache_for_test(hid_t * fid_ptr,
if ( actual_base_addr == HADDR_UNDEF ) {
success = FALSE;
- nerrors++;
+ nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5MF_alloc() failed.\n",
+ HDfprintf(stdout, "%d:%s: H5MF_alloc() failed.\n",
world_mpi_rank, FUNC);
}
@@ -4247,10 +4246,10 @@ setup_cache_for_test(hid_t * fid_ptr,
* if the size of the superblock is increased.
*/
success = FALSE;
- nerrors++;
+ nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: actual_base_addr > BASE_ADDR.\n",
+ HDfprintf(stdout, "%d:%s: actual_base_addr > BASE_ADDR.\n",
world_mpi_rank, FUNC);
}
}
@@ -4263,7 +4262,7 @@ setup_cache_for_test(hid_t * fid_ptr,
if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: second H5Fflush() failed.\n",
+ HDfprintf(stdout, "%d:%s: second H5Fflush() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -4273,62 +4272,62 @@ setup_cache_for_test(hid_t * fid_ptr,
if ( success ) {
- if ( H5AC__set_write_done_callback(cache_ptr, do_sync) != SUCCEED ) {
+ if ( H5AC__set_write_done_callback(cache_ptr, do_sync) != SUCCEED ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: H5C_set_write_done_callback failed.\n",
+ HDfprintf(stdout,
+ "%d:%s: H5C_set_write_done_callback failed.\n",
world_mpi_rank, FUNC);
}
- }
+ }
}
#endif /* DO_SYNC_AFTER_WRITE */
if ( success ) {
- if ( H5AC__set_sync_point_done_callback(cache_ptr, verify_writes) != SUCCEED ) {
+ if ( H5AC__set_sync_point_done_callback(cache_ptr, verify_writes) != SUCCEED ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: H5AC__set_sync_point_done_callback failed.\n",
+ HDfprintf(stdout,
+ "%d:%s: H5AC__set_sync_point_done_callback failed.\n",
world_mpi_rank, FUNC);
}
- }
+ }
}
return(success);
} /* setup_cache_for_test() */
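The strategy-selection step in the middle of this function is a read-modify-write of the cache's auto-resize configuration. A trimmed sketch of just that step, built from the two calls shown in the hunks above; the H5AC_cache_config_t type name and the version-field initialization before the get are assumptions taken from the full source rather than this diff:

    H5AC_cache_config_t config;

    /* the version field must be set before the get, as in the full source */
    config.version = H5AC__CURR_CACHE_CONFIG_VERSION;

    /* read the current auto-resize configuration ... */
    if ( H5AC_get_cache_auto_resize_config(cache_ptr, &config) != SUCCEED ) {
        nerrors++;
    } else {
        /* ... change only the metadata write strategy ... */
        config.metadata_write_strategy = metadata_write_strategy;

        /* ... and write the modified configuration back to the cache */
        if ( H5AC_set_cache_auto_resize_config(cache_ptr, &config) != SUCCEED )
            nerrors++;
    }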
-
+
/*****************************************************************************
*
- * Function: verify_writes()
+ * Function: verify_writes()
*
- * Purpose: Verify that the indicated entries have been written exactly
- * once each, and that the indicated total number of writes
- * has been processed by the server process. Flag an error if
- * a discrepancy is noted. Finally reset the counters maintained
- * by the server process.
+ * Purpose: Verify that the indicated entries have been written exactly
+ * once each, and that the indicated total number of writes
+ * has been processed by the server process. Flag an error if
+ * a discrepancy is noted. Finally reset the counters maintained
+ * by the server process.
*
- * This function should only be called by the metadata cache
- * as the "sync point done" function, as it must do some
- * synchronization to avoid false positives.
+ * This function should only be called by the metadata cache
+ * as the "sync point done" function, as it must do some
+ * synchronization to avoid false positives.
*
- * Note that at present, this function does not allow for the
- * case in which one or more of the indicated entries should
- * have been written more than once since the last time the
- * server process's counters were reset. That is fine for now,
- * as with the current metadata write strategies, no entry
- * should be written more than once per sync point. If this
- * changes this limitation will have to be revisited.
+ * Note that at present, this function does not allow for the
+ * case in which one or more of the indicated entries should
+ * have been written more than once since the last time the
+ * server process's counters were reset. That is fine for now,
+ * as with the current metadata write strategies, no entry
+ * should be written more than once per sync point. If this
+ * changes this limitation will have to be revisited.
*
- * Return: void.
+ * Return: void.
*
- * Programmer: JRM -- 5/9/10
+ * Programmer: JRM -- 5/9/10
*
*****************************************************************************/
static void
@@ -4404,7 +4403,7 @@ verify_writes(unsigned num_writes, haddr_t *written_entries_tbl)
}
/* final barrier to ensure that all processes think that the server
- * counters have been reset before we leave the sync point. This
+ * counters have been reset before we leave the sync point. This
* barrier is probably not necessary at this point in time (5/9/10),
* but I can think of at least one likely change to the metadata write
* strategies that will require it -- hence its insertion now.
@@ -4426,24 +4425,24 @@ verify_writes(unsigned num_writes, haddr_t *written_entries_tbl)
} /* verify_writes() */
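The ordering that matters in verify_writes() is barrier / check / reset / barrier: every process must be past the sync point before the counters are examined, and every process must have seen the reset before any of them starts the next sync point. A schematic of that ordering only, not the literal body; the helper names and file_mpi_comm are from elsewhere in this file, and which rank issues the reset is an assumption of this sketch:

    /* all processes must reach the sync point before counters are checked */
    if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) )
        nerrors++;

    /* per-entry and total write counts, as recorded by the server process */
    if ( ! verify_entry_writes(data[0].base_addr, 1) )
        nerrors++;

    if ( ! verify_total_writes(num_writes) )
        nerrors++;

    /* one process (rank 0 in this sketch) asks the server to clear counters */
    if ( ( world_mpi_rank == 0 ) && ( ! reset_server_counts() ) )
        nerrors++;

    /* no process proceeds until the reset is known to be complete */
    if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) )
        nerrors++;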
-
+
/*****************************************************************************
*
- * Function: setup_rand()
+ * Function: setup_rand()
*
- * Purpose: Use gettimeofday() to obtain a seed for rand(), print the
- * seed to stdout, and then pass it to srand().
+ * Purpose: Use gettimeofday() to obtain a seed for rand(), print the
+ * seed to stdout, and then pass it to srand().
*
- * Increment nerrors if any errors are detected.
+ * Increment nerrors if any errors are detected.
*
- * Return: void.
+ * Return: void.
*
- * Programmer: JRM -- 1/12/06
+ * Programmer: JRM -- 1/12/06
*
* Modifications:
*
- * JRM -- 5/9/06
- * Modified function to facilitate setting predefined seeds.
+ * JRM -- 5/9/06
+ * Modified function to facilitate setting predefined seeds.
*
*****************************************************************************/
static void
@@ -4458,13 +4457,13 @@ setup_rand(void)
if ( ( use_predefined_seeds ) &&
( world_mpi_size == num_predefined_seeds ) ) {
- HDassert( world_mpi_rank >= 0 );
- HDassert( world_mpi_rank < world_mpi_size );
+ HDassert( world_mpi_rank >= 0 );
+ HDassert( world_mpi_rank < world_mpi_size );
seed = predefined_seeds[world_mpi_rank];
- HDfprintf(stdout, "%d:%s: predefined_seed = %d.\n",
+ HDfprintf(stdout, "%d:%s: predefined_seed = %d.\n",
world_mpi_rank, FUNC, seed);
- fflush(stdout);
+ fflush(stdout);
HDsrand(seed);
} else {
@@ -4473,7 +4472,7 @@ setup_rand(void)
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: gettimeofday() failed.\n",
+ HDfprintf(stdout, "%d:%s: gettimeofday() failed.\n",
world_mpi_rank, FUNC);
}
} else {
@@ -4491,21 +4490,21 @@ setup_rand(void)
} /* setup_rand() */
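When predefined seeds are not in use, the seed comes from gettimeofday() and is echoed to stdout so that a failing run can be reproduced. A self-contained sketch of that idea in plain libc (the HD-prefixed wrappers in this file map onto these calls; the exact way the timeval is folded into a seed is illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/time.h>

    /* Seed rand() from the clock and print the seed for reproducibility. */
    static void
    seed_from_clock(int rank)
    {
        struct timeval tv;
        unsigned       seed;

        if ( gettimeofday(&tv, NULL) != 0 ) {
            fprintf(stderr, "%d: gettimeofday() failed.\n", rank);
            return;
        }

        seed = (unsigned)(tv.tv_usec ^ tv.tv_sec);
        fprintf(stdout, "%d: seed = %u.\n", rank, seed);
        fflush(stdout);
        srand(seed);
    }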
-
+
/*****************************************************************************
*
- * Function: take_down_cache()
+ * Function: take_down_cache()
*
- * Purpose: Take down the parallel cache after a test.
+ * Purpose: Take down the parallel cache after a test.
*
- * To do this, we must close the file, and delete it if
- * possible.
+ * To do this, we must close the file, and delete it if
+ * possible.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 1/4/06
+ * Programmer: JRM -- 1/4/06
*
*****************************************************************************/
static hbool_t
@@ -4513,7 +4512,7 @@ take_down_cache(hid_t fid, H5C_t * cache_ptr)
{
hbool_t success = TRUE; /* will set to FALSE if appropriate. */
- /* flush the file -- this should write out any remaining test
+ /* flush the file -- this should write out any remaining test
* entries in the cache.
*/
if ( ( success ) && ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) ) {
@@ -4526,7 +4525,7 @@ take_down_cache(hid_t fid, H5C_t * cache_ptr)
}
}
- /* Now reset the sync point done callback. Must do this as with
+ /* Now reset the sync point done callback. Must do this as with
* the SWMR mods, the cache will do additional I/O on file close
* un-related to the test entries, and thereby corrupt our counts
* of entry writes.
@@ -4557,7 +4556,7 @@ take_down_cache(hid_t fid, H5C_t * cache_ptr)
world_mpi_rank, FUNC);
}
- }
+ }
if ( success ) {
@@ -4574,11 +4573,11 @@ take_down_cache(hid_t fid, H5C_t * cache_ptr)
}
} else {
- /* verify that there have been no further writes of test
+ /* verify that there have been no further writes of test
* entries during the close
*/
success = verify_total_writes(0);
-
+
}
}
@@ -4586,19 +4585,19 @@ take_down_cache(hid_t fid, H5C_t * cache_ptr)
} /* take_down_cache() */
-
+
/*****************************************************************************
* Function: verify_entry_reads
*
- * Purpose: Query the server to determine the number of times the
- * indicated entry has been read since the last time the
- * server counters were reset.
+ * Purpose: Query the server to determine the number of times the
+ * indicated entry has been read since the last time the
+ * server counters were reset.
*
- * Return TRUE if successful, and if the supplied expected
- * number of reads matches the number of reads reported by
- * the server process.
+ * Return TRUE if successful, and if the supplied expected
+ * number of reads matches the number of reads reported by
+ * the server process.
*
- * Return FALSE and flag an error otherwise.
+ * Return FALSE and flag an error otherwise.
*
* Return: TRUE if successful, FALSE otherwise.
*
@@ -4681,31 +4680,31 @@ verify_entry_reads(haddr_t addr,
nerrors++;
success = FALSE;
if ( verbose ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: rep/exp entry 0x%llx reads mismatch (%ld/%ld).\n",
world_mpi_rank, FUNC, (long long)addr,
reported_entry_reads, expected_entry_reads);
}
- }
+ }
}
return(success);
} /* verify_entry_reads() */
-
+
/*****************************************************************************
* Function: verify_entry_writes
*
- * Purpose: Query the server to determine the number of times the
- * indicated entry has been written since the last time the
- * server counters were reset.
+ * Purpose: Query the server to determine the number of times the
+ * indicated entry has been written since the last time the
+ * server counters were reset.
*
- * Return TRUE if successful, and if the supplied expected
- * number of writes matches the number of writes reported by
- * the server process.
+ * Return TRUE if successful, and if the supplied expected
+ * number of writes matches the number of writes reported by
+ * the server process.
*
- * Return FALSE and flag an error otherwise.
+ * Return FALSE and flag an error otherwise.
*
* Return: TRUE if successful, FALSE otherwise.
*
@@ -4788,36 +4787,36 @@ verify_entry_writes(haddr_t addr,
nerrors++;
success = FALSE;
if ( verbose ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: rep/exp entry 0x%llx writes mismatch (%ld/%ld).\n",
world_mpi_rank, FUNC, (long long)addr,
reported_entry_writes, expected_entry_writes);
}
- }
+ }
}
return(success);
} /* verify_entry_writes() */
-
+
/*****************************************************************************
*
- * Function: verify_total_reads()
+ * Function: verify_total_reads()
*
- * Purpose: Query the server to obtain the total reads since the last
- * server counter reset, and compare this value with the supplied
- * expected value.
+ * Purpose: Query the server to obtain the total reads since the last
+ * server counter reset, and compare this value with the supplied
+ * expected value.
*
- * If the values match, return TRUE.
+ * If the values match, return TRUE.
*
- * If the values don't match, flag an error and return FALSE.
+ * If the values don't match, flag an error and return FALSE.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 5/6/10
+ * Programmer: JRM -- 5/6/10
*
*****************************************************************************/
static hbool_t
@@ -4888,9 +4887,9 @@ verify_total_reads(int expected_total_reads)
nerrors++;
success = FALSE;
if ( verbose ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: reported/expected total reads mismatch (%ld/%ld).\n",
- world_mpi_rank, FUNC,
+ world_mpi_rank, FUNC,
reported_total_reads, expected_total_reads);
}
@@ -4901,24 +4900,24 @@ verify_total_reads(int expected_total_reads)
} /* verify_total_reads() */
-
+
/*****************************************************************************
*
- * Function: verify_total_writes()
+ * Function: verify_total_writes()
*
- * Purpose: Query the server to obtain the total writes since the last
- * server counter reset, and compare this value with the supplied
- * expected value.
+ * Purpose: Query the server to obtain the total writes since the last
+ * server counter reset, and compare this value with the supplied
+ * expected value.
*
- * If the values match, return TRUE.
+ * If the values match, return TRUE.
*
- * If the values don't match, flag an error and return FALSE.
+ * If the values don't match, flag an error and return FALSE.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 5/6/10
+ * Programmer: JRM -- 5/6/10
*
*****************************************************************************/
static hbool_t
@@ -4989,9 +4988,9 @@ verify_total_writes(unsigned expected_total_writes)
nerrors++;
success = FALSE;
if ( verbose ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: reported/expected total writes mismatch (%u/%u).\n",
- world_mpi_rank, FUNC,
+ world_mpi_rank, FUNC,
reported_total_writes, expected_total_writes);
}
}
@@ -5001,7 +5000,7 @@ verify_total_writes(unsigned expected_total_writes)
} /* verify_total_writes() */
-
+
/*****************************************************************************
* Function: unlock_entry()
*
@@ -5016,8 +5015,8 @@ verify_total_writes(unsigned expected_total_writes)
*
* Modifications:
*
- * 7/11/06
- * Updated for the new local_len field in datum.
+ * 7/11/06
+ * Updated for the new local_len field in datum.
*
*****************************************************************************/
static void
@@ -5037,7 +5036,7 @@ unlock_entry(H5F_t * file_ptr,
entry_ptr = &(data[idx]);
- HDassert( entry_ptr->locked );
+ HDassert( entry_ptr->locked );
dirtied = ((flags & H5AC__DIRTIED_FLAG) == H5AC__DIRTIED_FLAG );
@@ -5053,7 +5052,7 @@ unlock_entry(H5F_t * file_ptr,
if ( ( result < 0 ) ||
( entry_ptr->header.type != &(types[0]) ) ||
( ( entry_ptr->len != entry_ptr->header.size ) &&
- ( entry_ptr->local_len != entry_ptr->header.size ) ) ||
+ ( entry_ptr->local_len != entry_ptr->header.size ) ) ||
( entry_ptr->base_addr != entry_ptr->header.addr ) ) {
nerrors++;
@@ -5065,7 +5064,7 @@ unlock_entry(H5F_t * file_ptr,
entry_ptr->locked = FALSE;
- }
+ }
HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
@@ -5086,7 +5085,7 @@ unlock_entry(H5F_t * file_ptr,
} /* unlock_entry() */
-
+
/*****************************************************************************
* Function: unpin_entry()
*
@@ -5101,8 +5100,8 @@ unlock_entry(H5F_t * file_ptr,
*
* Modifications:
*
- * JRM -- 8/15/06
- * Added assertion that entry is pinned on entry.
+ * JRM -- 8/15/06
+ * Added assertion that entry is pinned on entry.
*
*****************************************************************************/
static void
@@ -5124,54 +5123,54 @@ unpin_entry(H5F_t * file_ptr,
entry_ptr = &(data[idx]);
- HDassert( (entry_ptr->header).is_pinned );
- HDassert ( ! ( entry_ptr->global_pinned && entry_ptr->local_pinned) );
- HDassert ( ( global && entry_ptr->global_pinned ) ||
- ( ! global && entry_ptr->local_pinned ) );
- HDassert ( ! ( dirty && ( ! global ) ) );
+ HDassert( (entry_ptr->header).is_pinned );
+ HDassert ( ! ( entry_ptr->global_pinned && entry_ptr->local_pinned) );
+ HDassert ( ( global && entry_ptr->global_pinned ) ||
+ ( ! global && entry_ptr->local_pinned ) );
+ HDassert ( ! ( dirty && ( ! global ) ) );
- if ( via_unprotect ) {
+ if ( via_unprotect ) {
- lock_entry(file_ptr, idx);
+ lock_entry(file_ptr, idx);
- if ( dirty ) {
+ if ( dirty ) {
- flags |= H5AC__DIRTIED_FLAG;
- }
+ flags |= H5AC__DIRTIED_FLAG;
+ }
- unlock_entry(file_ptr, idx, flags);
+ unlock_entry(file_ptr, idx, flags);
- } else {
+ } else {
- if ( dirty ) {
+ if ( dirty ) {
- mark_entry_dirty(idx);
+ mark_entry_dirty(idx);
- }
+ }
- result = H5AC_unpin_entry(entry_ptr);
+ result = H5AC_unpin_entry(entry_ptr);
- if ( result < 0 ) {
+ if ( result < 0 ) {
nerrors++;
if ( verbose ) {
HDfprintf(stdout, "%d:%s: error in H5AC_unpin_entry().\n",
- world_mpi_rank, FUNC);
+ world_mpi_rank, FUNC);
}
- }
- }
+ }
+ }
HDassert( ! ((entry_ptr->header).is_pinned) );
- if ( global ) {
+ if ( global ) {
- entry_ptr->global_pinned = FALSE;
+ entry_ptr->global_pinned = FALSE;
- } else {
+ } else {
- entry_ptr->local_pinned = FALSE;
+ entry_ptr->local_pinned = FALSE;
- }
+ }
}
return;
@@ -5183,18 +5182,18 @@ unpin_entry(H5F_t * file_ptr,
/****************************** test functions *******************************/
/*****************************************************************************/
-
+
/*****************************************************************************
*
- * Function: server_smoke_check()
+ * Function: server_smoke_check()
*
- * Purpose: Quick smoke check for the server process.
+ * Purpose: Quick smoke check for the server process.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 12/21/05
+ * Programmer: JRM -- 12/21/05
*
*****************************************************************************/
static hbool_t
@@ -5215,12 +5214,12 @@ server_smoke_check(void)
if ( world_mpi_rank == world_server_mpi_rank ) {
- if ( ! server_main() ) {
+ if ( ! server_main() ) {
/* some error occurred in the server -- report failure */
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -5286,9 +5285,9 @@ server_smoke_check(void)
#endif /* DO_WRITE_REQ_ACK */
- do_sync();
+ do_sync();
- /* barrier to allow all writes to complete */
+ /* barrier to allow all writes to complete */
if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) {
success = FALSE;
@@ -5303,12 +5302,12 @@ server_smoke_check(void)
if ( success ) {
success = verify_entry_writes(data[world_mpi_rank].base_addr, 1);
- }
+ }
if ( success ) {
success = verify_entry_reads(data[world_mpi_rank].base_addr, 0);
- }
+ }
if ( success ) {
@@ -5320,7 +5319,7 @@ server_smoke_check(void)
success = verify_total_reads(0);
}
- /* barrier to allow all writes to complete */
+ /* barrier to allow all writes to complete */
if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) {
success = FALSE;
@@ -5392,7 +5391,7 @@ server_smoke_check(void)
}
}
- /* barrier to allow all writes to complete */
+ /* barrier to allow all writes to complete */
if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) {
success = FALSE;
@@ -5407,12 +5406,12 @@ server_smoke_check(void)
if ( success ) {
success = verify_entry_writes(data[world_mpi_rank].base_addr, 1);
- }
+ }
if ( success ) {
success = verify_entry_reads(data[world_mpi_rank].base_addr, 1);
- }
+ }
if ( success ) {
@@ -5456,12 +5455,12 @@ server_smoke_check(void)
if ( success ) {
success = verify_entry_writes(data[world_mpi_rank].base_addr, 0);
- }
+ }
if ( success ) {
success = verify_entry_reads(data[world_mpi_rank].base_addr, 0);
- }
+ }
if ( success ) {
@@ -5514,9 +5513,9 @@ server_smoke_check(void)
if ( world_mpi_rank == 0 ) {
- if ( max_nerrors == 0 ) {
+ if ( max_nerrors == 0 ) {
- PASSED();
+ PASSED();
} else {
@@ -5531,18 +5530,18 @@ server_smoke_check(void)
} /* server_smoke_check() */
-
+
/*****************************************************************************
*
- * Function: smoke_check_1()
+ * Function: smoke_check_1()
*
- * Purpose: First smoke check for the parallel cache.
+ * Purpose: First smoke check for the parallel cache.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 1/4/06
+ * Programmer: JRM -- 1/4/06
*
*****************************************************************************/
static hbool_t
@@ -5558,23 +5557,23 @@ smoke_check_1(int metadata_write_strategy)
switch ( metadata_write_strategy ) {
- case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #1 -- process 0 only md write strategy");
+ TESTING("smoke check #1 -- process 0 only md write strategy");
}
- break;
+ break;
- case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #1 -- distributed md write strategy");
+ TESTING("smoke check #1 -- distributed md write strategy");
}
- break;
+ break;
default:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #1 -- unknown md write strategy");
+ TESTING("smoke check #1 -- unknown md write strategy");
}
- break;
+ break;
}
nerrors = 0;
@@ -5583,12 +5582,12 @@ smoke_check_1(int metadata_write_strategy)
if ( world_mpi_rank == world_server_mpi_rank ) {
- if ( ! server_main() ) {
+ if ( ! server_main() ) {
/* some error occurred in the server -- report failure */
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -5602,7 +5601,7 @@ smoke_check_1(int metadata_write_strategy)
fid = -1;
cache_ptr = NULL;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -5614,24 +5613,24 @@ smoke_check_1(int metadata_write_strategy)
for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
}
/* Move the first half of the entries... */
for ( i = 0; i < (virt_num_data_entries / 2); i++ )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
}
/* ...and then move them back. */
for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
}
if ( fid >= 0 ) {
@@ -5640,7 +5639,7 @@ smoke_check_1(int metadata_write_strategy)
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -5686,9 +5685,9 @@ smoke_check_1(int metadata_write_strategy)
if ( world_mpi_rank == 0 ) {
- if ( max_nerrors == 0 ) {
+ if ( max_nerrors == 0 ) {
- PASSED();
+ PASSED();
} else {
@@ -5703,21 +5702,21 @@ smoke_check_1(int metadata_write_strategy)
} /* smoke_check_1() */
-
+
/*****************************************************************************
*
- * Function: smoke_check_2()
+ * Function: smoke_check_2()
*
- * Purpose: Second smoke check for the parallel cache.
+ * Purpose: Second smoke check for the parallel cache.
*
- * Introduce random reads, but keep all processes with roughly
- * the same work load.
+ * Introduce random reads, but keep all processes with roughly
+ * the same work load.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 1/12/06
+ * Programmer: JRM -- 1/12/06
*
*****************************************************************************/
static hbool_t
@@ -5733,23 +5732,23 @@ smoke_check_2(int metadata_write_strategy)
switch ( metadata_write_strategy ) {
- case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #2 -- process 0 only md write strategy");
+ TESTING("smoke check #2 -- process 0 only md write strategy");
}
- break;
+ break;
- case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #2 -- distributed md write strategy");
+ TESTING("smoke check #2 -- distributed md write strategy");
}
- break;
+ break;
default:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #2 -- unknown md write strategy");
+ TESTING("smoke check #2 -- unknown md write strategy");
}
- break;
+ break;
}
nerrors = 0;
@@ -5758,12 +5757,12 @@ smoke_check_2(int metadata_write_strategy)
if ( world_mpi_rank == world_server_mpi_rank ) {
- if ( ! server_main() ) {
+ if ( ! server_main() ) {
/* some error occurred in the server -- report failure */
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -5777,7 +5776,7 @@ smoke_check_2(int metadata_write_strategy)
fid = -1;
cache_ptr = NULL;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -5788,73 +5787,73 @@ smoke_check_2(int metadata_write_strategy)
if ( i > 100 ) {
- lock_and_unlock_random_entries(file_ptr, (i - 100), i, 0, 10);
+ lock_and_unlock_random_entries(file_ptr, (i - 100), i, 0, 10);
}
}
- for ( i = 0; i < (virt_num_data_entries / 2); i+=61 )
- {
- /* Make sure we don't step on any locally pinned entries */
- if ( data[i].local_pinned ) {
- unpin_entry(file_ptr, i, FALSE, FALSE, FALSE);
- }
+ for ( i = 0; i < (virt_num_data_entries / 2); i+=61 )
+ {
+ /* Make sure we don't step on any locally pinned entries */
+ if ( data[i].local_pinned ) {
+ unpin_entry(file_ptr, i, FALSE, FALSE, FALSE);
+ }
- pin_entry(file_ptr, i, TRUE, FALSE);
- }
+ pin_entry(file_ptr, i, TRUE, FALSE);
+ }
for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-=2 )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- lock_and_unlock_random_entries(file_ptr, 0,
- (virt_num_data_entries / 20),
- 0, 100);
- local_pin_and_unpin_random_entries(file_ptr, 0,
- (virt_num_data_entries / 4),
- 0, 3);
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ lock_and_unlock_random_entries(file_ptr, 0,
+ (virt_num_data_entries / 20),
+ 0, 100);
+ local_pin_and_unpin_random_entries(file_ptr, 0,
+ (virt_num_data_entries / 4),
+ 0, 3);
}
for ( i = 0; i < (virt_num_data_entries / 2); i+=2 )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- lock_and_unlock_random_entries(file_ptr, 0,
- (virt_num_data_entries / 10),
- 0, 100);
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ lock_and_unlock_random_entries(file_ptr, 0,
+ (virt_num_data_entries / 10),
+ 0, 100);
}
- /* we can't move pinned entries, so release any local pins now. */
- local_unpin_all_entries(file_ptr, FALSE);
+ /* we can't move pinned entries, so release any local pins now. */
+ local_unpin_all_entries(file_ptr, FALSE);
/* Move the first half of the entries... */
for ( i = 0; i < (virt_num_data_entries / 2); i++ )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
- lock_and_unlock_random_entries(file_ptr, 0,
- ((virt_num_data_entries / 50) - 1),
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ lock_and_unlock_random_entries(file_ptr, 0,
+ ((virt_num_data_entries / 50) - 1),
0, 100);
}
/* ...and then move them back. */
for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
- lock_and_unlock_random_entries(file_ptr, 0,
- (virt_num_data_entries / 100),
- 0, 100);
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ lock_and_unlock_random_entries(file_ptr, 0,
+ (virt_num_data_entries / 100),
+ 0, 100);
}
- for ( i = 0; i < (virt_num_data_entries / 2); i+=61 )
- {
- hbool_t via_unprotect = ( (((unsigned)i) & 0x01) == 0 );
- hbool_t dirty = ( (((unsigned)i) & 0x02) == 0 );
+ for ( i = 0; i < (virt_num_data_entries / 2); i+=61 )
+ {
+ hbool_t via_unprotect = ( (((unsigned)i) & 0x01) == 0 );
+ hbool_t dirty = ( (((unsigned)i) & 0x02) == 0 );
- unpin_entry(file_ptr, i, TRUE, dirty, via_unprotect);
- }
+ unpin_entry(file_ptr, i, TRUE, dirty, via_unprotect);
+ }
if ( fid >= 0 ) {
@@ -5862,7 +5861,7 @@ smoke_check_2(int metadata_write_strategy)
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -5908,9 +5907,9 @@ smoke_check_2(int metadata_write_strategy)
if ( world_mpi_rank == 0 ) {
- if ( max_nerrors == 0 ) {
+ if ( max_nerrors == 0 ) {
- PASSED();
+ PASSED();
} else {
@@ -5925,24 +5924,24 @@ smoke_check_2(int metadata_write_strategy)
} /* smoke_check_2() */
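
The loops above all build on the same protect/unprotect/move idiom. As a condensed sketch only (it reuses the file-local helpers lock_entry(), unlock_entry() and move_entry() plus the virt_num_data_entries global, so it is not a standalone program):

    /* sketch: shuffle the first half of the data entries into the upper
     * half of the index space and back, dirtying them on the return trip
     */
    static void
    shuffle_entries(H5F_t * file_ptr)
    {
        int i;
        int half = virt_num_data_entries / 2;

        for ( i = 0; i < half; i++ )            /* move out... */
        {
            lock_entry(file_ptr, i);
            unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
            move_entry(file_ptr, i, (i + half));
        }

        for ( i = half - 1; i >= 0; i-- )       /* ...and move back */
        {
            lock_entry(file_ptr, i);
            unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
            move_entry(file_ptr, i, (i + half));
        }
    }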
-
+
/*****************************************************************************
*
- * Function: smoke_check_3()
+ * Function: smoke_check_3()
*
- * Purpose: Third smoke check for the parallel cache.
+ * Purpose: Third smoke check for the parallel cache.
*
- *        Use random reads to vary the loads on the different
- * processors. Also force different cache size adjustments.
+ *         Use random reads to vary the loads on the different
+ * processors. Also force different cache size adjustments.
*
- * In this test, load process 0 heavily, and the other
- * processes lightly.
+ * In this test, load process 0 heavily, and the other
+ * processes lightly.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 1/13/06
+ * Programmer: JRM -- 1/13/06
*
*****************************************************************************/
static hbool_t
@@ -5962,23 +5961,23 @@ smoke_check_3(int metadata_write_strategy)
switch ( metadata_write_strategy ) {
- case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #3 -- process 0 only md write strategy");
+ TESTING("smoke check #3 -- process 0 only md write strategy");
}
- break;
+ break;
- case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #3 -- distributed md write strategy");
+ TESTING("smoke check #3 -- distributed md write strategy");
}
- break;
+ break;
default:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #3 -- unknown md write strategy");
+ TESTING("smoke check #3 -- unknown md write strategy");
}
- break;
+ break;
}
nerrors = 0;
@@ -5987,12 +5986,12 @@ smoke_check_3(int metadata_write_strategy)
if ( world_mpi_rank == world_server_mpi_rank ) {
- if ( ! server_main() ) {
+ if ( ! server_main() ) {
            /* some error occurred in the server -- report failure */
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -6006,7 +6005,7 @@ smoke_check_3(int metadata_write_strategy)
fid = -1;
cache_ptr = NULL;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -6020,7 +6019,7 @@ smoke_check_3(int metadata_write_strategy)
if ( i > 100 ) {
- lock_and_unlock_random_entries(file_ptr, (i - 100), i,
+ lock_and_unlock_random_entries(file_ptr, (i - 100), i,
min_count, max_count);
}
}
@@ -6030,48 +6029,48 @@ smoke_check_3(int metadata_write_strategy)
max_count = min_count + 50;
for ( i = (virt_num_data_entries / 4);
- i < (virt_num_data_entries / 2);
- i++ )
+ i < (virt_num_data_entries / 2);
+ i++ )
{
insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET);
- if ( i % 59 == 0 ) {
+ if ( i % 59 == 0 ) {
- hbool_t dirty = ( (i % 2) == 0);
+ hbool_t dirty = ( (i % 2) == 0);
- if ( data[i].local_pinned ) {
- unpin_entry(file_ptr, i, FALSE, FALSE, FALSE);
- }
+ if ( data[i].local_pinned ) {
+ unpin_entry(file_ptr, i, FALSE, FALSE, FALSE);
+ }
- pin_entry(file_ptr, i, TRUE, dirty);
+ pin_entry(file_ptr, i, TRUE, dirty);
- HDassert( !dirty || data[i].header.is_dirty );
- HDassert( data[i].header.is_pinned );
- HDassert( data[i].global_pinned );
- HDassert( ! data[i].local_pinned );
- }
+ HDassert( !dirty || data[i].header.is_dirty );
+ HDassert( data[i].header.is_pinned );
+ HDassert( data[i].global_pinned );
+ HDassert( ! data[i].local_pinned );
+ }
if ( i > 100 ) {
- lock_and_unlock_random_entries(file_ptr, (i - 100), i,
+ lock_and_unlock_random_entries(file_ptr, (i - 100), i,
min_count, max_count);
}
- local_pin_and_unpin_random_entries(file_ptr, 0,
+ local_pin_and_unpin_random_entries(file_ptr, 0,
virt_num_data_entries / 4,
- 0, (file_mpi_rank + 2));
+ 0, (file_mpi_rank + 2));
- }
+ }
- /* flush the file to be sure that we have no problems flushing
- * pinned entries
- */
+ /* flush the file to be sure that we have no problems flushing
+ * pinned entries
+ */
if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -6087,27 +6086,27 @@ smoke_check_3(int metadata_write_strategy)
for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
{
- if ( ( i >= (virt_num_data_entries / 4) ) && ( i % 59 == 0 ) ) {
+ if ( ( i >= (virt_num_data_entries / 4) ) && ( i % 59 == 0 ) ) {
hbool_t via_unprotect = ( (((unsigned)i) & 0x02) == 0 );
- hbool_t dirty = ( (((unsigned)i) & 0x04) == 0 );
-
- HDassert( data[i].global_pinned );
- HDassert( ! data[i].local_pinned );
-
- unpin_entry(file_ptr, i, TRUE, dirty,
- via_unprotect);
- }
- if ( i % 2 == 0 ) {
-
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- local_pin_and_unpin_random_entries(file_ptr, 0,
- virt_num_data_entries / 2,
- 0, 2);
- lock_and_unlock_random_entries(file_ptr,
+ hbool_t dirty = ( (((unsigned)i) & 0x04) == 0 );
+
+ HDassert( data[i].global_pinned );
+ HDassert( ! data[i].local_pinned );
+
+ unpin_entry(file_ptr, i, TRUE, dirty,
+ via_unprotect);
+ }
+ if ( i % 2 == 0 ) {
+
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ local_pin_and_unpin_random_entries(file_ptr, 0,
+ virt_num_data_entries / 2,
+ 0, 2);
+ lock_and_unlock_random_entries(file_ptr,
min_idx, max_idx, 0, 100);
- }
+ }
}
min_idx = 0;
@@ -6120,9 +6119,9 @@ smoke_check_3(int metadata_write_strategy)
for ( i = 0; i < (virt_num_data_entries / 2); i+=2 )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- lock_and_unlock_random_entries(file_ptr,
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ lock_and_unlock_random_entries(file_ptr,
min_idx, max_idx, 0, 100);
}
@@ -6135,22 +6134,22 @@ smoke_check_3(int metadata_write_strategy)
/* move the first half of the entries... */
for ( i = 0; i < (virt_num_data_entries / 2); i++ )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
- lock_and_unlock_random_entries(file_ptr, 0,
- (virt_num_data_entries / 20),
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ lock_and_unlock_random_entries(file_ptr, 0,
+ (virt_num_data_entries / 20),
min_count, max_count);
}
/* ...and then move them back. */
for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
- lock_and_unlock_random_entries(file_ptr, 0,
- (virt_num_data_entries / 40),
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ lock_and_unlock_random_entries(file_ptr, 0,
+ (virt_num_data_entries / 40),
min_count, max_count);
}
@@ -6162,16 +6161,16 @@ smoke_check_3(int metadata_write_strategy)
for ( i = 0; i < (virt_num_data_entries / 2); i+=2 )
{
- local_pin_and_unpin_random_entries(file_ptr, 0,
- (virt_num_data_entries / 2),
- 0, 5);
+ local_pin_and_unpin_random_entries(file_ptr, 0,
+ (virt_num_data_entries / 2),
+ 0, 5);
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
if ( i > 100 ) {
- lock_and_unlock_random_entries(file_ptr, (i - 100), i,
+ lock_and_unlock_random_entries(file_ptr, (i - 100), i,
min_count, max_count);
}
}
@@ -6185,7 +6184,7 @@ smoke_check_3(int metadata_write_strategy)
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -6232,9 +6231,9 @@ smoke_check_3(int metadata_write_strategy)
if ( world_mpi_rank == 0 ) {
- if ( max_nerrors == 0 ) {
+ if ( max_nerrors == 0 ) {
- PASSED();
+ PASSED();
} else {
@@ -6249,24 +6248,24 @@ smoke_check_3(int metadata_write_strategy)
} /* smoke_check_3() */
-
+
/*****************************************************************************
*
- * Function: smoke_check_4()
+ * Function: smoke_check_4()
*
- * Purpose: Fourth smoke check for the parallel cache.
+ * Purpose: Fourth smoke check for the parallel cache.
*
- *        Use random reads to vary the loads on the different
- * processors. Also force different cache size adjustments.
+ *         Use random reads to vary the loads on the different
+ * processors. Also force different cache size adjustments.
*
- * In this test, load process 0 lightly, and the other
- * processes heavily.
+ * In this test, load process 0 lightly, and the other
+ * processes heavily.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 1/13/06
+ * Programmer: JRM -- 1/13/06
*
*****************************************************************************/
static hbool_t
@@ -6286,23 +6285,23 @@ smoke_check_4(int metadata_write_strategy)
switch ( metadata_write_strategy ) {
- case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #4 -- process 0 only md write strategy");
+ TESTING("smoke check #4 -- process 0 only md write strategy");
}
- break;
+ break;
- case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #4 -- distributed md write strategy");
+ TESTING("smoke check #4 -- distributed md write strategy");
}
- break;
+ break;
default:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #4 -- unknown md write strategy");
+ TESTING("smoke check #4 -- unknown md write strategy");
}
- break;
+ break;
}
nerrors = 0;
@@ -6311,12 +6310,12 @@ smoke_check_4(int metadata_write_strategy)
if ( world_mpi_rank == world_server_mpi_rank ) {
- if ( ! server_main() ) {
+ if ( ! server_main() ) {
            /* some error occurred in the server -- report failure */
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -6330,7 +6329,7 @@ smoke_check_4(int metadata_write_strategy)
fid = -1;
cache_ptr = NULL;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -6345,7 +6344,7 @@ smoke_check_4(int metadata_write_strategy)
if ( i > 100 ) {
- lock_and_unlock_random_entries(file_ptr, (i - 100), i,
+ lock_and_unlock_random_entries(file_ptr, (i - 100), i,
min_count, max_count);
}
}
@@ -6354,23 +6353,23 @@ smoke_check_4(int metadata_write_strategy)
max_count = min_count + 100;
for ( i = (virt_num_data_entries / 4);
- i < (virt_num_data_entries / 2);
- i++ )
+ i < (virt_num_data_entries / 2);
+ i++ )
{
- if ( i % 2 == 0 ) {
+ if ( i % 2 == 0 ) {
insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET);
- } else {
+ } else {
- /* Insert some entries pinned, and then unpin them
- * immediately. We have tested pinned entries elsewhere,
- * so it should be sufficient to verify that the
- * entries are in fact pinned (which unpin_entry() should do).
- */
+ /* Insert some entries pinned, and then unpin them
+ * immediately. We have tested pinned entries elsewhere,
+ * so it should be sufficient to verify that the
+ * entries are in fact pinned (which unpin_entry() should do).
+ */
insert_entry(cache_ptr, file_ptr, i, H5C__PIN_ENTRY_FLAG);
unpin_entry(file_ptr, i, TRUE, FALSE, FALSE);
- }
+ }
if ( i % 59 == 0 ) {
@@ -6390,19 +6389,19 @@ smoke_check_4(int metadata_write_strategy)
if ( i > 100 ) {
- lock_and_unlock_random_entries(file_ptr, (i - 100), i,
+ lock_and_unlock_random_entries(file_ptr, (i - 100), i,
min_count, max_count);
}
local_pin_and_unpin_random_entries(file_ptr, 0,
- (virt_num_data_entries / 4),
+ (virt_num_data_entries / 4),
0, (file_mpi_rank + 2));
}
/* flush the file to be sure that we have no problems flushing
- * pinned entries
- */
+ * pinned entries
+ */
if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
nerrors++;
if ( verbose ) {
@@ -6429,13 +6428,13 @@ smoke_check_4(int metadata_write_strategy)
unpin_entry(file_ptr, i, TRUE, dirty, via_unprotect);
}
- if ( i % 2 == 0 ) {
+ if ( i % 2 == 0 ) {
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- lock_and_unlock_random_entries(file_ptr,
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ lock_and_unlock_random_entries(file_ptr,
min_idx, max_idx, 0, 100);
- }
+ }
}
min_idx = 0;
@@ -6444,14 +6443,14 @@ smoke_check_4(int metadata_write_strategy)
for ( i = 0; i < (virt_num_data_entries / 2); i+=2 )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- lock_and_unlock_random_entries(file_ptr,
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ lock_and_unlock_random_entries(file_ptr,
min_idx, max_idx, 0, 100);
}
- /* we can't move pinned entries, so release any local pins now. */
- local_unpin_all_entries(file_ptr, FALSE);
+ /* we can't move pinned entries, so release any local pins now. */
+ local_unpin_all_entries(file_ptr, FALSE);
min_count = 10 * (file_mpi_rank % 4);
max_count = min_count + 100;
@@ -6459,22 +6458,22 @@ smoke_check_4(int metadata_write_strategy)
/* move the first half of the entries... */
for ( i = 0; i < (virt_num_data_entries / 2); i++ )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
- lock_and_unlock_random_entries(file_ptr, 0,
- (virt_num_data_entries / 20),
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ lock_and_unlock_random_entries(file_ptr, 0,
+ (virt_num_data_entries / 20),
min_count, max_count);
}
/* ...and then move them back. */
for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
- lock_and_unlock_random_entries(file_ptr, 0,
- (virt_num_data_entries / 40),
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ lock_and_unlock_random_entries(file_ptr, 0,
+ (virt_num_data_entries / 40),
min_count, max_count);
}
@@ -6486,12 +6485,12 @@ smoke_check_4(int metadata_write_strategy)
for ( i = 0; i < (virt_num_data_entries / 2); i+=2 )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
if ( i > 100 ) {
- lock_and_unlock_random_entries(file_ptr, (i - 100), i,
+ lock_and_unlock_random_entries(file_ptr, (i - 100), i,
min_count, max_count);
}
}
@@ -6502,7 +6501,7 @@ smoke_check_4(int metadata_write_strategy)
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -6549,9 +6548,9 @@ smoke_check_4(int metadata_write_strategy)
if ( world_mpi_rank == 0 ) {
- if ( max_nerrors == 0 ) {
+ if ( max_nerrors == 0 ) {
- PASSED();
+ PASSED();
} else {
@@ -6566,19 +6565,19 @@ smoke_check_4(int metadata_write_strategy)
} /* smoke_check_4() */
-
+
/*****************************************************************************
*
- * Function: smoke_check_5()
+ * Function: smoke_check_5()
*
- * Purpose: Similar to smoke check 1, but modified to verify that
- * H5AC_mark_entry_dirty() works in the parallel case.
+ * Purpose: Similar to smoke check 1, but modified to verify that
+ * H5AC_mark_entry_dirty() works in the parallel case.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 5/18/06
+ * Programmer: JRM -- 5/18/06
*
*****************************************************************************/
static hbool_t
@@ -6594,23 +6593,23 @@ smoke_check_5(int metadata_write_strategy)
switch ( metadata_write_strategy ) {
- case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #5 -- process 0 only md write strategy");
+ TESTING("smoke check #5 -- process 0 only md write strategy");
}
- break;
+ break;
- case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #5 -- distributed md write strategy");
+ TESTING("smoke check #5 -- distributed md write strategy");
}
- break;
+ break;
default:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #5 -- unknown md write strategy");
+ TESTING("smoke check #5 -- unknown md write strategy");
}
- break;
+ break;
}
@@ -6620,12 +6619,12 @@ smoke_check_5(int metadata_write_strategy)
if ( world_mpi_rank == world_server_mpi_rank ) {
- if ( ! server_main() ) {
+ if ( ! server_main() ) {
            /* some error occurred in the server -- report failure */
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -6640,7 +6639,7 @@ smoke_check_5(int metadata_write_strategy)
fid = -1;
cache_ptr = NULL;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -6650,60 +6649,60 @@ smoke_check_5(int metadata_write_strategy)
insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET);
}
- /* flush the file so we can lock known clean entries. */
+ /* flush the file so we can lock known clean entries. */
if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
world_mpi_rank, FUNC);
}
}
for ( i = 0; i < (virt_num_data_entries / 4); i++ )
{
- lock_entry(file_ptr, i);
+ lock_entry(file_ptr, i);
- if ( i % 2 == 0 )
- {
- mark_entry_dirty(i);
- }
+ if ( i % 2 == 0 )
+ {
+ mark_entry_dirty(i);
+ }
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- if ( i % 2 == 1 )
- {
- if ( i % 4 == 1 ) {
+ if ( i % 2 == 1 )
+ {
+ if ( i % 4 == 1 ) {
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- }
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ }
- expunge_entry(file_ptr, i);
- }
+ expunge_entry(file_ptr, i);
+ }
}
for ( i = (virt_num_data_entries / 2) - 1;
i >= (virt_num_data_entries / 4);
- i-- )
+ i-- )
{
- pin_entry(file_ptr, i, TRUE, FALSE);
+ pin_entry(file_ptr, i, TRUE, FALSE);
- if ( i % 2 == 0 )
- {
- if ( i % 8 <= 4 ) {
+ if ( i % 2 == 0 )
+ {
+ if ( i % 8 <= 4 ) {
- resize_entry(i, data[i].len / 2);
- }
+ resize_entry(i, data[i].len / 2);
+ }
mark_entry_dirty(i);
- if ( i % 8 <= 4 ) {
+ if ( i % 8 <= 4 ) {
- resize_entry(i, data[i].len);
- }
- }
+ resize_entry(i, data[i].len);
+ }
+ }
- unpin_entry(file_ptr, i, TRUE, FALSE, FALSE);
+ unpin_entry(file_ptr, i, TRUE, FALSE, FALSE);
}
if ( fid >= 0 ) {
@@ -6712,7 +6711,7 @@ smoke_check_5(int metadata_write_strategy)
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -6758,9 +6757,9 @@ smoke_check_5(int metadata_write_strategy)
if ( world_mpi_rank == 0 ) {
- if ( max_nerrors == 0 ) {
+ if ( max_nerrors == 0 ) {
- PASSED();
+ PASSED();
} else {
@@ -6775,10 +6774,10 @@ smoke_check_5(int metadata_write_strategy)
} /* smoke_check_5() */
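
The heart of smoke_check_5() is the pin / resize / mark-dirty / unpin sequence applied to each entry. As a condensed sketch for a single entry index idx (idx is an arbitrary index introduced for the example; the file-local helpers pin_entry(), resize_entry(), mark_entry_dirty() and unpin_entry() are presumed to wrap H5AC_resize_entry() and H5AC_mark_entry_dirty()):

    /* sketch: dirty and resize entry idx while it is globally pinned */
    pin_entry(file_ptr, idx, TRUE, FALSE);

    resize_entry(idx, data[idx].len / 2);    /* shrink while pinned       */
    mark_entry_dirty(idx);                   /* parallel-safe dirtying    */
    resize_entry(idx, data[idx].len);        /* restore the original size */

    unpin_entry(file_ptr, idx, TRUE, FALSE, FALSE);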
-
+
/*****************************************************************************
*
- * Function: trace_file_check()
+ * Function: trace_file_check()
*
* Purpose: A basic test of the trace file capability. In essence,
* we invoke all operations that generate trace file output,
@@ -6808,11 +6807,11 @@ smoke_check_5(int metadata_write_strategy)
*
* This test is skipped if H5_METADATA_TRACE_FILE is undefined.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 6/13/06
+ * Programmer: JRM -- 6/13/06
*
*****************************************************************************/
static hbool_t
@@ -6897,25 +6896,25 @@ trace_file_check(int metadata_write_strategy)
switch ( metadata_write_strategy ) {
- case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
#ifdef H5_METADATA_TRACE_FILE
expected_output = &expected_output_0;
#endif /* H5_METADATA_TRACE_FILE */
if ( world_mpi_rank == 0 ) {
- TESTING(
- "trace file collection -- process 0 only md write strategy");
+ TESTING(
+ "trace file collection -- process 0 only md write strategy");
}
- break;
+ break;
- case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
#ifdef H5_METADATA_TRACE_FILE
expected_output = &expected_output_1;
#endif /* H5_METADATA_TRACE_FILE */
if ( world_mpi_rank == 0 ) {
- TESTING(
- "trace file collection -- distributed md write strategy");
+ TESTING(
+ "trace file collection -- distributed md write strategy");
}
- break;
+ break;
default:
#ifdef H5_METADATA_TRACE_FILE
@@ -6925,9 +6924,9 @@ trace_file_check(int metadata_write_strategy)
expected_output = &expected_output_0;
#endif /* H5_METADATA_TRACE_FILE */
if ( world_mpi_rank == 0 ) {
- TESTING("trace file collection -- unknown md write strategy");
+ TESTING("trace file collection -- unknown md write strategy");
}
- break;
+ break;
}
#ifdef H5_METADATA_TRACE_FILE
@@ -6938,12 +6937,12 @@ trace_file_check(int metadata_write_strategy)
if ( world_mpi_rank == world_server_mpi_rank ) {
- if ( ! server_main() ) {
+ if ( ! server_main() ) {
            /* some error occurred in the server -- report failure */
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -6958,7 +6957,7 @@ trace_file_check(int metadata_write_strategy)
fid = -1;
cache_ptr = NULL;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -6970,58 +6969,58 @@ trace_file_check(int metadata_write_strategy)
if ( H5AC_get_cache_auto_resize_config(cache_ptr, &config)
!= SUCCEED ) {
- nerrors++;
- HDfprintf(stdout,
+ nerrors++;
+ HDfprintf(stdout,
"%d:%s: H5AC_get_cache_auto_resize_config() failed.\n",
world_mpi_rank, FUNC);
} else {
config.open_trace_file = TRUE;
- strcpy(config.trace_file_name, "t_cache_trace.txt");
+ strcpy(config.trace_file_name, "t_cache_trace.txt");
if ( H5AC_set_cache_auto_resize_config(cache_ptr, &config)
- != SUCCEED ) {
+ != SUCCEED ) {
- nerrors++;
- HDfprintf(stdout,
+ nerrors++;
+ HDfprintf(stdout,
"%d:%s: H5AC_set_cache_auto_resize_config() failed.\n",
world_mpi_rank, FUNC);
}
}
}
- insert_entry(cache_ptr, file_ptr, 0, H5AC__NO_FLAGS_SET);
- insert_entry(cache_ptr, file_ptr, 1, H5AC__NO_FLAGS_SET);
- insert_entry(cache_ptr, file_ptr, 2, H5AC__NO_FLAGS_SET);
- insert_entry(cache_ptr, file_ptr, 3, H5AC__NO_FLAGS_SET);
+ insert_entry(cache_ptr, file_ptr, 0, H5AC__NO_FLAGS_SET);
+ insert_entry(cache_ptr, file_ptr, 1, H5AC__NO_FLAGS_SET);
+ insert_entry(cache_ptr, file_ptr, 2, H5AC__NO_FLAGS_SET);
+ insert_entry(cache_ptr, file_ptr, 3, H5AC__NO_FLAGS_SET);
- lock_entry(file_ptr, 0);
- mark_entry_dirty(0);
- unlock_entry(file_ptr, 0, H5AC__NO_FLAGS_SET);
+ lock_entry(file_ptr, 0);
+ mark_entry_dirty(0);
+ unlock_entry(file_ptr, 0, H5AC__NO_FLAGS_SET);
- lock_entry(file_ptr, 1);
+ lock_entry(file_ptr, 1);
pin_protected_entry(1, TRUE);
- unlock_entry(file_ptr, 1, H5AC__NO_FLAGS_SET);
+ unlock_entry(file_ptr, 1, H5AC__NO_FLAGS_SET);
unpin_entry(file_ptr, 1, TRUE, FALSE, FALSE);
expunge_entry(file_ptr, 1);
- lock_entry(file_ptr, 2);
+ lock_entry(file_ptr, 2);
pin_protected_entry(2, TRUE);
- unlock_entry(file_ptr, 2, H5AC__NO_FLAGS_SET);
- mark_entry_dirty(2);
+ unlock_entry(file_ptr, 2, H5AC__NO_FLAGS_SET);
+ mark_entry_dirty(2);
resize_entry(2, data[2].len / 2);
resize_entry(2, data[2].len);
unpin_entry(file_ptr, 2, TRUE, FALSE, FALSE);
- move_entry(file_ptr, 0, 20);
- move_entry(file_ptr, 0, 20);
+ move_entry(file_ptr, 0, 20);
+ move_entry(file_ptr, 0, 20);
if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -7033,8 +7032,8 @@ trace_file_check(int metadata_write_strategy)
if ( H5AC_get_cache_auto_resize_config(cache_ptr, &config)
!= SUCCEED ) {
- nerrors++;
- HDfprintf(stdout,
+ nerrors++;
+ HDfprintf(stdout,
"%d:%s: H5AC_get_cache_auto_resize_config() failed.\n",
world_mpi_rank, FUNC);
@@ -7042,13 +7041,13 @@ trace_file_check(int metadata_write_strategy)
config.open_trace_file = FALSE;
config.close_trace_file = TRUE;
- config.trace_file_name[0] = '\0';
+ config.trace_file_name[0] = '\0';
if ( H5AC_set_cache_auto_resize_config(cache_ptr, &config)
- != SUCCEED ) {
+ != SUCCEED ) {
- nerrors++;
- HDfprintf(stdout,
+ nerrors++;
+ HDfprintf(stdout,
"%d:%s: H5AC_set_cache_auto_resize_config() failed.\n",
world_mpi_rank, FUNC);
}
@@ -7061,7 +7060,7 @@ trace_file_check(int metadata_write_strategy)
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -7104,10 +7103,10 @@ trace_file_check(int metadata_write_strategy)
if ( nerrors == 0 ) {
- sprintf(trace_file_name, "t_cache_trace.txt.%d",
- (int)file_mpi_rank);
+ sprintf(trace_file_name, "t_cache_trace.txt.%d",
+ (int)file_mpi_rank);
- if ( (trace_file_ptr = HDfopen(trace_file_name, "r")) == NULL ) {
+ if ( (trace_file_ptr = HDfopen(trace_file_name, "r")) == NULL ) {
nerrors++;
if ( verbose ) {
@@ -7115,59 +7114,59 @@ trace_file_check(int metadata_write_strategy)
world_mpi_rank, FUNC);
}
}
- }
+ }
- i = 0;
- while ( ( nerrors == 0 ) && ( ! done ) )
- {
- if ( (*expected_output)[i] == NULL ) {
+ i = 0;
+ while ( ( nerrors == 0 ) && ( ! done ) )
+ {
+ if ( (*expected_output)[i] == NULL ) {
- expected_line_len = 0;
+ expected_line_len = 0;
- } else {
+ } else {
- expected_line_len = HDstrlen((*expected_output)[i]);
- }
+ expected_line_len = HDstrlen((*expected_output)[i]);
+ }
- if ( HDfgets(buffer, 255, trace_file_ptr) != NULL ) {
+ if ( HDfgets(buffer, 255, trace_file_ptr) != NULL ) {
- actual_line_len = strlen(buffer);
+ actual_line_len = strlen(buffer);
- } else {
+ } else {
- actual_line_len = 0;
- }
+ actual_line_len = 0;
+ }
- if ( ( actual_line_len == 0 ) && ( expected_line_len == 0 ) ) {
+ if ( ( actual_line_len == 0 ) && ( expected_line_len == 0 ) ) {
- done = TRUE;
+ done = TRUE;
- } else if ( ( actual_line_len != expected_line_len ) ||
- ( HDstrcmp(buffer, (*expected_output)[i]) != 0 ) ) {
+ } else if ( ( actual_line_len != expected_line_len ) ||
+ ( HDstrcmp(buffer, (*expected_output)[i]) != 0 ) ) {
- nerrors++;
+ nerrors++;
if ( verbose ) {
HDfprintf(stdout,
- "%d:%s: Unexpected data in trace file line %d.\n",
+ "%d:%s: Unexpected data in trace file line %d.\n",
world_mpi_rank, FUNC, i);
- HDfprintf(stdout, "%d:%s: expected = \"%s\" %d\n",
- world_mpi_rank, FUNC, (*expected_output)[i],
- expected_line_len);
- HDfprintf(stdout, "%d:%s: actual = \"%s\" %d\n",
- world_mpi_rank, FUNC, buffer,
- actual_line_len);
+ HDfprintf(stdout, "%d:%s: expected = \"%s\" %d\n",
+ world_mpi_rank, FUNC, (*expected_output)[i],
+ expected_line_len);
+ HDfprintf(stdout, "%d:%s: actual = \"%s\" %d\n",
+ world_mpi_rank, FUNC, buffer,
+ actual_line_len);
}
- } else {
- i++;
- }
- }
+ } else {
+ i++;
+ }
+ }
- if ( trace_file_ptr != NULL ) {
+ if ( trace_file_ptr != NULL ) {
- HDfclose(trace_file_ptr);
- trace_file_ptr = NULL;
+ HDfclose(trace_file_ptr);
+ trace_file_ptr = NULL;
#if 1
- HDremove(trace_file_name);
+ HDremove(trace_file_name);
#endif
}
}
@@ -7176,9 +7175,9 @@ trace_file_check(int metadata_write_strategy)
if ( world_mpi_rank == 0 ) {
- if ( max_nerrors == 0 ) {
+ if ( max_nerrors == 0 ) {
- PASSED();
+ PASSED();
} else {
@@ -7204,18 +7203,18 @@ trace_file_check(int metadata_write_strategy)
} /* trace_file_check() */
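
The check above toggles the trace file directly on the cache through H5AC_get/set_cache_auto_resize_config(). For comparison, a hedged sketch of reaching the same knobs through the public property-list route, H5Pget_mdc_config()/H5Pset_mdc_config(); fapl_id is an assumed file access property list, and the library must have been built with H5_METADATA_TRACE_FILE defined:

    /* sketch: enable the metadata cache trace file on a fapl before open */
    static hbool_t
    enable_trace_file(hid_t fapl_id)
    {
        H5AC_cache_config_t config;

        config.version = H5AC__CURR_CACHE_CONFIG_VERSION;  /* set before the get */

        if ( H5Pget_mdc_config(fapl_id, &config) < 0 )
            return(FALSE);

        config.open_trace_file = TRUE;
        strcpy(config.trace_file_name, "t_cache_trace.txt");

        return( H5Pset_mdc_config(fapl_id, &config) >= 0 );
    }

Files opened with such a fapl produce the per-rank t_cache_trace.txt.<rank> files that the check above reads back and compares against the expected output.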
-
+
/*****************************************************************************
*
- * Function: smoke_check_6()
+ * Function: smoke_check_6()
*
- * Purpose: Sixth smoke check for the parallel cache.
+ * Purpose: Sixth smoke check for the parallel cache.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 1/13/06
+ * Programmer: JRM -- 1/13/06
*
*****************************************************************************/
static hbool_t
@@ -7231,23 +7230,23 @@ smoke_check_6(int metadata_write_strategy)
switch ( metadata_write_strategy ) {
- case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #6 -- process 0 only md write strategy");
+ TESTING("smoke check #6 -- process 0 only md write strategy");
}
- break;
+ break;
- case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #6 -- distributed md write strategy");
+ TESTING("smoke check #6 -- distributed md write strategy");
}
- break;
+ break;
default:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #6 -- unknown md write strategy");
+ TESTING("smoke check #6 -- unknown md write strategy");
}
- break;
+ break;
}
nerrors = 0;
@@ -7256,12 +7255,12 @@ smoke_check_6(int metadata_write_strategy)
if ( world_mpi_rank == world_server_mpi_rank ) {
- if ( ! server_main() ) {
+ if ( ! server_main() ) {
            /* some error occurred in the server -- report failure */
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -7277,7 +7276,7 @@ smoke_check_6(int metadata_write_strategy)
fid = -1;
cache_ptr = NULL;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -7327,11 +7326,11 @@ smoke_check_6(int metadata_write_strategy)
HDassert(cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
}
- /* flush the file */
+ /* flush the file */
if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -7343,7 +7342,7 @@ smoke_check_6(int metadata_write_strategy)
struct datum * entry_ptr;
entry_ptr = &(data[i]);
- lock_entry(file_ptr, i);
+ lock_entry(file_ptr, i);
if(TRUE != entry_ptr->header.coll_access) {
nerrors++;
@@ -7364,7 +7363,7 @@ smoke_check_6(int metadata_write_strategy)
struct datum * entry_ptr;
entry_ptr = &(data[i]);
- lock_entry(file_ptr, i);
+ lock_entry(file_ptr, i);
if(FALSE != entry_ptr->header.coll_access) {
nerrors++;
@@ -7389,7 +7388,7 @@ smoke_check_6(int metadata_write_strategy)
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -7437,9 +7436,9 @@ smoke_check_6(int metadata_write_strategy)
if ( world_mpi_rank == 0 ) {
- if ( max_nerrors == 0 ) {
+ if ( max_nerrors == 0 ) {
- PASSED();
+ PASSED();
} else {
@@ -7454,18 +7453,18 @@ smoke_check_6(int metadata_write_strategy)
} /* smoke_check_6() */
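
smoke_check_6() revolves around the coll_access flag that the cache sets on collectively accessed metadata entries. A minimal, hedged sketch of the fapl setup that requests this behavior from the public API (comm and info are assumed MPI handles; error checks omitted):

    /* sketch: request collective metadata I/O on a parallel fapl */
    hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);

    H5Pset_fapl_mpio(fapl_id, comm, info);           /* MPI-IO driver              */
    H5Pset_all_coll_metadata_ops(fapl_id, TRUE);     /* collective metadata reads  */
    H5Pset_coll_metadata_write(fapl_id, TRUE);       /* collective metadata writes */

    /* entries read through a file opened with this fapl are the ones whose
     * header.coll_access the loops above expect to find set to TRUE
     */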
-
+
/*****************************************************************************
*
- * Function: main()
+ * Function: main()
*
- * Purpose: Main function for the parallel cache test.
+ * Purpose: Main function for the parallel cache test.
*
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: 1
+ * Failure: 1
*
- * Programmer: JRM -- 12/23/05
+ * Programmer: JRM -- 12/23/05
*
*****************************************************************************/
int
@@ -7492,7 +7491,7 @@ main(int argc, char **argv)
* calls. By then, MPI calls may not work.
*/
if (H5dont_atexit() < 0){
- printf("%d:Failed to turn off atexit processing. Continue.\n",
+ printf("%d:Failed to turn off atexit processing. Continue.\n",
mpi_rank);
};
H5open();
@@ -7503,32 +7502,32 @@ main(int argc, char **argv)
#endif /* JRM */
if ( express_test ) {
- virt_num_data_entries = EXPRESS_VIRT_NUM_DATA_ENTRIES;
+ virt_num_data_entries = EXPRESS_VIRT_NUM_DATA_ENTRIES;
} else {
- virt_num_data_entries = STD_VIRT_NUM_DATA_ENTRIES;
+ virt_num_data_entries = STD_VIRT_NUM_DATA_ENTRIES;
}
#ifdef H5_HAVE_MPE
- if ( MAINPROCESS ) { printf(" Tests compiled for MPE.\n"); }
+ if ( MAINPROCESS ) { printf(" Tests compiled for MPE.\n"); }
virt_num_data_entries = MPE_VIRT_NUM_DATA_ENTIES;
#endif /* H5_HAVE_MPE */
if (MAINPROCESS){
- printf("===================================\n");
- printf("Parallel metadata cache tests\n");
- printf(" mpi_size = %d\n", mpi_size);
- printf(" express_test = %d\n", express_test);
- printf("===================================\n");
+ printf("===================================\n");
+ printf("Parallel metadata cache tests\n");
+ printf(" mpi_size = %d\n", mpi_size);
+ printf(" express_test = %d\n", express_test);
+ printf("===================================\n");
}
if ( mpi_size < 3 ) {
if ( MAINPROCESS ) {
- printf(" Need at least 3 processes. Exiting.\n");
+ printf(" Need at least 3 processes. Exiting.\n");
}
goto finish;
}
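
The three-process minimum reflects the test's division of labour: one rank acts as the metadata server and the remaining ranks become file processes. A generic sketch of such a split (illustrative only -- not necessarily the exact construction this test uses; all names below are made up for the example):

    /* sketch: reserve the last world rank as a server, give the rest
     * their own communicator for file I/O
     */
    int      world_rank, world_size, server_rank;
    MPI_Comm file_comm;

    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    server_rank = world_size - 1;

    MPI_Comm_split(MPI_COMM_WORLD,
                   (world_rank == server_rank) ? 1 : 0,   /* color */
                   world_rank,                            /* key   */
                   &file_comm);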
@@ -7547,8 +7546,8 @@ main(int argc, char **argv)
/* setup file access property list with the world communicator */
if ( FAIL == (fapl = H5Pcreate(H5P_FILE_ACCESS)) ) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Pcreate() failed 1.\n",
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Pcreate() failed 1.\n",
world_mpi_rank, FUNC);
}
}
@@ -7580,8 +7579,8 @@ main(int argc, char **argv)
/* close the fapl before we set it up again */
if ( H5Pclose(fapl) < 0 ) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Pclose() failed.\n",
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Pclose() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -7591,9 +7590,9 @@ main(int argc, char **argv)
/* setup file access property list */
if ( FAIL == (fapl = H5Pcreate(H5P_FILE_ACCESS)) ) {
- nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Pcreate() failed 2.\n",
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Pcreate() failed 2.\n",
world_mpi_rank, FUNC);
}
}
@@ -7601,8 +7600,8 @@ main(int argc, char **argv)
if ( H5Pset_fapl_mpio(fapl, file_mpi_comm, MPI_INFO_NULL) < 0 ) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 2.\n",
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 2.\n",
world_mpi_rank, FUNC);
}
}
@@ -7620,7 +7619,7 @@ main(int argc, char **argv)
HDfprintf(stdout, "Errors in test initialization. Exiting.\n");
}
- goto finish;
+ goto finish;
}
/* run the tests */
@@ -7673,16 +7672,16 @@ finish:
* and exit.
*/
MPI_Barrier(MPI_COMM_WORLD);
- if (MAINPROCESS){ /* only process 0 reports */
- printf("===================================\n");
- if (failures){
- printf("***metadata cache tests detected %d failures***\n",
+ if (MAINPROCESS){ /* only process 0 reports */
+ printf("===================================\n");
+ if (failures){
+ printf("***metadata cache tests detected %d failures***\n",
failures);
- }
- else{
- printf("metadata cache tests finished with no failures\n");
- }
- printf("===================================\n");
+ }
+ else{
+ printf("metadata cache tests finished with no failures\n");
+ }
+ printf("===================================\n");
}
takedown_derived_types();
diff --git a/testpar/t_cache_image.c b/testpar/t_cache_image.c
index 524a63f..7fe0020 100644
--- a/testpar/t_cache_image.c
+++ b/testpar/t_cache_image.c
@@ -14,12 +14,11 @@
/* Programmer: John Mainzer
* 7/13/15
*
- * This file contains tests specific to the cache image
- * feature implemented in H5C.c
+ * This file contains tests specific to the cache image
+ * feature implemented in H5C.c
*/
-#include "h5test.h"
#include "testphdf5.h"
-#include "testpar.h"
+
#include "cache_common.h"
#include "genall5.h"
@@ -28,7 +27,7 @@
#define DSET_SIZE (40 * CHUNK_SIZE)
#define MAX_NUM_DSETS 256
#define PAR_NUM_DSETS 32
-#define PAGE_SIZE (4 * 1024)
+#define PAGE_SIZE (4 * 1024)
#define PB_SIZE (64 * PAGE_SIZE)
/* global variable declarations: */
@@ -59,8 +58,8 @@ static void open_hdf5_file(const hbool_t create_file,
hid_t * file_id_ptr,
H5F_t ** file_ptr_ptr,
H5C_t ** cache_ptr_ptr,
- MPI_Comm comm,
- MPI_Info info,
+ MPI_Comm comm,
+ MPI_Info info,
int l_facc_type,
const hbool_t all_coll_metadata_ops,
const hbool_t coll_metadata_write,
@@ -70,11 +69,11 @@ static void verify_data_sets(hid_t file_id, int min_dset, int max_dset);
/* local test function declarations */
-static hbool_t parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
+static hbool_t parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
hbool_t * ici_ptr, int * file_idx_ptr, int * mpi_size_ptr, hbool_t display);
static void usage(void);
static unsigned construct_test_file(int test_file_index);
-static void par_create_dataset(int dset_num, hid_t file_id, int mpi_rank,
+static void par_create_dataset(int dset_num, hid_t file_id, int mpi_rank,
int mpi_size);
static void par_delete_dataset(int dset_num, hid_t file_id, int mpi_rank);
static void par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank);
@@ -83,15 +82,15 @@ static hbool_t serial_insert_cache_image(int file_name_idx, int mpi_size);
static void serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size);
/* top level test function declarations */
-static unsigned verify_cache_image_RO(int file_name_id,
+static unsigned verify_cache_image_RO(int file_name_id,
int md_write_strat, int mpi_rank);
-static unsigned verify_cache_image_RW(int file_name_id,
+static unsigned verify_cache_image_RW(int file_name_id,
int md_write_strat, int mpi_rank);
-static hbool_t smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info,
+static hbool_t smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info,
int mpi_rank, int mpi_size);
-
+
/****************************************************************************/
/***************************** Utility Functions ****************************/
/****************************************************************************/
@@ -99,52 +98,52 @@ static hbool_t smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info,
/*-------------------------------------------------------------------------
* Function: construct_test_file()
*
- * Purpose: This function attempts to mimic the typical "poor man's
- *              parallel" use case, in which the file is passed between
- *              processes, each of which opens the file, writes some data,
- *              closes the file, and then passes control on to the next
- * process.
+ * Purpose: This function attempts to mimic the typical "poor man's
+ *              parallel" use case, in which the file is passed between
+ *              processes, each of which opens the file, writes some data,
+ *              closes the file, and then passes control on to the next
+ * process.
*
- * In this case, we create one group for each process, and
- * populate it with a "zoo" of HDF5 objects selected to
- * (ideally) exercise all HDF5 on disk data structures.
+ * In this case, we create one group for each process, and
+ * populate it with a "zoo" of HDF5 objects selected to
+ * (ideally) exercise all HDF5 on disk data structures.
*
- *              The end result is a test file used to verify that PHDF5
- * can open a file with a cache image.
+ *              The end result is a test file used to verify that PHDF5
+ * can open a file with a cache image.
*
- * Cycle of operation
+ * Cycle of operation
*
- * 1) Create a HDF5 file with the cache image FAPL entry.
+ * 1) Create a HDF5 file with the cache image FAPL entry.
*
- * Verify that the cache is informed of the cache image
- * FAPL entry.
+ * Verify that the cache is informed of the cache image
+ * FAPL entry.
*
- * Set all cache image flags, forcing full functionality.
+ * Set all cache image flags, forcing full functionality.
*
- * 2) Create a data set in the file.
+ * 2) Create a data set in the file.
*
- * 3) Close the file.
+ * 3) Close the file.
*
- * 4) Open the file.
+ * 4) Open the file.
*
- * Verify that the metadata cache is instructed to load
+ * Verify that the metadata cache is instructed to load
* the metadata cache image.
*
- * 5) Create a data set in the file.
+ * 5) Create a data set in the file.
*
- * 6) Close the file. If enough datasets have been created
+ * 6) Close the file. If enough datasets have been created
* goto 7. Otherwise return to 4.
*
- * 7) Open the file R/O.
+ * 7) Open the file R/O.
*
* Verify that the file contains a metadata cache image
* superblock extension message.
- *
- * 8) Verify all data sets.
*
- * Verify that the cache image has been loaded.
+ * 8) Verify all data sets.
*
- * 9) close the file.
+ * Verify that the cache image has been loaded.
+ *
+ * 9) close the file.
*
* Return: void
*
@@ -153,7 +152,7 @@ static hbool_t smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info,
*
* Modifications:
*
- * None.
+ * None.
*
*-------------------------------------------------------------------------
*/
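
Step 1 of the cycle above turns on the cache image feature through the FAPL. A hedged sketch of that setup via the public H5Pset_mdc_image_config() call, using the H5AC_cache_image_config_t fields referenced later in this file (fapl_id is an assumed file access property list; pass and failure_mssg follow this file's usual error convention):

    /* sketch: request a metadata cache image on file close */
    H5AC_cache_image_config_t img_cfg;

    img_cfg.version            = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
    img_cfg.generate_image     = TRUE;     /* write the image on close */
    img_cfg.save_resize_status = FALSE;
    img_cfg.entry_ageout       = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;

    if ( H5Pset_mdc_image_config(fapl_id, &img_cfg) < 0 ) {

        pass = FALSE;
        failure_mssg = "H5Pset_mdc_image_config() failed.";
    }

    /* a file created with this fapl and then closed should carry the
     * cache image superblock extension that step 7 looks for
     */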
@@ -175,7 +174,7 @@ construct_test_file(int test_file_index)
pass = TRUE;
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -184,7 +183,7 @@ construct_test_file(int test_file_index)
HDassert(FILENAMES[test_file_index]);
- if ( h5_fixname(FILENAMES[test_file_index], H5P_DEFAULT,
+ if ( h5_fixname(FILENAMES[test_file_index], H5P_DEFAULT,
filename, sizeof(filename))
== NULL ) {
@@ -193,13 +192,13 @@ construct_test_file(int test_file_index)
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* 1) Create a HDF5 file with the cache image FAPL entry.
+ /* 1) Create a HDF5 file with the cache image FAPL entry.
*
- * Verify that the cache is informed of the cache image FAPL entry.
+ * Verify that the cache is informed of the cache image FAPL entry.
*
* Set flags forcing full function of the cache image feature.
*/
@@ -210,7 +209,7 @@ construct_test_file(int test_file_index)
/* mdci_sbem_expected */ FALSE,
/* read_only */ FALSE,
/* set_mdci_fapl */ TRUE,
- /* config_fsm */ TRUE,
+ /* config_fsm */ TRUE,
/* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
@@ -225,7 +224,7 @@ construct_test_file(int test_file_index)
/* md_write_strat */ 0);
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -247,7 +246,7 @@ construct_test_file(int test_file_index)
}
#endif /* H5C_COLLECT_CACHE_STATS */
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -263,7 +262,7 @@ construct_test_file(int test_file_index)
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -282,7 +281,7 @@ construct_test_file(int test_file_index)
/* mdci_sbem_expected */ TRUE,
/* read_only */ FALSE,
/* set_mdci_fapl */ TRUE,
- /* config_fsm */ FALSE,
+ /* config_fsm */ FALSE,
/* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
@@ -356,7 +355,7 @@ construct_test_file(int test_file_index)
/* mdci_sbem_expected */ TRUE,
/* read_only */ TRUE,
/* set_mdci_fapl */ FALSE,
- /* config_fsm */ FALSE,
+ /* config_fsm */ FALSE,
/* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
@@ -375,7 +374,7 @@ construct_test_file(int test_file_index)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* 8) Open and close all data sets.
+ /* 8) Open and close all data sets.
*
* Verify that the cache image has been loaded.
*/
@@ -414,15 +413,15 @@ construct_test_file(int test_file_index)
} /* construct_test_file() */
-
+
/*-------------------------------------------------------------------------
* Function: create_data_sets()
*
* Purpose: If pass is TRUE on entry, create the specified data sets
- * in the indicated file.
+ * in the indicated file.
*
- *              Data sets and their contents must be well known, as we
- * will verify that they contain the expected data later.
+ *              Data sets and their contents must be well known, as we
+ * will verify that they contain the expected data later.
*
* On failure, set pass to FALSE, and set failure_mssg
* to point to an appropriate failure message.
@@ -436,15 +435,15 @@ construct_test_file(int test_file_index)
*
* Modifications:
*
- * Added min_dset and max_dset parameters and supporting
- * code. This allows the caller to specify a range of
- * datasets to create.
- * JRM -- 8/20/15
+ * Added min_dset and max_dset parameters and supporting
+ * code. This allows the caller to specify a range of
+ * datasets to create.
+ * JRM -- 8/20/15
*
*-------------------------------------------------------------------------
*/
-static void
+static void
create_data_sets(hid_t file_id, int min_dset, int max_dset)
{
const char * fcn_name = "create_data_sets()";
@@ -666,8 +665,8 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
/* read the chunk from file */
if ( pass ) {
- status = H5Dread(dataset_ids[m], H5T_NATIVE_INT,
- memspace_id, filespace_ids[m],
+ status = H5Dread(dataset_ids[m], H5T_NATIVE_INT,
+ memspace_id, filespace_ids[m],
H5P_DEFAULT, data_chunk);
if ( status < 0 ) {
@@ -692,7 +691,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
valid_chunk = FALSE;
- if ( verbose ) {
+ if ( verbose ) {
HDfprintf(stdout,
"data_chunk[%0d][%0d] = %0d, expect %0d.\n",
@@ -702,7 +701,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
HDfprintf(stdout,
"m = %d, i = %d, j = %d, k = %d, l = %d\n",
m, i, j, k, l);
- }
+ }
}
}
}
@@ -712,12 +711,12 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
pass = FALSE;
failure_mssg = "slab validation failed.";
- if ( verbose ) {
+ if ( verbose ) {
- fprintf(stdout,
+ fprintf(stdout,
"Chunk (%0d, %0d) in /dset%03d is invalid.\n",
i, j, m);
- }
+ }
}
}
m++;
@@ -768,16 +767,16 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
} /* create_data_sets() */
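
The verification half of create_data_sets() above reads the data back one chunk at a time. A hedged sketch of reading a single CHUNK_SIZE x CHUNK_SIZE block with the standard hyperslab calls (dataset_id, filespace_id and the chunk coordinates i and j are assumed to be in scope; this mirrors, rather than copies, the test's own loop):

    /* sketch: read back one chunk of an integer dataset */
    hsize_t offset[2] = { (hsize_t)(i * CHUNK_SIZE), (hsize_t)(j * CHUNK_SIZE) };
    hsize_t count[2]  = { CHUNK_SIZE, CHUNK_SIZE };
    int     data_chunk[CHUNK_SIZE][CHUNK_SIZE];
    hid_t   memspace_id;
    herr_t  status;

    memspace_id = H5Screate_simple(2, count, NULL);

    status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET,
                                 offset, NULL, count, NULL);

    if ( status >= 0 )
        status = H5Dread(dataset_id, H5T_NATIVE_INT, memspace_id,
                         filespace_id, H5P_DEFAULT, data_chunk);

    /* compare data_chunk[][] against the expected values, as the
     * validation loop above does, and close memspace_id when done
     */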
-
+
/*-------------------------------------------------------------------------
* Function: delete_data_sets()
*
- * Purpose: If pass is TRUE on entry, verify and then delete the
- * dataset(s) indicated by min_dset and max_dset in the
- * indicated file.
+ * Purpose: If pass is TRUE on entry, verify and then delete the
+ * dataset(s) indicated by min_dset and max_dset in the
+ * indicated file.
*
- *              Data sets and their contents must be well known, as we
- * will verify that they contain the expected data later.
+ *              Data sets and their contents must be well known, as we
+ * will verify that they contain the expected data later.
*
* On failure, set pass to FALSE, and set failure_mssg
* to point to an appropriate failure message.
@@ -792,17 +791,17 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
* Modifications:
*
* None.
- * JRM -- 8/20/15
+ * JRM -- 8/20/15
*
*-------------------------------------------------------------------------
*/
-#if 0
+#if 0
/* this code will be needed to test full support of cache image
* in parallel -- keep it around against that day.
*
* -- JRM
*/
-static void
+static void
delete_data_sets(hid_t file_id, int min_dset, int max_dset)
{
const char * fcn_name = "delete_data_sets()";
@@ -833,11 +832,11 @@ delete_data_sets(hid_t file_id, int min_dset, int max_dset)
{
sprintf(dset_name, "/dset%03d", i);
- if ( H5Ldelete(file_id, dset_name, H5P_DEFAULT) < 0) {
+ if ( H5Ldelete(file_id, dset_name, H5P_DEFAULT) < 0) {
pass = FALSE;
failure_mssg = "H5Ldelete() failed.";
- }
+ }
i++;
}
@@ -850,32 +849,32 @@ delete_data_sets(hid_t file_id, int min_dset, int max_dset)
} /* delete_data_sets() */
#endif
-
+
/*-------------------------------------------------------------------------
* Function: open_hdf5_file()
*
- * Purpose:     If pass is TRUE on entry, create or open the specified HDF5 file
- * and test to see if it has a metadata cache image superblock
- * extension message.
+ * Purpose:     If pass is TRUE on entry, create or open the specified HDF5 file
+ * and test to see if it has a metadata cache image superblock
+ * extension message.
*
- * Set pass to FALSE and issue a suitable failure
- * message if either the file contains a metadata cache image
- * superblock extension and mdci_sbem_expected is TRUE, or
- *              vice versa.
+ * Set pass to FALSE and issue a suitable failure
+ * message if either the file contains a metadata cache image
+ * superblock extension and mdci_sbem_expected is TRUE, or
+ *              vice versa.
*
- * If mdci_sbem_expected is TRUE, also verify that the metadata
- * cache has been advised of this.
+ * If mdci_sbem_expected is TRUE, also verify that the metadata
+ * cache has been advised of this.
*
- * If read_only is TRUE, open the file read only. Otherwise
- * open the file read/write.
+ * If read_only is TRUE, open the file read only. Otherwise
+ * open the file read/write.
*
- * If set_mdci_fapl is TRUE, set the metadata cache image
- * FAPL entry when opening the file, and verify that the
- * metadata cache is notified.
+ * If set_mdci_fapl is TRUE, set the metadata cache image
+ * FAPL entry when opening the file, and verify that the
+ * metadata cache is notified.
*
- *              If config_fsm is TRUE, set up the persistent free space
- * manager. Note that this flag may only be set if
- * create_file is also TRUE.
+ *              If config_fsm is TRUE, set up the persistent free space
+ * manager. Note that this flag may only be set if
+ * create_file is also TRUE.
*
* Return pointers to the cache data structure and file data
* structures.
@@ -893,10 +892,10 @@ delete_data_sets(hid_t file_id, int min_dset, int max_dset)
* Modifications:
*
* Modified function to handle parallel file creates / opens.
- *
+ *
* JRM -- 2/1/17
*
- * Modified function to handle
+ * Modified function to handle
*
*-------------------------------------------------------------------------
*/
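
Taken together, the create_file / config_fsm / FACC_MPIO branches described above reduce to roughly the following property-list plumbing on the create path (sketch only; error handling and the cache-image and page-buffer branches are omitted, and comm, info and hdf_file_name are the function's own parameters):

    /* sketch: parallel file create with a persistent, paged free space manager */
    hid_t fcpl_id = H5P_DEFAULT;
    hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
    hid_t file_id;

    H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
    H5Pset_fapl_mpio(fapl_id, comm, info);

    if ( config_fsm ) {
        fcpl_id = H5Pcreate(H5P_FILE_CREATE);
        H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE,
                                   TRUE, (hsize_t)1);
    }

    file_id = H5Fcreate(hdf_file_name, H5F_ACC_TRUNC, fcpl_id, fapl_id);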
@@ -904,17 +903,17 @@ delete_data_sets(hid_t file_id, int min_dset, int max_dset)
static void
open_hdf5_file(const hbool_t create_file,
const hbool_t mdci_sbem_expected,
- const hbool_t read_only,
- const hbool_t set_mdci_fapl,
- const hbool_t config_fsm,
+ const hbool_t read_only,
+ const hbool_t set_mdci_fapl,
+ const hbool_t config_fsm,
const hbool_t enable_page_buffer,
- const char * hdf_file_name,
+ const char * hdf_file_name,
const unsigned cache_image_flags,
hid_t * file_id_ptr,
H5F_t ** file_ptr_ptr,
H5C_t ** cache_ptr_ptr,
- MPI_Comm comm,
- MPI_Info info,
+ MPI_Comm comm,
+ MPI_Info info,
int l_facc_type,
const hbool_t all_coll_metadata_ops,
const hbool_t coll_metadata_write,
@@ -941,8 +940,8 @@ open_hdf5_file(const hbool_t create_file,
if ( pass )
{
- /* opening the file both read only and with a cache image
- * requested is a contradiction. We resolve it by ignoring
+ /* opening the file both read only and with a cache image
+ * requested is a contradiction. We resolve it by ignoring
* the cache image request silently.
*/
if ( ( create_file && mdci_sbem_expected ) ||
@@ -969,7 +968,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
    /* create a file access property list. */
@@ -984,13 +983,13 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* call H5Pset_libver_bounds() on the fapl_id */
if ( pass ) {
- if ( H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST)
+ if ( H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST)
< 0 ) {
pass = FALSE;
@@ -998,7 +997,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* get metadata cache image config -- verify that it is the default */
@@ -1016,7 +1015,7 @@ open_hdf5_file(const hbool_t create_file,
H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) ||
( cache_image_config.generate_image != FALSE ) ||
( cache_image_config.save_resize_status != FALSE ) ||
- ( cache_image_config.entry_ageout !=
+ ( cache_image_config.entry_ageout !=
H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) ) {
pass = FALSE;
@@ -1024,7 +1023,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* set metadata cache image fapl entry if indicated */
@@ -1044,24 +1043,24 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
    /* set up the persistent free space manager if indicated */
if ( ( pass ) && ( config_fsm ) ) {
- fcpl_id = H5Pcreate(H5P_FILE_CREATE);
+ fcpl_id = H5Pcreate(H5P_FILE_CREATE);
- if ( fcpl_id <= 0 ) {
+ if ( fcpl_id <= 0 ) {
- pass = FALSE;
- failure_mssg = "H5Pcreate(H5P_FILE_CREATE) failed.";
- }
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_FILE_CREATE) failed.";
+ }
}
if ( ( pass ) && ( config_fsm ) ) {
- if ( H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE,
+ if ( H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE,
TRUE, (hsize_t)1) == FAIL ) {
pass = FALSE;
failure_mssg = "H5Pset_file_space_strategy() failed.\n";
@@ -1077,7 +1076,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* setup the page buffer if indicated */
@@ -1087,10 +1086,10 @@ open_hdf5_file(const hbool_t create_file,
pass = FALSE;
failure_mssg = "H5Pset_page_buffer_size() failed.\n";
- }
+ }
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1098,13 +1097,13 @@ open_hdf5_file(const hbool_t create_file,
/* set Parallel access with communicator */
if ( H5Pset_fapl_mpio(fapl_id, comm, info) < 0 ) {
-
+
pass = FALSE;
failure_mssg = "H5Pset_fapl_mpio() failed.\n";
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
if ( ( pass ) && ( l_facc_type == FACC_MPIO ) ) {
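For context, the FACC_MPIO branches here boil down to attaching the MPI-IO driver, and optionally collective metadata operations, to the file access property list. A hedged sketch, assuming comm and info are a valid MPI communicator / info pair; whether the collective metadata calls are made depends on the test's all_coll_metadata_ops and coll_metadata_write parameters:

    hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);

    /* route all file I/O done through this FAPL via the MPI-IO driver */
    if ( H5Pset_fapl_mpio(fapl_id, comm, info) < 0 ) {
        /* handle error */
    }

    /* optional: make metadata reads and writes collective as well */
    if ( H5Pset_all_coll_metadata_ops(fapl_id, TRUE) < 0 ) {
        /* handle error */
    }

    if ( H5Pset_coll_metadata_write(fapl_id, TRUE) < 0 ) {
        /* handle error */
    }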
@@ -1116,7 +1115,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
if ( ( pass ) && ( l_facc_type == FACC_MPIO ) ) {
@@ -1128,7 +1127,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
if ( ( pass ) && ( l_facc_type == FACC_MPIO ) ) {
@@ -1137,7 +1136,7 @@ open_hdf5_file(const hbool_t create_file,
H5AC_cache_config_t mdc_config;
mdc_config.version = H5C__CURR_AUTO_SIZE_CTL_VER;
-
+
if ( H5Pget_mdc_config(fapl_id, &mdc_config) < 0 ) {
pass = FALSE;
@@ -1153,7 +1152,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* open the file */
@@ -1161,13 +1160,13 @@ open_hdf5_file(const hbool_t create_file,
if ( create_file ) {
- if ( fcpl_id != -1 )
+ if ( fcpl_id != -1 )
- file_id = H5Fcreate(hdf_file_name, H5F_ACC_TRUNC,
- fcpl_id, fapl_id);
- else
+ file_id = H5Fcreate(hdf_file_name, H5F_ACC_TRUNC,
+ fcpl_id, fapl_id);
+ else
- file_id = H5Fcreate(hdf_file_name, H5F_ACC_TRUNC,
+ file_id = H5Fcreate(hdf_file_name, H5F_ACC_TRUNC,
H5P_DEFAULT, fapl_id);
} else {
@@ -1202,7 +1201,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* get a pointer to the file's internal data structure and then
@@ -1221,12 +1220,12 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* verify expected page buffer status. At present, page buffering
- * must be disabled in parallel -- hopefully this will change in the
+ /* verify expected page buffer status. At present, page buffering
+ * must be disabled in parallel -- hopefully this will change in the
* future.
*/
if ( pass ) {
@@ -1245,7 +1244,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1265,7 +1264,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1273,26 +1272,26 @@ open_hdf5_file(const hbool_t create_file,
if ( set_mdci_fapl ) {
- if ( read_only ) {
+ if ( read_only ) {
- if ( ( image_ctl.version !=
+ if ( ( image_ctl.version !=
H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) ||
( image_ctl.generate_image != FALSE ) ||
( image_ctl.save_resize_status != FALSE ) ||
- ( image_ctl.entry_ageout !=
+ ( image_ctl.entry_ageout !=
H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) ||
( image_ctl.flags != H5C_CI__ALL_FLAGS ) ) {
pass = FALSE;
failure_mssg = "Unexpected image_ctl values(1).\n";
}
- } else {
+ } else {
- if ( ( image_ctl.version !=
+ if ( ( image_ctl.version !=
H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) ||
- ( image_ctl.generate_image != TRUE ) ||
+ ( image_ctl.generate_image != TRUE ) ||
( image_ctl.save_resize_status != FALSE ) ||
- ( image_ctl.entry_ageout !=
+ ( image_ctl.entry_ageout !=
H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) ||
( image_ctl.flags != H5C_CI__ALL_FLAGS ) ) {
@@ -1302,11 +1301,11 @@ open_hdf5_file(const hbool_t create_file,
}
} else {
- if ( ( image_ctl.version !=
+ if ( ( image_ctl.version !=
H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) ||
( image_ctl.generate_image != FALSE ) ||
( image_ctl.save_resize_status != FALSE ) ||
- ( image_ctl.entry_ageout !=
+ ( image_ctl.entry_ageout !=
H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) ||
( image_ctl.flags != H5C_CI__ALL_FLAGS ) ) {
@@ -1316,7 +1315,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
if ( ( pass ) && ( set_mdci_fapl ) ) {
@@ -1330,7 +1329,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
if ( pass ) {
@@ -1359,19 +1358,19 @@ open_hdf5_file(const hbool_t create_file,
pass = FALSE;
failure_mssg = "mdci sb extension message not present?\n";
}
- }
+ }
} else {
- if ( ( cache_ptr->load_image == TRUE ) ||
+ if ( ( cache_ptr->load_image == TRUE ) ||
( cache_ptr->delete_image == TRUE ) ) {
pass = FALSE;
failure_mssg = "mdci sb extension message present?\n";
- }
+ }
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
if ( pass ) {
@@ -1382,11 +1381,11 @@ open_hdf5_file(const hbool_t create_file,
}
if ( show_progress ) {
- HDfprintf(stdout, "%s: cp = %d, pass = %d -- exiting.\n",
+ HDfprintf(stdout, "%s: cp = %d, pass = %d -- exiting.\n",
fcn_name, cp++, pass);
if ( ! pass )
- HDfprintf(stdout, "%s: failure_mssg = %s\n",
+ HDfprintf(stdout, "%s: failure_mssg = %s\n",
fcn_name, failure_mssg);
}
@@ -1394,11 +1393,11 @@ open_hdf5_file(const hbool_t create_file,
} /* open_hdf5_file() */
-
+
/*-------------------------------------------------------------------------
* Function: par_create_dataset()
*
- * Purpose: Collectively create a chunked dataset, and fill it with
+ * Purpose: Collectively create a chunked dataset, and fill it with
* known values.
*
* On failure, set pass to FALSE, and set failure_mssg
@@ -1469,7 +1468,7 @@ par_create_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* set the dataset creation plist to specify that the raw data is
@@ -1487,7 +1486,7 @@ par_create_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
if ( pass ) {
@@ -1503,7 +1502,7 @@ par_create_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* create the dataset */
@@ -1520,7 +1519,7 @@ par_create_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* get the file space ID */
@@ -1535,7 +1534,7 @@ par_create_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* create the mem space to be used to read and write chunks */
@@ -1553,7 +1552,7 @@ par_create_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* select in memory hyperslab */
@@ -1575,7 +1574,7 @@ par_create_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* setup the DXPL for collective I/O */
@@ -1590,7 +1589,7 @@ par_create_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
if ( pass ) {
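The collective transfer property list referenced by the "setup the DXPL for collective I/O" comment above is a two-call pattern; a minimal sketch, using dxpl_id as the name for the property list the test closes later:

    hid_t dxpl_id = H5Pcreate(H5P_DATASET_XFER);

    if ( dxpl_id < 0 ) {
        /* handle error */
    }

    /* every rank must now participate in each H5Dwrite()/H5Dread()
     * issued with this DXPL
     */
    if ( H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0 ) {
        /* handle error */
    }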
@@ -1602,7 +1601,7 @@ par_create_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* initialize the dataset with collective writes */
@@ -1613,8 +1612,8 @@ par_create_dataset(int dset_num,
while ( ( pass ) && ( j < DSET_SIZE ) )
{
- if ( show_progress )
- HDfprintf(stdout, "%s: cp = %d.0, pass = %d.\n",
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d.0, pass = %d.\n",
fcn_name, cp, pass);
/* initialize the slab */
@@ -1628,8 +1627,8 @@ par_create_dataset(int dset_num,
}
}
- if ( show_progress )
- HDfprintf(stdout, "%s: cp = %d.1, pass = %d.\n",
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d.1, pass = %d.\n",
fcn_name, cp, pass);
/* select on disk hyperslab */
@@ -1637,7 +1636,7 @@ par_create_dataset(int dset_num,
offset[1] = (hsize_t)i;
offset[2] = (hsize_t)j;
a_size[0] = (hsize_t)1; /* size of hyperslab */
- a_size[1] = CHUNK_SIZE;
+ a_size[1] = CHUNK_SIZE;
a_size[2] = CHUNK_SIZE;
status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET,
offset, NULL, a_size, NULL);
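The on-disk selection in this hunk picks out one CHUNK_SIZE x CHUNK_SIZE tile of a 3-D file dataspace. Paraphrased below; i, j, CHUNK_SIZE and filespace_id come from the surrounding test, and the offset[0] value is illustrative only since it is assigned outside the lines shown here:

    hsize_t offset[3];
    hsize_t a_size[3];

    offset[0] = 0;              /* plane index -- illustrative value */
    offset[1] = (hsize_t)i;     /* row of the tile */
    offset[2] = (hsize_t)j;     /* column of the tile */

    a_size[0] = 1;              /* one plane thick */
    a_size[1] = CHUNK_SIZE;
    a_size[2] = CHUNK_SIZE;

    if ( H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET,
                             offset, NULL, a_size, NULL) < 0 ) {
        /* selection failed */
    }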
@@ -1648,8 +1647,8 @@ par_create_dataset(int dset_num,
failure_mssg = "disk H5Sselect_hyperslab() failed.";
}
- if ( show_progress )
- HDfprintf(stdout, "%s: cp = %d.2, pass = %d.\n",
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d.2, pass = %d.\n",
fcn_name, cp, pass);
/* write the chunk to file */
@@ -1662,8 +1661,8 @@ par_create_dataset(int dset_num,
failure_mssg = "H5Dwrite() failed.";
}
- if ( show_progress )
- HDfprintf(stdout, "%s: cp = %d.3, pass = %d.\n",
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d.3, pass = %d.\n",
fcn_name, cp, pass);
j += CHUNK_SIZE;
@@ -1673,7 +1672,7 @@ par_create_dataset(int dset_num,
}
cp++;
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* read data from data sets and validate it */
@@ -1762,7 +1761,7 @@ par_create_dataset(int dset_num,
i += CHUNK_SIZE;
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* close the data space */
@@ -1807,14 +1806,14 @@ par_create_dataset(int dset_num,
failure_mssg = "H5Pclose(dxpl) failed.";
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
return;
} /* par_create_dataset() */
-
+
/*-------------------------------------------------------------------------
* Function: par_delete_dataset()
*
@@ -1862,7 +1861,7 @@ par_delete_dataset(int dset_num,
par_verify_dataset(dset_num, file_id, mpi_rank);
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* delete the target dataset */
@@ -1875,21 +1874,21 @@ par_delete_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
return;
} /* par_delete_dataset() */
-
+
/*-------------------------------------------------------------------------
* Function: par_insert_cache_image()
*
* Purpose: Insert a cache image in the supplied file.
*
- * At present, cache image is not enabled in the parallel
- * so we have to insert the cache image with a serial
+ * At present, cache image is not enabled in the parallel case,
+ * so we have to insert the cache image with a serial
* process. Do this via a fork and an execv from process 0.
* All processes wait until the child process completes, and
* then return.
@@ -1935,22 +1934,22 @@ par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size )
/* fun and games to shut up the compiler */
char param0[32] = "t_cache_image";
char param1[32] = "ici";
- char * child_argv[] = {param0,
- param1,
- file_name_idx_str,
- mpi_size_str,
+ char * child_argv[] = {param0,
+ param1,
+ file_name_idx_str,
+ mpi_size_str,
NULL};
/* we may need to play with the path here */
if ( execv("t_cache_image", child_argv) == -1 ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"execl() of ici process failed. errno = %d(%s)\n",
errno, strerror(errno));
exit(1);
}
- } else if ( child_pid != -1 ) {
+ } else if ( child_pid != -1 ) {
/* this is the parent process -- wait until child is done */
if ( -1 == waitpid(child_pid, &child_status, WUNTRACED)) {
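Stripped of the test bookkeeping, the fork-and-exec sequence in this hunk is the standard POSIX pattern: process 0 forks, the child replaces itself with a serial run of t_cache_image, the parent blocks in waitpid(), and the other ranks synchronize at the MPI_Barrier() further down. A sketch under those assumptions; the "0" and "2" argv entries stand in for the file index and MPI size strings built earlier:

    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    pid_t child_pid = fork();

    if ( child_pid == 0 ) {
        /* child: become the serial cache image insertion run */
        char *child_argv[] = { "t_cache_image", "ici", "0", "2", NULL };

        execv("t_cache_image", child_argv);
        _exit(1);               /* only reached if execv() fails */

    } else if ( child_pid > 0 ) {
        /* parent: wait for the child and check how it exited */
        int child_status;

        if ( ( waitpid(child_pid, &child_status, WUNTRACED) == -1 ) ||
             ( ! WIFEXITED(child_status) ) ||
             ( WEXITSTATUS(child_status) != 0 ) ) {
            /* child failed */
        }
    } else {
        /* fork() itself failed */
    }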
@@ -1973,7 +1972,7 @@ par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size )
}
} else { /* fork failed */
- HDfprintf(stdout,
+ HDfprintf(stdout,
"can't create process to insert cache image.\n");
pass = FALSE;
}
@@ -1982,8 +1981,8 @@ par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size )
if ( pass ) {
- /* make sure insertion of the cache image is complete
- * before proceeding
+ /* make sure insertion of the cache image is complete
+ * before proceeding
*/
MPI_Barrier(MPI_COMM_WORLD);
}
@@ -1992,7 +1991,7 @@ par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size )
} /* par_insert_cache_image() */
-
+
/*-------------------------------------------------------------------------
* Function: par_verify_dataset()
*
@@ -2072,7 +2071,7 @@ par_verify_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* create the mem space to be used to read */
@@ -2090,7 +2089,7 @@ par_verify_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* select in memory hyperslab */
@@ -2112,7 +2111,7 @@ par_verify_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* setup the DXPL for collective I/O */
@@ -2127,7 +2126,7 @@ par_verify_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
if ( pass ) {
@@ -2139,7 +2138,7 @@ par_verify_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* read data from data sets and validate it */
@@ -2228,7 +2227,7 @@ par_verify_dataset(int dset_num,
i += CHUNK_SIZE;
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* close the file space */
@@ -2259,23 +2258,23 @@ par_verify_dataset(int dset_num,
failure_mssg = "H5Pclose(dxpl) failed.";
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
return;
} /* par_verify_dataset() */
-
+
/*-------------------------------------------------------------------------
* Function: serial_insert_cache_image()
*
* Purpose: Insert a cache image in the supplied file.
*
- * To populate the cache image, validate the contents
- * of the file before closing.
+ * To populate the cache image, validate the contents
+ * of the file before closing.
*
- * On failure, print an appropriate error message and
+ * On failure, print an appropriate error message and
* return FALSE.
*
* Return: TRUE if successful, FALSE otherwise.
@@ -2329,7 +2328,7 @@ serial_insert_cache_image(int file_name_idx, int mpi_size )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* 2) Open the PHDF5 file with the cache image FAPL entry.
+ /* 2) Open the PHDF5 file with the cache image FAPL entry.
*/
if ( pass ) {
@@ -2388,7 +2387,7 @@ serial_insert_cache_image(int file_name_idx, int mpi_size )
} /* serial_insert_cache_image() */
-
+
/*-------------------------------------------------------------------------
* Function: serial_verify_dataset()
*
@@ -2464,7 +2463,7 @@ serial_verify_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* create the mem space to be used to read */
@@ -2482,7 +2481,7 @@ serial_verify_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* select in memory hyperslab */
@@ -2504,7 +2503,7 @@ serial_verify_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2601,7 +2600,7 @@ serial_verify_dataset(int dset_num,
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* close the file space */
@@ -2625,18 +2624,18 @@ serial_verify_dataset(int dset_num,
failure_mssg = "H5Sclose(memspace_id) failed.";
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
return;
} /* serial_verify_dataset() */
-
+
/*-------------------------------------------------------------------------
* Function: parse_flags
*
- * Purpose: Parse the flags passed to this program, and load the
+ * Purpose: Parse the flags passed to this program, and load the
* values into the supplied field.
*
* Return: Success: 1
@@ -2648,7 +2647,7 @@ serial_verify_dataset(int dset_num,
*-------------------------------------------------------------------------
*/
static hbool_t
-parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
+parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
hbool_t * ici_ptr, int * file_idx_ptr, int * mpi_size_ptr, hbool_t display)
{
const char * fcn_name = "parse_flags()";
@@ -2669,7 +2668,7 @@ parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
}
- if ( ( success ) &&
+ if ( ( success ) &&
( ( argc != 1 ) && ( argc != 2 ) && ( argc != 4 ) ) ) {
success = FALSE;
@@ -2716,7 +2715,7 @@ parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
else if ( *ici_ptr )
- HDfprintf(stdout, "t_cache_image ici %d %d\n",
+ HDfprintf(stdout, "t_cache_image ici %d %d\n",
*file_idx_ptr, *mpi_size_ptr);
else
@@ -2728,7 +2727,7 @@ parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
} /* parse_flags() */
-
+
/*-------------------------------------------------------------------------
* Function: usage
*
@@ -2738,7 +2737,7 @@ parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
* Return: void
*
* Programmer: John Mainzer
- * 4/28/11
+ * 4/28/11
*
* Modifications:
*
@@ -2792,17 +2791,17 @@ usage(void)
return;
} /* usage() */
-
+
/*-------------------------------------------------------------------------
* Function: verify_data_sets()
*
- * Purpose: If pass is TRUE on entry, verify that the data sets in the
- * file exist and contain the expected data.
+ * Purpose: If pass is TRUE on entry, verify that the data sets in the
+ * file exist and contain the expected data.
*
- * Note that these data sets were created by
- * create_data_sets() above. Thus any changes in that
- * function must be reflected in this function, and
- * vise-versa.
+ * Note that these data sets were created by
+ * create_data_sets() above. Thus any changes in that
+ * function must be reflected in this function, and
+ * vice versa.
*
* On failure, set pass to FALSE, and set failure_mssg
* to point to an appropriate failure message.
@@ -2816,15 +2815,15 @@ usage(void)
*
* Modifications:
*
- * Added min_dset and max_dset parameters and supporting
- * code. This allows the caller to specify a range of
- * datasets to verify.
- * JRM -- 8/20/15
+ * Added min_dset and max_dset parameters and supporting
+ * code. This allows the caller to specify a range of
+ * datasets to verify.
+ * JRM -- 8/20/15
*
*-------------------------------------------------------------------------
*/
-static void
+static void
verify_data_sets(hid_t file_id, int min_dset, int max_dset)
{
const char * fcn_name = "verify_data_sets()";
@@ -2952,8 +2951,8 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
/* read the chunk from file */
if ( pass ) {
- status = H5Dread(dataset_ids[m], H5T_NATIVE_INT,
- memspace_id, filespace_ids[m],
+ status = H5Dread(dataset_ids[m], H5T_NATIVE_INT,
+ memspace_id, filespace_ids[m],
H5P_DEFAULT, data_chunk);
if ( status < 0 ) {
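The validation that follows this read compares every element of the chunk against the value written when the data sets were created. In outline only; expected_value() is a hypothetical helper standing in for the test's arithmetic, and m, i, j, CHUNK_SIZE, memspace_id, filespace_ids and dataset_ids come from the surrounding code:

    int     data_chunk[CHUNK_SIZE][CHUNK_SIZE];
    hbool_t valid_chunk = TRUE;
    int     k, l;

    if ( H5Dread(dataset_ids[m], H5T_NATIVE_INT, memspace_id,
                 filespace_ids[m], H5P_DEFAULT, data_chunk) < 0 ) {
        /* read failed */
    }

    for ( k = 0; k < CHUNK_SIZE; k++ )
        for ( l = 0; l < CHUNK_SIZE; l++ )
            if ( data_chunk[k][l] != expected_value(m, i + k, j + l) )
                valid_chunk = FALSE;    /* hypothetical helper above */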
@@ -2978,8 +2977,8 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
valid_chunk = FALSE;
- if ( verbose ) {
-
+ if ( verbose ) {
+
HDfprintf(stdout,
"data_chunk[%0d][%0d] = %0d, expect %0d.\n",
k, l, data_chunk[k][l],
@@ -2988,7 +2987,7 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
HDfprintf(stdout,
"m = %d, i = %d, j = %d, k = %d, l = %d\n",
m, i, j, k, l);
- }
+ }
}
}
}
@@ -2998,12 +2997,12 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
pass = FALSE;
failure_mssg = "slab validation failed.";
- if ( verbose ) {
+ if ( verbose ) {
- fprintf(stdout,
+ fprintf(stdout,
"Chunk (%0d, %0d) in /dset%03d is invalid.\n",
i, j, m);
- }
+ }
}
}
m++;
@@ -3054,7 +3053,7 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
} /* verify_data_sets() */
-
+
/****************************************************************************/
/******************************* Test Functions *****************************/
/****************************************************************************/
@@ -3062,21 +3061,21 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
/*-------------------------------------------------------------------------
* Function: verify_cache_image_RO()
*
- * Purpose: Verify that a HDF5 file containing a cache image is
- * opened R/O and read correctly by PHDF5 with the specified
+ * Purpose: Verify that a HDF5 file containing a cache image is
+ * opened R/O and read correctly by PHDF5 with the specified
* metadata write strategy.
- *
+ *
* Basic cycle of operation is as follows:
*
- * 1) Open the test file created at the beginning of this
- * test read only.
+ * 1) Open the test file created at the beginning of this
+ * test read only.
*
- * Verify that the file contains a cache image.
+ * Verify that the file contains a cache image.
*
- * Verify that only process 0 reads the cache image.
+ * Verify that only process 0 reads the cache image.
*
- * Verify that all other processes receive the cache
- * image block from process 0.
+ * Verify that all other processes receive the cache
+ * image block from process 0.
*
* 2) Verify that the file contains the expected data.
*
@@ -3096,7 +3095,7 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
*
* Modifications:
*
- * None.
+ * None.
*
*-------------------------------------------------------------------------
*/
@@ -3135,14 +3134,14 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
show_progress = ( ( show_progress ) && ( mpi_rank == 0 ) );
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* setup the file name */
if ( pass ) {
- if ( h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT,
+ if ( h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT,
filename, sizeof(filename)) == NULL ) {
pass = FALSE;
@@ -3150,7 +3149,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3165,7 +3164,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
/* mdci_sbem_expected */ TRUE,
/* read_only */ TRUE,
/* set_mdci_fapl */ FALSE,
- /* config_fsm */ FALSE,
+ /* config_fsm */ FALSE,
/* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
@@ -3180,15 +3179,15 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
/* md_write_strat */ md_write_strat);
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* 2) Verify that the file contains the expected data.
+ /* 2) Verify that the file contains the expected data.
*
* Verify that only process 0 reads the cache image.
*
- * Verify that all other processes receive the cache
+ * Verify that all other processes receive the cache
* image block from process 0.
*/
@@ -3210,14 +3209,14 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
}
#endif /* H5C_COLLECT_CACHE_STATS */
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* Verify that all other processes receive the cache image block
+ /* Verify that all other processes receive the cache image block
* from process 0.
- *
- * Since we have alread verified that only process 0 has read the
- * image, it is sufficient to verify that the image was loaded on
+ *
+ * Since we have already verified that only process 0 has read the
+ * image, it is sufficient to verify that the image was loaded on
* all processes.
*/
#if H5C_COLLECT_CACHE_STATS
@@ -3231,12 +3230,12 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
}
#endif /* H5C_COLLECT_CACHE_STATS */
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 3) Close the file. */
-
+
if ( pass ) {
if ( H5Fclose(file_id) < 0 ) {
@@ -3247,7 +3246,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3259,7 +3258,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
/* mdci_sbem_expected */ TRUE,
/* read_only */ TRUE,
/* set_mdci_fapl */ FALSE,
- /* config_fsm */ FALSE,
+ /* config_fsm */ FALSE,
/* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
@@ -3274,12 +3273,12 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
/* md_write_strat */ md_write_strat);
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 5) Verify that the file contains the expected data. */
-
+
if ( pass ) {
verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1);
@@ -3298,7 +3297,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
/* 6) Close the file. */
-
+
if ( pass ) {
if ( H5Fclose(file_id) < 0 ) {
@@ -3309,7 +3308,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3324,7 +3323,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
H5_FAILED();
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", failure_mssg);
}
}
@@ -3334,32 +3333,32 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
} /* verify_cache_image_RO() */
-
+
/*-------------------------------------------------------------------------
* Function: verify_cache_image_RW()
*
- * Purpose: Verify that a HDF5 file containing a cache image is
- * opened and read correctly by PHDF5 with the specified
+ * Purpose: Verify that a HDF5 file containing a cache image is
+ * opened and read correctly by PHDF5 with the specified
* metadata write strategy.
- *
+ *
* Basic cycle of operation is as follows:
*
- * 1) Open the test file created at the beginning of this
- * test.
+ * 1) Open the test file created at the beginning of this
+ * test.
*
- * Verify that the file contains a cache image.
+ * Verify that the file contains a cache image.
*
* 2) Verify that the file contains the expected data.
*
- * Verify that only process 0 reads the cache image.
+ * Verify that only process 0 reads the cache image.
*
- * Verify that all other processes receive the cache
- * image block from process 0.
+ * Verify that all other processes receive the cache
+ * image block from process 0.
*
*
* 3) Close the file.
*
- * 4) Open the file, and verify that it doesn't contain
+ * 4) Open the file, and verify that it doesn't contain
* a cache image.
*
* 5) Verify that the file contains the expected data.
@@ -3375,7 +3374,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
*
* Modifications:
*
- * None.
+ * None.
*
*-------------------------------------------------------------------------
*/
@@ -3414,14 +3413,14 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
show_progress = ( ( show_progress ) && ( mpi_rank == 0 ) );
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* setup the file name */
if ( pass ) {
- if ( h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT,
+ if ( h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT,
filename, sizeof(filename)) == NULL ) {
pass = FALSE;
@@ -3429,7 +3428,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3439,7 +3438,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
*
* Verify that only process 0 reads the cache image.
*
- * Verify that all other processes receive the cache
+ * Verify that all other processes receive the cache
* image block from process 0.
*/
@@ -3449,7 +3448,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
/* mdci_sbem_expected */ TRUE,
/* read_only */ FALSE,
/* set_mdci_fapl */ FALSE,
- /* config_fsm */ FALSE,
+ /* config_fsm */ FALSE,
/* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
@@ -3464,7 +3463,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
/* md_write_strat */ md_write_strat);
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3472,7 +3471,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
*
* Verify that only process 0 reads the cache image.
*
- * Verify that all other processes receive the cache
+ * Verify that all other processes receive the cache
* image block from process 0.
*/
if ( pass ) {
@@ -3493,14 +3492,14 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
}
#endif /* H5C_COLLECT_CACHE_STATS */
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* Verify that all other processes receive the cache image block
+ /* Verify that all other processes receive the cache image block
* from process 0.
- *
- * Since we have alread verified that only process 0 has read the
- * image, it is sufficient to verify that the image was loaded on
+ *
+ * Since we have already verified that only process 0 has read the
+ * image, it is sufficient to verify that the image was loaded on
* all processes.
*/
#if H5C_COLLECT_CACHE_STATS
@@ -3514,12 +3513,12 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
}
#endif /* H5C_COLLECT_CACHE_STATS */
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 3) Close the file. */
-
+
if ( pass ) {
if ( H5Fclose(file_id) < 0 ) {
@@ -3530,7 +3529,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3542,7 +3541,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
/* mdci_sbem_expected */ FALSE,
/* read_only */ FALSE,
/* set_mdci_fapl */ FALSE,
- /* config_fsm */ FALSE,
+ /* config_fsm */ FALSE,
/* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
@@ -3557,12 +3556,12 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
/* md_write_strat */ md_write_strat);
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 5) Verify that the file contains the expected data. */
-
+
if ( pass ) {
verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1);
@@ -3581,7 +3580,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
/* 6) Close the file. */
-
+
if ( pass ) {
if ( H5Fclose(file_id) < 0 ) {
@@ -3592,7 +3591,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3622,7 +3621,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
H5_FAILED();
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", failure_mssg);
}
}
@@ -3632,20 +3631,20 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
} /* verify_cache_image_RW() */
-
+
/*****************************************************************************
*
* Function: smoke_check_1()
*
- * Purpose: Initial smoke check to verify correct behaviour of cache
+ * Purpose: Initial smoke check to verify correct behaviour of cache
* image in combination with parallel.
- *
+ *
* As cache image is currently disabled in the parallel case,
* we construct a test file in parallel, verify it in serial
* and generate a cache image in passing, and then verify
* it again in parallel.
*
- * In passing, also verify that page buffering is silently
+ * In passing, also verify that page buffering is silently
* disabled in the parallel case. Needless to say, this part
* of the test will have to be re-worked when and if page
* buffering is supported in parallel.
@@ -3700,7 +3699,7 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* 1) Create a PHDF5 file without the cache image FAPL entry.
+ /* 1) Create a PHDF5 file without the cache image FAPL entry.
*
* Verify that a cache image is not requested
*/
@@ -3807,11 +3806,11 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* 7) Verify the datasets in the file backwards
+ /* 7) Verify the datasets in the file backwards
*
* Verify that only process 0 reads the cache image.
*
- * Verify that all other processes receive the cache
+ * Verify that all other processes receive the cache
* image block from process 0.
*/
@@ -3841,11 +3840,11 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
if ( ( mpi_rank == 0 ) && ( show_progress ) )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* Verify that all other processes receive the cache image block
+ /* Verify that all other processes receive the cache image block
* from process 0.
- *
- * Since we have alread verified that only process 0 has read the
- * image, it is sufficient to verify that the image was loaded on
+ *
+ * Since we have already verified that only process 0 has read the
+ * image, it is sufficient to verify that the image was loaded on
* all processes.
*/
#if H5C_COLLECT_CACHE_STATS
@@ -3906,11 +3905,11 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* 10) Verify the datasets in the file
+ /* 10) Verify the datasets in the file
*
* Verify that only process 0 reads the cache image.
*
- * Verify that all other processes receive the cache
+ * Verify that all other processes receive the cache
* image block from process 0.
*/
@@ -3940,11 +3939,11 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
if ( ( mpi_rank == 0 ) && ( show_progress ) )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* Verify that all other processes receive the cache image block
+ /* Verify that all other processes receive the cache image block
* from process 0.
- *
- * Since we have alread verified that only process 0 has read the
- * image, it is sufficient to verify that the image was loaded on
+ *
+ * Since we have already verified that only process 0 has read the
+ * image, it is sufficient to verify that the image was loaded on
* all processes.
*/
#if H5C_COLLECT_CACHE_STATS
@@ -4047,28 +4046,28 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
H5_FAILED();
- HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n",
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n",
fcn_name, failure_mssg);
}
}
return !pass;
-
+
} /* smoke_check_1() */
-
+
/*-------------------------------------------------------------------------
* Function: main
*
* Purpose: Run parallel tests on the cache image feature.
- *
- * At present, cache image is disabled in parallel, and
- * thus these tests are restructed to verifying that a
+ *
+ * At present, cache image is disabled in parallel, and
+ * thus these tests are restricted to verifying that a
* file with a cache image can be opened in the parallel
- * case, and verifying that instructions to create a
+ * case, and verifying that instructions to create a
* cache image are ignored in the parallel case.
*
- * WARNING: This test uses fork() and execve(), and
+ * WARNING: This test uses fork() and execve(), and
* therefore will not run on Windows.
*
* Return: Success: 0
@@ -4106,14 +4105,14 @@ main(int argc, char **argv)
HDfflush(stdout);
i = 0;
- while ( ( FILENAMES[i] != NULL ) && ( i < TEST_FILES_TO_CONSTRUCT ) ) {
+ while ( ( FILENAMES[i] != NULL ) && ( i < TEST_FILES_TO_CONSTRUCT ) ) {
HDfprintf(stdout, " writing %s ... ", FILENAMES[i]);
HDfflush(stdout);
construct_test_file(i);
if ( pass ) {
-
+
printf("done.\n");
HDfflush(stdout);
@@ -4194,13 +4193,13 @@ main(int argc, char **argv)
/* we may need to play with the path here */
if ( execv("t_cache_image", child_argv) == -1 ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"execl() of setup process failed. errno = %d(%s)\n",
errno, strerror(errno));
exit(1);
}
- } else if ( child_pid != -1 ) {
+ } else if ( child_pid != -1 ) {
/* this is the parent process -- wait until child is done */
if ( -1 == waitpid(child_pid, &child_status, WUNTRACED)) {
@@ -4216,7 +4215,7 @@ main(int argc, char **argv)
} else {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"testfile construction complete -- proceeding with tests.\n");
}
} else { /* fork failed */
@@ -4229,14 +4228,14 @@ main(int argc, char **argv)
MPI_Barrier(MPI_COMM_WORLD);
- nerrs += verify_cache_image_RO(0,
+ nerrs += verify_cache_image_RO(0,
H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, mpi_rank);
#if 1
- nerrs += verify_cache_image_RO(1,
+ nerrs += verify_cache_image_RO(1,
H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, mpi_rank);
- nerrs += verify_cache_image_RW(0,
+ nerrs += verify_cache_image_RW(0,
H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, mpi_rank);
- nerrs += verify_cache_image_RW(1,
+ nerrs += verify_cache_image_RW(1,
H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, mpi_rank);
nerrs += smoke_check_1(comm, info, mpi_rank, mpi_size);
#endif
diff --git a/testpar/t_pflush1.c b/testpar/t_pflush1.c
index 0782f3d..4677bfe 100644
--- a/testpar/t_pflush1.c
+++ b/testpar/t_pflush1.c
@@ -15,13 +15,12 @@
* Programmer: Leon Arber <larber@uiuc.edu>
* Sept. 28, 2006.
*
- * Purpose: This is the first half of a two-part test that makes sure
- * that a file can be read after a parallel application crashes as long
- * as the file was flushed first. We simulate a crash by
- * calling _exit(0) since this doesn't flush HDF5 caches but
- * still exits with success.
+ * Purpose: This is the first half of a two-part test that makes sure
+ * that a file can be read after a parallel application crashes as long
+ * as the file was flushed first. We simulate a crash by
+ * calling _exit(0) since this doesn't flush HDF5 caches but
+ * still exits with success.
*/
-#include <mpi.h>
#include "h5test.h"
const char *FILENAME[] = {
@@ -30,18 +29,18 @@ const char *FILENAME[] = {
NULL
};
-static double the_data[100][100];
+static double the_data[100][100];
/*-------------------------------------------------------------------------
- * Function: create_file
+ * Function: create_file
*
- * Purpose: Creates file used in part 1 of the test
+ * Purpose: Creates file used in part 1 of the test
*
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: 1
+ * Failure: 1
*
- * Programmer: Leon Arber
+ * Programmer: Leon Arber
* Sept. 26, 2006
*
* Modifications:
@@ -51,10 +50,10 @@ static double the_data[100][100];
static hid_t
create_file(char* name, hid_t fapl)
{
- hid_t file, dcpl, space, dset, groups, grp, plist;
- hsize_t ds_size[2] = {100, 100};
- hsize_t ch_size[2] = {5, 5};
- hsize_t i, j;
+ hid_t file, dcpl, space, dset, groups, grp, plist;
+ hsize_t ds_size[2] = {100, 100};
+ hsize_t ch_size[2] = {5, 5};
+ hsize_t i, j;
@@ -65,7 +64,7 @@ create_file(char* name, hid_t fapl)
if(H5Pset_chunk(dcpl, 2, ch_size) < 0) goto error;
if((space = H5Screate_simple(2, ds_size, NULL)) < 0) goto error;
if((dset = H5Dcreate2(file, "dset", H5T_NATIVE_FLOAT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
- goto error;
+ goto error;
plist = H5Pcreate(H5P_DATASET_XFER);
H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
@@ -73,22 +72,22 @@ create_file(char* name, hid_t fapl)
/* Write some data */
for(i = 0; i < ds_size[0]; i++) {
- /*
- * The extra cast in the following statement is a bug workaround
- * for the Win32 version 5.0 compiler.
- * 1998-11-06 ptl
- */
- for(j = 0; j < ds_size[1]; j++)
- the_data[i][j] = (double)(hssize_t)i/(hssize_t)(j+1);
+ /*
+ * The extra cast in the following statement is a bug workaround
+ * for the Win32 version 5.0 compiler.
+ * 1998-11-06 ptl
+ */
+ for(j = 0; j < ds_size[1]; j++)
+ the_data[i][j] = (double)(hssize_t)i/(hssize_t)(j+1);
}
if(H5Dwrite(dset, H5T_NATIVE_DOUBLE, space, space, plist, the_data) < 0) goto error;
/* Create some groups */
if((groups = H5Gcreate2(file, "some_groups", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) goto error;
for(i = 0; i < 100; i++) {
- sprintf(name, "grp%02u", (unsigned)i);
- if((grp = H5Gcreate2(groups, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) goto error;
- if(H5Gclose(grp) < 0) goto error;
+ sprintf(name, "grp%02u", (unsigned)i);
+ if((grp = H5Gcreate2(groups, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) goto error;
+ if(H5Gclose(grp) < 0) goto error;
}
return file;
@@ -98,20 +97,20 @@ error:
}
/*-------------------------------------------------------------------------
- * Function: main
+ * Function: main
*
- * Purpose: Part 1 of a two-part H5Fflush() test.
+ * Purpose: Part 1 of a two-part H5Fflush() test.
*
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: 1
+ * Failure: 1
*
- * Programmer: Robb Matzke
+ * Programmer: Robb Matzke
* Friday, October 23, 1998
*
* Modifications:
- * Leon Arber
- * Sept. 26, 2006, expand test to check for failure if H5Fflush is not called.
+ * Leon Arber
+ * Sept. 26, 2006, expand test to check for failure if H5Fflush is not called.
*
*
*-------------------------------------------------------------------------
@@ -120,8 +119,8 @@ int
main(int argc, char* argv[])
{
hid_t file1, file2, fapl;
- MPI_File *mpifh_p = NULL;
- char name[1024];
+ MPI_File *mpifh_p = NULL;
+ char name[1024];
const char *envval = NULL;
int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -135,26 +134,26 @@ main(int argc, char* argv[])
H5Pset_fapl_mpio(fapl, comm, info);
if(mpi_rank == 0)
- TESTING("H5Fflush (part1)");
+ TESTING("H5Fflush (part1)");
envval = HDgetenv("HDF5_DRIVER");
if(envval == NULL)
envval = "nomatch";
if(HDstrcmp(envval, "split")) {
- /* Create the file */
- h5_fixname(FILENAME[0], fapl, name, sizeof name);
- file1 = create_file(name, fapl);
- /* Flush and exit without closing the library */
- if(H5Fflush(file1, H5F_SCOPE_GLOBAL) < 0) goto error;
+ /* Create the file */
+ h5_fixname(FILENAME[0], fapl, name, sizeof name);
+ file1 = create_file(name, fapl);
+ /* Flush and exit without closing the library */
+ if(H5Fflush(file1, H5F_SCOPE_GLOBAL) < 0) goto error;
- /* Create the other file which will not be flushed */
- h5_fixname(FILENAME[1], fapl, name, sizeof name);
- file2 = create_file(name, fapl);
+ /* Create the other file which will not be flushed */
+ h5_fixname(FILENAME[1], fapl, name, sizeof name);
+ file2 = create_file(name, fapl);
- if(mpi_rank == 0)
- PASSED();
- fflush(stdout);
- fflush(stderr);
+ if(mpi_rank == 0)
+ PASSED();
+ fflush(stdout);
+ fflush(stderr);
} /* end if */
else {
SKIPPED();
@@ -171,21 +170,21 @@ main(int argc, char* argv[])
/* close file1 */
if(H5Fget_vfd_handle(file1, fapl, (void **)&mpifh_p) < 0) {
- printf("H5Fget_vfd_handle for file1 failed\n");
- goto error;
+ printf("H5Fget_vfd_handle for file1 failed\n");
+ goto error;
} /* end if */
if(MPI_File_close(mpifh_p) != MPI_SUCCESS) {
- printf("MPI_File_close for file1 failed\n");
- goto error;
+ printf("MPI_File_close for file1 failed\n");
+ goto error;
} /* end if */
/* close file2 */
if(H5Fget_vfd_handle(file2, fapl, (void **)&mpifh_p) < 0) {
- printf("H5Fget_vfd_handle for file2 failed\n");
- goto error;
+ printf("H5Fget_vfd_handle for file2 failed\n");
+ goto error;
} /* end if */
if(MPI_File_close(mpifh_p) != MPI_SUCCESS) {
- printf("MPI_File_close for file2 failed\n");
- goto error;
+ printf("MPI_File_close for file2 failed\n");
+ goto error;
} /* end if */
fflush(stdout);
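Taken together with the earlier H5Fflush() call, this is the crash simulation described in the file header: flush file1 through HDF5, close the raw MPI handles underneath both files, and leave via _exit() so the library never gets a chance to flush anything else. In outline, for one file; file1 and fapl are the identifiers set up earlier in main():

    /* flush everything belonging to file1 so it is readable later */
    if ( H5Fflush(file1, H5F_SCOPE_GLOBAL) < 0 ) {
        /* handle error */
    }

    /* close the underlying MPI file handle directly -- HDF5 is bypassed,
     * so its caches are not flushed again
     */
    MPI_File *mpifh_p = NULL;

    if ( H5Fget_vfd_handle(file1, fapl, (void **)&mpifh_p) < 0 ) {
        /* handle error */
    }

    if ( MPI_File_close(mpifh_p) != MPI_SUCCESS ) {
        /* handle error */
    }

    /* simulate the crash: exit without closing the HDF5 library */
    fflush(stdout);
    _exit(0);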
diff --git a/testpar/t_pread.c b/testpar/t_pread.c
index a527503..aac5bee 100644
--- a/testpar/t_pread.c
+++ b/testpar/t_pread.c
@@ -16,7 +16,6 @@
*
*/
-#include "h5test.h"
#include "testpar.h"
/* The collection of files is included below to aid
@@ -42,7 +41,7 @@ const char *FILENAMES[NFILENAME + 1]={"t_pread_data_file",
#define COUNT 1000
hbool_t pass = true;
-static const char *random_hdf5_text =
+static const char *random_hdf5_text =
"Now is the time for all first-time-users of HDF5 to read their \
manual or go thru the tutorials!\n\
While you\'re at it, now is also the time to read up on MPI-IO.";
@@ -58,7 +57,7 @@ static int test_parallel_read(MPI_Comm comm, int mpi_rank, int group);
static char *test_argv0 = NULL;
extern char *dirname(char *path); /* Avoids additional includes */
-
+
/*-------------------------------------------------------------------------
* Function: generate_test_file
*
@@ -79,7 +78,7 @@ extern char *dirname(char *path); /* Avoids additional includes */
* In the overall scheme of running the test, we'll call
* this function twice: first as a collection of all MPI
* processes and then a second time with the processes split
- * more or less in half. Each sub group will operate
+ * more or less in half. Each subgroup will operate
* collectively on their assigned file. This split into
* subgroups validates that parallel groups can successfully
* open and read data independently from the other parallel
@@ -93,7 +92,7 @@ extern char *dirname(char *path); /* Avoids additional includes */
* 10/1/17
*
* Modifications:
- *
+ *
*-------------------------------------------------------------------------
*/
static int
@@ -110,14 +109,14 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
int group_size;
int group_rank;
int local_failure = 0;
- int global_failures = 0;
+ int global_failures = 0;
hsize_t count = COUNT;
hsize_t i;
hsize_t offset;
hsize_t dims[1] = {0};
hid_t file_id = -1;
- hid_t memspace = -1;
- hid_t filespace = -1;
+ hid_t memspace = -1;
+ hid_t filespace = -1;
hid_t fapl_id = -1;
hid_t dxpl_id = -1;
hid_t dset_id = -1;
@@ -153,7 +152,7 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
if ( pass ) {
if ( comm == MPI_COMM_WORLD ) { /* Test 1 */
file_index = 0;
- }
+ }
else if ( group_id == 0 ) { /* Test 2 group 0 */
file_index = 3;
}
@@ -161,8 +160,8 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
file_index = 6;
}
- /* The 'group_filename' is just a temp variable and
- * is used to call into the h5_fixname function. No
+ /* The 'group_filename' is just a temp variable and
+ * is used to call into the h5_fixname function. No
* need to worry that we reassign it for each file!
*/
group_filename = FILENAMES[file_index];
@@ -234,9 +233,9 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
}
}
- /* create the data file */
+ /* create the data file */
if ( pass ) {
- if ( (file_id = H5Fcreate(data_filename, H5F_ACC_TRUNC,
+ if ( (file_id = H5Fcreate(data_filename, H5F_ACC_TRUNC,
H5P_DEFAULT, fapl_id)) < 0 ) {
pass = FALSE;
failure_mssg = "H5Fcreate() failed.\n";
@@ -276,7 +275,7 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
if ( pass ) {
offset = (hsize_t)group_rank * (hsize_t)COUNT;
- if ( (H5Sselect_hyperslab(filespace, H5S_SELECT_SET, &offset,
+ if ( (H5Sselect_hyperslab(filespace, H5S_SELECT_SET, &offset,
NULL, &count, NULL)) < 0 ) {
pass = FALSE;
failure_mssg = "H5Sselect_hyperslab() failed.\n";
@@ -284,8 +283,8 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
}
if ( pass ) {
- if ( (dset_id = H5Dcreate2(file_id, "dataset0", H5T_NATIVE_FLOAT,
- filespace, H5P_DEFAULT, H5P_DEFAULT,
+ if ( (dset_id = H5Dcreate2(file_id, "dataset0", H5T_NATIVE_FLOAT,
+ filespace, H5P_DEFAULT, H5P_DEFAULT,
H5P_DEFAULT)) < 0 ) {
pass = false;
failure_mssg = "H5Dcreate2() failed.\n";
@@ -293,7 +292,7 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
}
if ( pass ) {
- if ( (H5Dwrite(dset_id, H5T_NATIVE_FLOAT, memspace,
+ if ( (H5Dwrite(dset_id, H5T_NATIVE_FLOAT, memspace,
filespace, dxpl_id, data_slice)) < 0 ) {
pass = false;
failure_mssg = "H5Dwrite() failed.\n";
@@ -344,15 +343,15 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
}
/* Add a userblock to the head of the datafile.
- * We will use this to for a functional test of the
+ * We will use this for a functional test of the
* file open optimization. This superblock
* relocation is done by the rank 0 process associated
* with the communicator being used. For test 1, we
* utilize MPI_COMM_WORLD, so group_rank 0 is the
* same as mpi_rank 0. For test 2 which utilizes
* two groups resulting from an MPI_Comm_split, we
- * will have parallel groups and hence two
- * group_rank(0) processes. Each parallel group
+ * will have parallel groups and hence two
+ * group_rank(0) processes. Each parallel group
* will create a unique file with different text
* headers and different data.
*
@@ -361,7 +360,7 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
if ( group_rank == 0 ) {
const char *text_to_write;
- size_t bytes_to_write;
+ size_t bytes_to_write;
if (group_id == 0)
text_to_write = random_hdf5_text;
@@ -406,7 +405,7 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
* if h5jam is co-located here. Otherwise, the autotools
* put things into directories, hence the relative path.
*/
- if (test_argv0 != NULL) {
+ if (test_argv0 != NULL) {
HDstrncpy(exe_path, test_argv0, sizeof(exe_path));
if ( (exe_dirname = (char *)dirname(exe_path)) != NULL) {
HDsprintf(cmd, "%s/h5jam", exe_dirname);
@@ -430,13 +429,13 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
}
/* collect results from other processes.
- * Only overwrite the failure message if no preveious error
+ * Only overwrite the failure message if no previous error
* has been detected
*/
local_failure = ( pass ? 0 : 1 );
/* This is a global all reduce (NOT group specific) */
- if ( MPI_Allreduce(&local_failure, &global_failures, 1,
+ if ( MPI_Allreduce(&local_failure, &global_failures, 1,
MPI_INT, MPI_SUM, MPI_COMM_WORLD) != MPI_SUCCESS ) {
if ( pass ) {
pass = FALSE;
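The failure collection done here is the idiom used throughout these tests: each rank contributes a 0/1 flag and a sum reduction over MPI_COMM_WORLD tells every rank whether anything failed anywhere. A minimal sketch:

    int local_failure   = ( pass ? 0 : 1 );
    int global_failures = 0;

    /* sum the per-rank failure flags; the result is identical on all ranks */
    if ( MPI_Allreduce(&local_failure, &global_failures, 1,
                       MPI_INT, MPI_SUM, MPI_COMM_WORLD) != MPI_SUCCESS ) {
        pass = FALSE;           /* the reduction itself failed */
    }

    if ( global_failures > 0 ) {
        /* at least one rank reported a failure */
    }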
@@ -468,7 +467,7 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
} /* generate_test_file() */
-
+
/*-------------------------------------------------------------------------
* Function: test_parallel_read
*
@@ -511,7 +510,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
const char *group_filename = NULL;
char reloc_data_filename[FILENAME_BUF_SIZE];
int local_failure = 0;
- int global_failures = 0;
+ int global_failures = 0;
int group_size;
int group_rank;
hid_t fapl_id = -1;
@@ -597,7 +596,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
/* open the file -- should have user block, exercising the optimization */
if ( pass ) {
- if ( (file_id = H5Fopen(reloc_data_filename,
+ if ( (file_id = H5Fopen(reloc_data_filename,
H5F_ACC_RDONLY, fapl_id)) < 0 ) {
pass = FALSE;
failure_mssg = "H5Fopen() failed\n";
@@ -631,7 +630,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
if ( pass ) {
offset = (hsize_t)group_rank * count;
- if ( (H5Sselect_hyperslab(filespace, H5S_SELECT_SET,
+ if ( (H5Sselect_hyperslab(filespace, H5S_SELECT_SET,
&offset, NULL, &count, NULL)) < 0 ) {
pass = FALSE;
failure_mssg = "H5Sselect_hyperslab() failed\n";
@@ -640,14 +639,14 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
/* read this process's section of the data */
if ( pass ) {
- if ( (H5Dread(dset_id, H5T_NATIVE_FLOAT, memspace,
+ if ( (H5Dread(dset_id, H5T_NATIVE_FLOAT, memspace,
filespace, H5P_DEFAULT, data_slice)) < 0 ) {
pass = FALSE;
failure_mssg = "H5Dread() failed\n";
}
}
-
- /* verify the data */
+
+ /* verify the data */
if ( pass ) {
nextValue = (float)((hsize_t)mpi_rank * count);
i = 0;
@@ -708,7 +707,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
*/
local_failure = ( pass ? 0 : 1 );
- if ( MPI_Allreduce( &local_failure, &global_failures, 1,
+ if ( MPI_Allreduce( &local_failure, &global_failures, 1,
MPI_INT, MPI_SUM, MPI_COMM_WORLD) != MPI_SUCCESS ) {
if ( pass ) {
pass = FALSE;
@@ -739,11 +738,11 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
}
- return( ! pass );
+ return( ! pass );
} /* test_parallel_read() */
-
+
/*-------------------------------------------------------------------------
* Function: main
*
@@ -830,7 +829,7 @@ main( int argc, char **argv)
/* ------ Create two (2) MPI groups ------
*
- * We split MPI_COMM_WORLD into 2 more or less equal sized
+ * We split MPI_COMM_WORLD into 2 more or less equal sized
* groups. The resulting communicators will be used to generate
* two HDF files which in turn will be opened in parallel and the
* contents verified in the second read test below.
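The split itself is a single MPI call. A sketch of the general pattern; the exact color rule the test uses may differ, but the idea is that the lower half of the world ranks lands in one communicator and the upper half in the other, with rank order preserved:

    int      mpi_rank, mpi_size, which_group;
    MPI_Comm group_comm;

    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

    /* color selects the group, the key (mpi_rank) preserves rank order */
    which_group = ( mpi_rank < (mpi_size / 2) ) ? 0 : 1;

    if ( MPI_Comm_split(MPI_COMM_WORLD, which_group, mpi_rank,
                        &group_comm) != MPI_SUCCESS ) {
        /* split failed */
    }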
@@ -858,7 +857,7 @@ main( int argc, char **argv)
}
goto finish;
}
-
+
/* We generate the file used for test 2 */
nerrs += generate_test_file( group_comm, mpi_rank, which_group );
@@ -917,7 +916,7 @@ finish:
HDfprintf(stdout, "===================================\n");
if ( nerrs > 0 ) {
-
+
HDfprintf(stdout, "***%s detected %d failures***\n", header, nerrs);
}
else {
diff --git a/testpar/t_shapesame.c b/testpar/t_shapesame.c
index 1052a69..e695bfc 100644
--- a/testpar/t_shapesame.c
+++ b/testpar/t_shapesame.c
@@ -12,7 +12,7 @@
/*
This program will test independent and collective reads and writes between
- selections of different rank that non-the-less are deemed as having the
+ selections of different rank that nonetheless are deemed as having the
same shape by H5Sselect_shape_same().
*/
@@ -22,8 +22,6 @@
#define H5S_TESTING
-#include "hdf5.h"
-#include "H5private.h"
#include "testphdf5.h"
#include "H5Spkg.h" /* Dataspaces */
@@ -31,24 +29,24 @@
/* On Lustre (and perhaps other parallel file systems?), we have severe
* slow downs if two or more processes attempt to access the same file system
* block. To minimize this problem, we set alignment in the shape same tests
- * to the default Lustre block size -- which greatly reduces contention in
+ * to the default Lustre block size -- which greatly reduces contention in
* the chunked dataset case.
*/
-#define SHAPE_SAME_TEST_ALIGNMENT ((hsize_t)(4 * 1024 * 1024))
+#define SHAPE_SAME_TEST_ALIGNMENT ((hsize_t)(4 * 1024 * 1024))
-#define PAR_SS_DR_MAX_RANK 5 /* must update code if this changes */
+#define PAR_SS_DR_MAX_RANK 5 /* must update code if this changes */
struct hs_dr_pio_test_vars_t
{
- int mpi_size;
+ int mpi_size;
int mpi_rank;
MPI_Comm mpi_comm;
- MPI_Info mpi_info;
+ MPI_Info mpi_info;
int test_num;
int edge_size;
- int checker_edge_size;
+ int checker_edge_size;
int chunk_edge_size;
int small_rank;
int large_rank;
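One plausible way the alignment constant above is applied is through H5Pset_alignment() on the file access template the test builds later; the zero threshold shown here (align every allocation) is an assumption, not necessarily the value the test passes:

    hid_t acc_tpl = H5Pcreate(H5P_FILE_ACCESS);

    /* with a threshold of 0, every file allocation is placed on a
     * SHAPE_SAME_TEST_ALIGNMENT (4 MiB) boundary, i.e. on Lustre-sized
     * blocks
     */
    if ( H5Pset_alignment(acc_tpl, (hsize_t)0, SHAPE_SAME_TEST_ALIGNMENT) < 0 ) {
        /* handle error */
    }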
@@ -64,13 +62,13 @@ struct hs_dr_pio_test_vars_t
int small_ds_offset;
int large_ds_offset;
hid_t fid; /* HDF5 file ID */
- hid_t xfer_plist;
+ hid_t xfer_plist;
hid_t full_mem_small_ds_sid;
hid_t full_file_small_ds_sid;
hid_t mem_small_ds_sid;
hid_t file_small_ds_sid_0;
hid_t file_small_ds_sid_1;
- hid_t small_ds_slice_sid;
+ hid_t small_ds_slice_sid;
hid_t full_mem_large_ds_sid;
hid_t full_file_large_ds_sid;
hid_t mem_large_ds_sid;
@@ -78,7 +76,7 @@ struct hs_dr_pio_test_vars_t
hid_t file_large_ds_sid_1;
hid_t file_large_ds_process_slice_sid;
hid_t mem_large_ds_process_slice_sid;
- hid_t large_ds_slice_sid;
+ hid_t large_ds_slice_sid;
hid_t small_dataset; /* Dataset ID */
hid_t large_dataset; /* Dataset ID */
size_t small_ds_size;
@@ -96,25 +94,25 @@ struct hs_dr_pio_test_vars_t
hsize_t * count_ptr;
hsize_t * block_ptr;
int skips;
- int max_skips;
- int64_t total_tests;
- int64_t tests_run;
- int64_t tests_skipped;
+ int max_skips;
+ int64_t total_tests;
+ int64_t tests_run;
+ int64_t tests_skipped;
};
/*-------------------------------------------------------------------------
- * Function: hs_dr_pio_test__setup()
+ * Function: hs_dr_pio_test__setup()
*
- * Purpose: Do setup for tests of I/O to/from hyperslab selections of
- * different rank in the parallel case.
+ * Purpose: Do setup for tests of I/O to/from hyperslab selections of
+ * different rank in the parallel case.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 8/9/11
+ * Programmer: JRM -- 8/9/11
*
* Modifications:
*
- * None.
+ * None.
*
*-------------------------------------------------------------------------
*/
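/*
 * Hedged sketch of the dataspace geometry this setup routine builds:
 * dims[0] is (mpi_size + 1) so every rank owns one slice of the slowest
 * changing dimension (plus one spare), the remaining dimensions are
 * edge_size, and the "slice" dataspace drops dims[0].  Names and the
 * return convention are illustrative.
 */
#include "hdf5.h"

static herr_t
make_spaces(int mpi_size, int ds_rank, hsize_t edge_size,
            hid_t *full_sid /* out */, hid_t *slice_sid /* out */)
{
    hsize_t dims[PAR_SS_DR_MAX_RANK];
    int     i;

    dims[0] = (hsize_t)(mpi_size + 1);
    for (i = 1; i < PAR_SS_DR_MAX_RANK; i++)
        dims[i] = edge_size;

    *full_sid  = H5Screate_simple(ds_rank, dims, NULL);
    /* the slice space has one fewer dimension and starts at dims[1] */
    *slice_sid = H5Screate_simple(ds_rank - 1, &dims[1], NULL);

    return (*full_sid < 0 || *slice_sid < 0) ? (herr_t)-1 : (herr_t)0;
}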
@@ -133,21 +131,21 @@ hs_dr_pio_test__setup(const int test_num,
const int express_test,
struct hs_dr_pio_test_vars_t * tv_ptr)
{
-#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG
const char *fcnName = "hs_dr_pio_test__setup()";
#endif /* CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG */
const char *filename;
- hbool_t mis_match = FALSE;
- int i;
+ hbool_t mis_match = FALSE;
+ int i;
int mrc;
- int mpi_rank; /* needed by the VRFY macro */
- uint32_t expected_value;
+ int mpi_rank; /* needed by the VRFY macro */
+ uint32_t expected_value;
uint32_t * ptr_0;
uint32_t * ptr_1;
- hid_t acc_tpl; /* File access templates */
+ hid_t acc_tpl; /* File access templates */
hid_t small_ds_dcpl_id = H5P_DEFAULT;
hid_t large_ds_dcpl_id = H5P_DEFAULT;
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
HDassert( edge_size >= 6 );
HDassert( edge_size >= chunk_edge_size );
@@ -219,7 +217,7 @@ hs_dr_pio_test__setup(const int test_num,
tv_ptr->small_ds_buf_2 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_size);
VRFY((tv_ptr->small_ds_buf_2 != NULL), "malloc of small_ds_buf_2 succeeded");
- tv_ptr->small_ds_slice_buf =
+ tv_ptr->small_ds_slice_buf =
(uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
VRFY((tv_ptr->small_ds_slice_buf != NULL), "malloc of small_ds_slice_buf succeeded");
@@ -232,7 +230,7 @@ hs_dr_pio_test__setup(const int test_num,
tv_ptr->large_ds_buf_2 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_size);
VRFY((tv_ptr->large_ds_buf_2 != NULL), "malloc of large_ds_buf_2 succeeded");
- tv_ptr->large_ds_slice_buf =
+ tv_ptr->large_ds_slice_buf =
(uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_slice_size);
VRFY((tv_ptr->large_ds_slice_buf != NULL), "malloc of large_ds_slice_buf succeeded");
@@ -256,21 +254,21 @@ hs_dr_pio_test__setup(const int test_num,
filename = (const char *)GetTestParameters();
HDassert( filename != NULL );
-#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG
if ( MAINPROCESS ) {
HDfprintf(stdout, "%d: test num = %d.\n", tv_ptr->mpi_rank, tv_ptr->test_num);
HDfprintf(stdout, "%d: mpi_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->mpi_size);
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d: small/large rank = %d/%d, use_collective_io = %d.\n",
- tv_ptr->mpi_rank, tv_ptr->small_rank, tv_ptr->large_rank,
+ tv_ptr->mpi_rank, tv_ptr->small_rank, tv_ptr->large_rank,
(int)use_collective_io);
HDfprintf(stdout, "%d: edge_size = %d, chunk_edge_size = %d.\n",
tv_ptr->mpi_rank, tv_ptr->edge_size, tv_ptr->chunk_edge_size);
HDfprintf(stdout, "%d: checker_edge_size = %d.\n",
tv_ptr->mpi_rank, tv_ptr->checker_edge_size);
HDfprintf(stdout, "%d: small_ds_size = %d, large_ds_size = %d.\n",
- tv_ptr->mpi_rank, (int)(tv_ptr->small_ds_size),
+ tv_ptr->mpi_rank, (int)(tv_ptr->small_ds_size),
(int)(tv_ptr->large_ds_size));
HDfprintf(stdout, "%d: filename = %s.\n", tv_ptr->mpi_rank, filename);
}
@@ -305,78 +303,78 @@ hs_dr_pio_test__setup(const int test_num,
/* setup dims: */
tv_ptr->dims[0] = (hsize_t)(tv_ptr->mpi_size + 1);
- tv_ptr->dims[1] = tv_ptr->dims[2] =
+ tv_ptr->dims[1] = tv_ptr->dims[2] =
tv_ptr->dims[3] = tv_ptr->dims[4] = (hsize_t)(tv_ptr->edge_size);
/* Create small ds dataspaces */
- tv_ptr->full_mem_small_ds_sid =
+ tv_ptr->full_mem_small_ds_sid =
H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->full_mem_small_ds_sid != 0),
+ VRFY((tv_ptr->full_mem_small_ds_sid != 0),
"H5Screate_simple() full_mem_small_ds_sid succeeded");
- tv_ptr->full_file_small_ds_sid =
+ tv_ptr->full_file_small_ds_sid =
H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->full_file_small_ds_sid != 0),
+ VRFY((tv_ptr->full_file_small_ds_sid != 0),
"H5Screate_simple() full_file_small_ds_sid succeeded");
tv_ptr->mem_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->mem_small_ds_sid != 0),
+ VRFY((tv_ptr->mem_small_ds_sid != 0),
"H5Screate_simple() mem_small_ds_sid succeeded");
tv_ptr->file_small_ds_sid_0 = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->file_small_ds_sid_0 != 0),
+ VRFY((tv_ptr->file_small_ds_sid_0 != 0),
"H5Screate_simple() file_small_ds_sid_0 succeeded");
/* used by checker board tests only */
tv_ptr->file_small_ds_sid_1 = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->file_small_ds_sid_1 != 0),
+ VRFY((tv_ptr->file_small_ds_sid_1 != 0),
"H5Screate_simple() file_small_ds_sid_1 succeeded");
- tv_ptr->small_ds_slice_sid =
+ tv_ptr->small_ds_slice_sid =
H5Screate_simple(tv_ptr->small_rank - 1, &(tv_ptr->dims[1]), NULL);
- VRFY((tv_ptr->small_ds_slice_sid != 0),
+ VRFY((tv_ptr->small_ds_slice_sid != 0),
"H5Screate_simple() small_ds_slice_sid succeeded");
/* Create large ds dataspaces */
- tv_ptr->full_mem_large_ds_sid =
+ tv_ptr->full_mem_large_ds_sid =
H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->full_mem_large_ds_sid != 0),
+ VRFY((tv_ptr->full_mem_large_ds_sid != 0),
"H5Screate_simple() full_mem_large_ds_sid succeeded");
- tv_ptr->full_file_large_ds_sid =
+ tv_ptr->full_file_large_ds_sid =
H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->full_file_large_ds_sid != FAIL),
+ VRFY((tv_ptr->full_file_large_ds_sid != FAIL),
"H5Screate_simple() full_file_large_ds_sid succeeded");
tv_ptr->mem_large_ds_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->mem_large_ds_sid != FAIL),
+ VRFY((tv_ptr->mem_large_ds_sid != FAIL),
"H5Screate_simple() mem_large_ds_sid succeeded");
tv_ptr->file_large_ds_sid_0 = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->file_large_ds_sid_0 != FAIL),
+ VRFY((tv_ptr->file_large_ds_sid_0 != FAIL),
"H5Screate_simple() file_large_ds_sid_0 succeeded");
/* used by checker board tests only */
tv_ptr->file_large_ds_sid_1 = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->file_large_ds_sid_1 != FAIL),
+ VRFY((tv_ptr->file_large_ds_sid_1 != FAIL),
"H5Screate_simple() file_large_ds_sid_1 succeeded");
- tv_ptr->mem_large_ds_process_slice_sid =
+ tv_ptr->mem_large_ds_process_slice_sid =
H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->mem_large_ds_process_slice_sid != FAIL),
+ VRFY((tv_ptr->mem_large_ds_process_slice_sid != FAIL),
"H5Screate_simple() mem_large_ds_process_slice_sid succeeded");
- tv_ptr->file_large_ds_process_slice_sid =
+ tv_ptr->file_large_ds_process_slice_sid =
H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->file_large_ds_process_slice_sid != FAIL),
+ VRFY((tv_ptr->file_large_ds_process_slice_sid != FAIL),
"H5Screate_simple() file_large_ds_process_slice_sid succeeded");
- tv_ptr->large_ds_slice_sid =
+ tv_ptr->large_ds_slice_sid =
H5Screate_simple(tv_ptr->large_rank - 1, &(tv_ptr->dims[1]), NULL);
- VRFY((tv_ptr->large_ds_slice_sid != 0),
+ VRFY((tv_ptr->large_ds_slice_sid != 0),
"H5Screate_simple() large_ds_slice_sid succeeded");
@@ -386,18 +384,18 @@ hs_dr_pio_test__setup(const int test_num,
*/
if ( tv_ptr->chunk_edge_size > 0 ) {
- /* Under Lustre (and perhaps other parallel file systems?) we get
- * locking delays when two or more processes attempt to access the
+ /* Under Lustre (and perhaps other parallel file systems?) we get
+ * locking delays when two or more processes attempt to access the
* same file system block.
*
- * To minimize this problem, I have changed chunk_dims[0]
+ * To minimize this problem, I have changed chunk_dims[0]
* from (mpi_size + 1) to just 1 when any sort of express test is
- * selected. Given the structure of the test, and assuming we
- * set the alignment large enough, this avoids the contention
- * issue by seeing to it that each chunk is only accessed by one
+ * selected. Given the structure of the test, and assuming we
+ * set the alignment large enough, this avoids the contention
+ * issue by seeing to it that each chunk is only accessed by one
* process.
*
- * One can argue as to whether this is a good thing to do in our
+ * One can argue as to whether this is a good thing to do in our
* tests, but for now it is necessary if we want the test to complete
* in a reasonable amount of time.
*
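/*
 * Hedged sketch of the chunking decision described above: when an express
 * test is selected, chunk_dims[0] drops to 1 so that, together with a large
 * alignment, no two ranks share a chunk.  The dcpl handling is illustrative;
 * the real code applies this to small_ds_dcpl_id and large_ds_dcpl_id.
 */
#include "hdf5.h"

static hid_t
make_chunked_dcpl(int express_test, int mpi_size, int ds_rank,
                  hsize_t chunk_edge_size)
{
    hsize_t chunk_dims[PAR_SS_DR_MAX_RANK];
    hid_t   dcpl = H5Pcreate(H5P_DATASET_CREATE);
    int     i;

    /* one chunk per rank slice when running an express test */
    chunk_dims[0] = (express_test > 0) ? 1 : (hsize_t)(mpi_size + 1);
    for (i = 1; i < PAR_SS_DR_MAX_RANK; i++)
        chunk_dims[i] = chunk_edge_size;

    if (dcpl >= 0 && H5Pset_chunk(dcpl, ds_rank, chunk_dims) < 0) {
        H5Pclose(dcpl);
        dcpl = -1;
    }
    return dcpl;
}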
@@ -411,8 +409,8 @@ hs_dr_pio_test__setup(const int test_num,
tv_ptr->chunk_dims[0] = 1;
}
- tv_ptr->chunk_dims[1] = tv_ptr->chunk_dims[2] =
- tv_ptr->chunk_dims[3] =
+ tv_ptr->chunk_dims[1] = tv_ptr->chunk_dims[2] =
+ tv_ptr->chunk_dims[3] =
tv_ptr->chunk_dims[4] = (hsize_t)(tv_ptr->chunk_edge_size);
small_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
@@ -511,7 +509,7 @@ hs_dr_pio_test__setup(const int test_num,
/* write the initial value of the small data set to file */
- ret = H5Dwrite(tv_ptr->small_dataset, tv_ptr->dset_type, tv_ptr->mem_small_ds_sid,
+ ret = H5Dwrite(tv_ptr->small_dataset, tv_ptr->dset_type, tv_ptr->mem_small_ds_sid,
tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_0);
VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded");
@@ -524,8 +522,8 @@ hs_dr_pio_test__setup(const int test_num,
VRFY((mrc==MPI_SUCCESS), "Sync after small dataset writes");
}
- /* read the small data set back to verify that it contains the
- * expected data. Note that each process reads in the entire
+ /* read the small data set back to verify that it contains the
+ * expected data. Note that each process reads in the entire
* data set and verifies it.
*/
ret = H5Dread(tv_ptr->small_dataset,
@@ -574,7 +572,7 @@ hs_dr_pio_test__setup(const int test_num,
tv_ptr->count,
tv_ptr->block);
VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) suceeded");
-
+
/* In passing, setup the process slice data spaces as well */
ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_process_slice_sid,
@@ -583,7 +581,7 @@ hs_dr_pio_test__setup(const int test_num,
tv_ptr->stride,
tv_ptr->count,
tv_ptr->block);
- VRFY((ret >= 0),
+ VRFY((ret >= 0),
"H5Sselect_hyperslab(mem_large_ds_process_slice_sid, set) suceeded");
ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_process_slice_sid,
@@ -592,7 +590,7 @@ hs_dr_pio_test__setup(const int test_num,
tv_ptr->stride,
tv_ptr->count,
tv_ptr->block);
- VRFY((ret >= 0),
+ VRFY((ret >= 0),
"H5Sselect_hyperslab(file_large_ds_process_slice_sid, set) suceeded");
if ( MAINPROCESS ) { /* add an additional slice to the selections */
@@ -618,8 +616,8 @@ hs_dr_pio_test__setup(const int test_num,
/* write the initial value of the large data set to file */
- ret = H5Dwrite(tv_ptr->large_dataset, tv_ptr->dset_type,
- tv_ptr->mem_large_ds_sid, tv_ptr->file_large_ds_sid_0,
+ ret = H5Dwrite(tv_ptr->large_dataset, tv_ptr->dset_type,
+ tv_ptr->mem_large_ds_sid, tv_ptr->file_large_ds_sid_0,
tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0);
if ( ret < 0 ) H5Eprint2(H5E_DEFAULT, stderr);
VRFY((ret >= 0), "H5Dwrite() large_dataset initial write succeeded");
@@ -633,8 +631,8 @@ hs_dr_pio_test__setup(const int test_num,
}
- /* read the large data set back to verify that it contains the
- * expected data. Note that each process reads in the entire
+ /* read the large data set back to verify that it contains the
+ * expected data. Note that each process reads in the entire
* data set.
*/
ret = H5Dread(tv_ptr->large_dataset,
@@ -678,18 +676,18 @@ hs_dr_pio_test__setup(const int test_num,
/*-------------------------------------------------------------------------
- * Function: hs_dr_pio_test__takedown()
+ * Function: hs_dr_pio_test__takedown()
*
- * Purpose: Do takedown after tests of I/O to/from hyperslab selections
- * of different rank in the parallel case.
+ * Purpose: Do takedown after tests of I/O to/from hyperslab selections
+ * of different rank in the parallel case.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 9/18/09
+ * Programmer: JRM -- 9/18/09
*
* Modifications:
*
- * None.
+ * None.
*
*-------------------------------------------------------------------------
*/
@@ -699,11 +697,11 @@ hs_dr_pio_test__setup(const int test_num,
static void
hs_dr_pio_test__takedown( struct hs_dr_pio_test_vars_t * tv_ptr)
{
-#if HS_DR_PIO_TEST__TAKEDOWN__DEBUG
+#if HS_DR_PIO_TEST__TAKEDOWN__DEBUG
const char *fcnName = "hs_dr_pio_test__takedown()";
#endif /* HS_DR_PIO_TEST__TAKEDOWN__DEBUG */
- int mpi_rank; /* needed by the VRFY macro */
- herr_t ret; /* Generic return value */
+ int mpi_rank; /* needed by the VRFY macro */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
@@ -787,27 +785,27 @@ hs_dr_pio_test__takedown( struct hs_dr_pio_test_vars_t * tv_ptr)
/*-------------------------------------------------------------------------
- * Function: contig_hs_dr_pio_test__d2m_l2s()
+ * Function: contig_hs_dr_pio_test__d2m_l2s()
*
- * Purpose: Part one of a series of tests of I/O to/from hyperslab
- * selections of different rank in the parallel.
+ * Purpose: Part one of a series of tests of I/O to/from hyperslab
+ * selections of different rank in the parallel.
*
- * Verify that we can read from disk correctly using
- * selections of different rank that H5S_select_shape_same()
- * views as being of the same shape.
+ * Verify that we can read from disk correctly using
+ * selections of different rank that H5S_select_shape_same()
+ * views as being of the same shape.
*
- * In this function, we test this by reading small_rank - 1
- * slices from the on disk large cube, and verifying that the
- * data read is correct. Verify that H5S_select_shape_same()
- * returns true on the memory and file selections.
+ * In this function, we test this by reading small_rank - 1
+ * slices from the on disk large cube, and verifying that the
+ * data read is correct. Verify that H5S_select_shape_same()
+ * returns true on the memory and file selections.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 9/10/11
+ * Programmer: JRM -- 9/10/11
*
* Modifications:
*
- * None.
+ * None.
*
*-------------------------------------------------------------------------
*/
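/*
 * Hedged sketch of the read pattern exercised below: select one
 * (small_rank - 1) dimensional slice of the on-disk large dataset, confirm
 * the file and memory selections are "shape same", then read the slice.
 * This uses the public H5Sselect_shape_same(); the test itself calls the
 * internal H5S_select_shape_same_test() via H5S_TESTING.  Argument names
 * are illustrative.
 */
#include <stdint.h>
#include "hdf5.h"

static herr_t
read_one_slice(hid_t dset, hid_t file_large_sid, hid_t mem_slice_sid,
               const hsize_t start[], const hsize_t stride[],
               const hsize_t count[], const hsize_t block[],
               hid_t xfer_plist, uint32_t *slice_buf)
{
    /* restrict the file-side selection to the target slice */
    if (H5Sselect_hyperslab(file_large_sid, H5S_SELECT_SET,
                            start, stride, count, block) < 0)
        return -1;

    /* the lower-rank memory selection must describe the same shape */
    if (H5Sselect_shape_same(mem_slice_sid, file_large_sid) <= 0)
        return -1;

    return H5Dread(dset, H5T_NATIVE_UINT, mem_slice_sid,
                   file_large_sid, xfer_plist, slice_buf);
}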
@@ -817,24 +815,24 @@ hs_dr_pio_test__takedown( struct hs_dr_pio_test_vars_t * tv_ptr)
static void
contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
{
-#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__run_test()";
#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
- hbool_t mis_match = FALSE;
- int i, j, k, l;
- size_t n;
- int mpi_rank; /* needed by the VRFY macro */
- uint32_t expected_value;
+ hbool_t mis_match = FALSE;
+ int i, j, k, l;
+ size_t n;
+ int mpi_rank; /* needed by the VRFY macro */
+ uint32_t expected_value;
uint32_t * ptr_1;
htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
- /* We have already done a H5Sselect_all() on the data space
- * small_ds_slice_sid in the initialization phase, so no need to
+ /* We have already done a H5Sselect_all() on the data space
+ * small_ds_slice_sid in the initialization phase, so no need to
* call H5Sselect_all() again.
*/
@@ -859,16 +857,16 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/* zero out the buffer we will be reading into */
HDmemset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
-#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
- HDfprintf(stdout,
+#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ HDfprintf(stdout,
"%s reading slices from big cube on disk into small cube slice.\n",
fcnName);
#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
/* in serial versions of this test, we loop through all the dimensions
- * of the large data set. However, in the parallel version, each
+ * of the large data set. However, in the parallel version, each
* process only works with that slice of the large cube indicated
- * by its rank -- hence we set the most slowly changing index to
+ * by its rank -- hence we set the most slowly changing index to
* mpi_rank, and don't iterate over it.
*/
@@ -881,9 +879,9 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
i = 0;
}
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
* loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
+ * we are setting it to zero. It will not change during the
* test.
*/
@@ -907,7 +905,7 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
}
do {
- /* since small rank >= 2 and large_rank > small_rank, we
+ /* since small rank >= 2 and large_rank > small_rank, we
* have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
* (barring major re-organization), this gives us:
*
@@ -921,14 +919,14 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
do {
if ( (tv_ptr->skips)++ < tv_ptr->max_skips ) { /* skip the test */
- (tv_ptr->tests_skipped)++;
+ (tv_ptr->tests_skipped)++;
} else { /* run the test */
tv_ptr->skips = 0; /* reset the skips counter */
- /* we know that small_rank - 1 >= 1 and that
- * large_rank > small_rank by the assertions at the head
+ /* we know that small_rank - 1 >= 1 and that
+ * large_rank > small_rank by the assertions at the head
* of this function. Thus no need for another inner loop.
*/
tv_ptr->start[0] = (hsize_t)i;
@@ -943,7 +941,7 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->stride_ptr,
tv_ptr->count_ptr,
tv_ptr->block_ptr);
- VRFY((ret != FAIL),
+ VRFY((ret != FAIL),
"H5Sselect_hyperslab(file_large_cube_sid) succeeded");
@@ -956,11 +954,11 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/* Read selection from disk */
-#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
- HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
- fcnName, (int)(tv_ptr->mpi_rank),
- (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
- (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
+#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
+ fcnName, (int)(tv_ptr->mpi_rank),
+ (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
+ (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
(int)(tv_ptr->start[4]));
HDfprintf(stdout, "%s slice/file extent dims = %d/%d.\n",
fcnName,
@@ -981,7 +979,7 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
mis_match = FALSE;
ptr_1 = tv_ptr->small_ds_slice_buf;
expected_value = (uint32_t)(
- (i * tv_ptr->edge_size * tv_ptr->edge_size *
+ (i * tv_ptr->edge_size * tv_ptr->edge_size *
tv_ptr->edge_size * tv_ptr->edge_size) +
(j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
(k * tv_ptr->edge_size * tv_ptr->edge_size) +
@@ -1000,10 +998,10 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
expected_value++;
}
- VRFY((mis_match == FALSE),
+ VRFY((mis_match == FALSE),
"small slice read from large ds data good.");
- (tv_ptr->tests_run)++;
+ (tv_ptr->tests_run)++;
}
l++;
@@ -1028,27 +1026,27 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/*-------------------------------------------------------------------------
- * Function: contig_hs_dr_pio_test__d2m_s2l()
+ * Function: contig_hs_dr_pio_test__d2m_s2l()
*
- * Purpose: Part two of a series of tests of I/O to/from hyperslab
- * selections of different rank in the parallel.
+ * Purpose: Part two of a series of tests of I/O to/from hyperslab
+ * selections of different rank in the parallel.
*
- * Verify that we can read from disk correctly using
- * selections of different rank that H5S_select_shape_same()
- * views as being of the same shape.
+ * Verify that we can read from disk correctly using
+ * selections of different rank that H5S_select_shape_same()
+ * views as being of the same shape.
*
- * In this function, we test this by reading slices of the
- * on disk small data set into slices through the in memory
- * large data set, and verify that the correct data (and
- * only the correct data) is read.
+ * In this function, we test this by reading slices of the
+ * on disk small data set into slices through the in memory
+ * large data set, and verify that the correct data (and
+ * only the correct data) is read.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 8/10/11
+ * Programmer: JRM -- 8/10/11
*
* Modifications:
*
- * None.
+ * None.
*
*-------------------------------------------------------------------------
*/
@@ -1058,25 +1056,25 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
static void
contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
{
-#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__d2m_s2l()";
#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
- hbool_t mis_match = FALSE;
- int i, j, k, l;
- size_t n;
- int mpi_rank; /* needed by the VRFY macro */
+ hbool_t mis_match = FALSE;
+ int i, j, k, l;
+ size_t n;
+ int mpi_rank; /* needed by the VRFY macro */
size_t start_index;
size_t stop_index;
- uint32_t expected_value;
+ uint32_t expected_value;
uint32_t * ptr_1;
htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
- /* Read slices of the on disk small data set into slices
- * through the in memory large data set, and verify that the correct
+ /* Read slices of the on disk small data set into slices
+ * through the in memory large data set, and verify that the correct
* data (and only the correct data) is read.
*/
@@ -1102,8 +1100,8 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) suceeded");
-#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
- HDfprintf(stdout,
+#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+ HDfprintf(stdout,
"%s reading slices of on disk small data set into slices of big data set.\n",
fcnName);
#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
@@ -1131,11 +1129,11 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
/* in serial versions of this test, we loop through all the dimensions
- * of the large data set that don't appear in the small data set.
+ * of the large data set that don't appear in the small data set.
*
- * However, in the parallel version, each process only works with that
- * slice of the large (and small) data set indicated by its rank -- hence
- * we set the most slowly changing index to mpi_rank, and don't itterate
+ * However, in the parallel version, each process only works with that
+ * slice of the large (and small) data set indicated by its rank -- hence
+ * we set the most slowly changing index to mpi_rank, and don't iterate
* over it.
*/
@@ -1149,9 +1147,9 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
i = 0;
}
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
* loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
+ * we are setting it to zero. It will not change during the
* test.
*/
@@ -1175,7 +1173,7 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
}
do {
- /* since small rank >= 2 and large_rank > small_rank, we
+ /* since small rank >= 2 and large_rank > small_rank, we
* have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
* (barring major re-organization), this gives us:
*
@@ -1211,7 +1209,7 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->stride_ptr,
tv_ptr->count_ptr,
tv_ptr->block_ptr);
- VRFY((ret != FAIL),
+ VRFY((ret != FAIL),
"H5Sselect_hyperslab(mem_large_ds_sid) succeeded");
@@ -1224,11 +1222,11 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
/* Read selection from disk */
-#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
- HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
- fcnName, (int)(tv_ptr->mpi_rank),
- (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
- (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
+#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
+ fcnName, (int)(tv_ptr->mpi_rank),
+ (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
+ (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
(int)(tv_ptr->start[4]));
HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
fcnName, tv_ptr->mpi_rank,
@@ -1250,7 +1248,7 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
expected_value = (uint32_t)
((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
start_index = (size_t)(
- (i * tv_ptr->edge_size * tv_ptr->edge_size *
+ (i * tv_ptr->edge_size * tv_ptr->edge_size *
tv_ptr->edge_size * tv_ptr->edge_size) +
(j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
(k * tv_ptr->edge_size * tv_ptr->edge_size) +
@@ -1283,7 +1281,7 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
ptr_1++;
}
- VRFY((mis_match == FALSE),
+ VRFY((mis_match == FALSE),
"small slice read from large ds data good.");
(tv_ptr->tests_run)++;
@@ -1311,29 +1309,29 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
/*-------------------------------------------------------------------------
- * Function: contig_hs_dr_pio_test__m2d_l2s()
+ * Function: contig_hs_dr_pio_test__m2d_l2s()
*
- * Purpose: Part three of a series of tests of I/O to/from hyperslab
- * selections of different rank in the parallel.
+ * Purpose: Part three of a series of tests of I/O to/from hyperslab
+ * selections of different rank in the parallel.
*
- * Verify that we can write from memory to file using
- * selections of different rank that H5S_select_shape_same()
- * views as being of the same shape.
+ * Verify that we can write from memory to file using
+ * selections of different rank that H5S_select_shape_same()
+ * views as being of the same shape.
*
- * Do this by writing small_rank - 1 dimensional slices from
- * the in memory large data set to the on disk small cube
- * dataset. After each write, read the slice of the small
- * dataset back from disk, and verify that it contains
- * the expected data. Verify that H5S_select_shape_same()
- * returns true on the memory and file selections.
+ * Do this by writing small_rank - 1 dimensional slices from
+ * the in memory large data set to the on disk small cube
+ * dataset. After each write, read the slice of the small
+ * dataset back from disk, and verify that it contains
+ * the expected data. Verify that H5S_select_shape_same()
+ * returns true on the memory and file selections.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 8/10/11
+ * Programmer: JRM -- 8/10/11
*
* Modifications:
*
- * None.
+ * None.
*
*-------------------------------------------------------------------------
*/
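/*
 * Hedged sketch of the write-then-verify step described above, simplified
 * to one slice: write the slice of the in-memory large dataset over the
 * on-disk small dataset, read it back, and check each element against the
 * linearized value (i*e^4 + j*e^3 + k*e^2 + l*e + n, e == edge_size) that
 * the loops below compute as expected_value.  The memory space is assumed
 * to be a dense (small_rank - 1) dimensional slice space, and the real
 * test additionally verifies that elements outside the slice are untouched.
 */
#include <stdint.h>
#include "hdf5.h"

static hbool_t
write_and_verify_slice(hid_t small_dset, hid_t mem_slice_sid,
                       hid_t file_small_slice_sid, hid_t xfer_plist,
                       const uint32_t *slice_out, uint32_t *slice_in,
                       size_t slice_size, uint32_t expected_value)
{
    size_t n;

    if (H5Dwrite(small_dset, H5T_NATIVE_UINT, mem_slice_sid,
                 file_small_slice_sid, xfer_plist, slice_out) < 0)
        return FALSE;

    if (H5Dread(small_dset, H5T_NATIVE_UINT, mem_slice_sid,
                file_small_slice_sid, xfer_plist, slice_in) < 0)
        return FALSE;

    /* every element must carry the value the writer put there */
    for (n = 0; n < slice_size; n++)
        if (slice_in[n] != expected_value++)
            return FALSE;

    return TRUE;
}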
@@ -1343,19 +1341,19 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
static void
contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
{
-#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__m2d_l2s()";
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
- hbool_t mis_match = FALSE;
- int i, j, k, l;
- size_t n;
- int mpi_rank; /* needed by the VRFY macro */
+ hbool_t mis_match = FALSE;
+ int i, j, k, l;
+ size_t n;
+ int mpi_rank; /* needed by the VRFY macro */
size_t start_index;
size_t stop_index;
- uint32_t expected_value;
+ uint32_t expected_value;
uint32_t * ptr_1;
htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
@@ -1365,10 +1363,10 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
* from memory to file using selections of different rank that
* H5S_select_shape_same() views as being of the same shape.
*
- * Start by writing small_rank - 1 dimensional slices from the in memory large
- * data set to the on disk small cube dataset. After each write, read the
- * slice of the small dataset back from disk, and verify that it contains
- * the expected data. Verify that H5S_select_shape_same() returns true on
+ * Start by writing small_rank - 1 dimensional slices from the in memory large
+ * data set to the on disk small cube dataset. After each write, read the
+ * slice of the small dataset back from disk, and verify that it contains
+ * the expected data. Verify that H5S_select_shape_same() returns true on
* the memory and file selections.
*/
@@ -1424,18 +1422,18 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
HDmemset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size);
-#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
- HDfprintf(stdout,
+#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+ HDfprintf(stdout,
"%s writing slices from big ds to slices of small ds on disk.\n",
fcnName);
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
/* in serial versions of this test, we loop through all the dimensions
- * of the large data set that don't appear in the small data set.
+ * of the large data set that don't appear in the small data set.
*
- * However, in the parallel version, each process only works with that
- * slice of the large (and small) data set indicated by its rank -- hence
- * we set the most slowly changing index to mpi_rank, and don't itterate
+ * However, in the parallel version, each process only works with that
+ * slice of the large (and small) data set indicated by its rank -- hence
+ * we set the most slowly changing index to mpi_rank, and don't iterate
* over it.
*/
@@ -1449,9 +1447,9 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
i = 0;
}
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
* loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
+ * we are setting it to zero. It will not change during the
* test.
*/
@@ -1476,7 +1474,7 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
}
do {
- /* since small rank >= 2 and large_rank > small_rank, we
+ /* since small rank >= 2 and large_rank > small_rank, we
* have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
* (barring major re-organization), this gives us:
*
@@ -1525,7 +1523,7 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->stride_ptr,
tv_ptr->count_ptr,
tv_ptr->block_ptr);
- VRFY((ret >= 0),
+ VRFY((ret >= 0),
"H5Sselect_hyperslab() mem_large_ds_sid succeeded.");
@@ -1538,13 +1536,13 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
VRFY((check == TRUE), "H5S_select_shape_same_test passed.");
- /* write the slice from the in memory large data set to the
+ /* write the slice from the in memory large data set to the
* slice of the on disk small dataset. */
-#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
- HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
+#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
fcnName, (int)(tv_ptr->mpi_rank),
- (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
- (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
+ (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
+ (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
(int)(tv_ptr->start[4]));
HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
fcnName, tv_ptr->mpi_rank,
@@ -1576,7 +1574,7 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
ptr_1 = tv_ptr->small_ds_buf_1;
expected_value = (uint32_t)(
- (i * tv_ptr->edge_size * tv_ptr->edge_size *
+ (i * tv_ptr->edge_size * tv_ptr->edge_size *
tv_ptr->edge_size * tv_ptr->edge_size) +
(j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
(k * tv_ptr->edge_size * tv_ptr->edge_size) +
@@ -1611,7 +1609,7 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
ptr_1++;
}
- VRFY((mis_match == FALSE),
+ VRFY((mis_match == FALSE),
"small slice write from large ds data good.");
(tv_ptr->tests_run)++;
@@ -1639,31 +1637,31 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/*-------------------------------------------------------------------------
- * Function: contig_hs_dr_pio_test__m2d_s2l()
+ * Function: contig_hs_dr_pio_test__m2d_s2l()
*
- * Purpose: Part four of a series of tests of I/O to/from hyperslab
- * selections of different rank in the parallel.
+ * Purpose: Part four of a series of tests of I/O to/from hyperslab
+ * selections of different rank in the parallel.
*
- * Verify that we can write from memory to file using
- * selections of different rank that H5S_select_shape_same()
- * views as being of the same shape.
+ * Verify that we can write from memory to file using
+ * selections of different rank that H5S_select_shape_same()
+ * views as being of the same shape.
*
- * Do this by writing the contents of the process's slice of
- * the in memory small data set to slices of the on disk
- * large data set. After each write, read the process's
- * slice of the large data set back into memory, and verify
- * that it contains the expected data.
+ * Do this by writing the contents of the process's slice of
+ * the in memory small data set to slices of the on disk
+ * large data set. After each write, read the process's
+ * slice of the large data set back into memory, and verify
+ * that it contains the expected data.
*
- * Verify that H5S_select_shape_same() returns true on the
- * memory and file selections.
+ * Verify that H5S_select_shape_same() returns true on the
+ * memory and file selections.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 8/10/11
+ * Programmer: JRM -- 8/10/11
*
* Modifications:
*
- * None
+ * None
*
*-------------------------------------------------------------------------
*/
@@ -1673,32 +1671,32 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
static void
contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
{
-#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__m2d_s2l()";
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
- hbool_t mis_match = FALSE;
- int i, j, k, l;
- size_t n;
- int mpi_rank; /* needed by the VRFY macro */
+ hbool_t mis_match = FALSE;
+ int i, j, k, l;
+ size_t n;
+ int mpi_rank; /* needed by the VRFY macro */
size_t start_index;
size_t stop_index;
- uint32_t expected_value;
+ uint32_t expected_value;
uint32_t * ptr_1;
htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
- /* Now write the contents of the process's slice of the in memory
- * small data set to slices of the on disk large data set. After
+ /* Now write the contents of the process's slice of the in memory
+ * small data set to slices of the on disk large data set. After
* each write, read the process's slice of the large data set back
- * into memory, and verify that it contains the expected data.
- * Verify that H5S_select_shape_same() returns true on the memory
+ * into memory, and verify that it contains the expected data.
+ * Verify that H5S_select_shape_same() returns true on the memory
* and file selections.
*/
- /* select the slice of the in memory small data set associated with
+ /* select the slice of the in memory small data set associated with
* the process's mpi rank.
*/
tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
@@ -1745,8 +1743,8 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
/* zero out the in memory large ds */
HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
-#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
- HDfprintf(stdout,
+#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+ HDfprintf(stdout,
"%s writing process slices of small ds to slices of large ds on disk.\n",
fcnName);
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
@@ -1760,9 +1758,9 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
i = 0;
}
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
* loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
+ * we are setting it to zero. It will not change during the
* test.
*/
@@ -1786,7 +1784,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
}
do {
- /* since small rank >= 2 and large_rank > small_rank, we
+ /* since small rank >= 2 and large_rank > small_rank, we
* have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
* (barring major re-organization), this gives us:
*
@@ -1802,18 +1800,18 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
(tv_ptr->tests_skipped)++;
-#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
tv_ptr->start[0] = (hsize_t)i;
tv_ptr->start[1] = (hsize_t)j;
tv_ptr->start[2] = (hsize_t)k;
tv_ptr->start[3] = (hsize_t)l;
tv_ptr->start[4] = 0;
- HDfprintf(stdout,
- "%s:%d: skipping test with start = %d %d %d %d %d.\n",
+ HDfprintf(stdout,
+ "%s:%d: skipping test with start = %d %d %d %d %d.\n",
fcnName, (int)(tv_ptr->mpi_rank),
- (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
- (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
+ (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
+ (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
(int)(tv_ptr->start[4]));
HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
fcnName, tv_ptr->mpi_rank,
@@ -1857,7 +1855,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->stride_ptr,
tv_ptr->count_ptr,
tv_ptr->block_ptr);
- VRFY((ret != FAIL),
+ VRFY((ret != FAIL),
"H5Sselect_hyperslab() target large ds slice succeeded");
@@ -1871,14 +1869,14 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
VRFY((check == TRUE), "H5S_select_shape_same_test passed");
- /* write the small data set slice from memory to the
- * target slice of the disk data set
+ /* write the small data set slice from memory to the
+ * target slice of the disk data set
*/
-#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
- HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
+#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
fcnName, (int)(tv_ptr->mpi_rank),
- (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
- (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
+ (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
+ (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
(int)(tv_ptr->start[4]));
HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
fcnName, tv_ptr->mpi_rank,
@@ -1891,11 +1889,11 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->file_large_ds_sid_0,
tv_ptr->xfer_plist,
tv_ptr->small_ds_buf_0);
- VRFY((ret != FAIL),
+ VRFY((ret != FAIL),
"H5Dwrite of small ds slice to large ds succeeded");
- /* read this processes slice on the on disk large
+ /* read this process's slice of the on disk large
* data set into memory.
*/
@@ -1905,7 +1903,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->file_large_ds_process_slice_sid,
tv_ptr->xfer_plist,
tv_ptr->large_ds_buf_1);
- VRFY((ret != FAIL),
+ VRFY((ret != FAIL),
"H5Dread() of process slice of large ds succeeded");
@@ -1914,12 +1912,12 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
*/
ptr_1 = tv_ptr->large_ds_buf_1;
expected_value = (uint32_t)
- ((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
+ ((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
start_index = (size_t)
- ((i * tv_ptr->edge_size * tv_ptr->edge_size *
+ ((i * tv_ptr->edge_size * tv_ptr->edge_size *
tv_ptr->edge_size * tv_ptr->edge_size) +
- (j * tv_ptr->edge_size * tv_ptr->edge_size *
+ (j * tv_ptr->edge_size * tv_ptr->edge_size *
tv_ptr->edge_size) +
(k * tv_ptr->edge_size * tv_ptr->edge_size) +
(l * tv_ptr->edge_size));
@@ -1951,7 +1949,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
ptr_1++;
}
- VRFY((mis_match == FALSE),
+ VRFY((mis_match == FALSE),
"small ds slice write to large ds slice data good.");
(tv_ptr->tests_run)++;
@@ -1979,29 +1977,29 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
/*-------------------------------------------------------------------------
- * Function: contig_hs_dr_pio_test__run_test()
+ * Function: contig_hs_dr_pio_test__run_test()
*
- * Purpose: Test I/O to/from hyperslab selections of different rank in
- * the parallel.
+ * Purpose: Test I/O to/from hyperslab selections of different rank in
+ * the parallel.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 9/18/09
+ * Programmer: JRM -- 9/18/09
*
* Modifications:
*
- * JRM -- 9/16/10
- * Added express_test parameter. Use it to control whether
- * we set up the chunks so that no chunk is shared between
- * processes, and also whether we set an alignment when we
- * create the test file.
+ * JRM -- 9/16/10
+ * Added express_test parameter. Use it to control whether
+ * we set up the chunks so that no chunk is shared between
+ * processes, and also whether we set an alignment when we
+ * create the test file.
*
- * JRM -- 8/11/11
- * Refactored function heavily & broke it into six functions.
- * Added the skips_ptr, max_skips, total_tests_ptr,
- * tests_run_ptr, and tests_skiped_ptr parameters to support
- * skipping portions of the test according to the express
- * test value.
+ * JRM -- 8/11/11
+ * Refactored function heavily & broke it into six functions.
+ * Added the skips_ptr, max_skips, total_tests_ptr,
+ * tests_run_ptr, and tests_skipped_ptr parameters to support
+ * skipping portions of the test according to the express
+ * test value.
*
*-------------------------------------------------------------------------
*/
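/*
 * Hedged sketch of the skip throttle added in the 8/11/11 refactor noted
 * above: the express test level picks max_skips (e.g. from the
 * max_skips_tbl[] = {0, 4, 64, 1024} used by the callers below), and only
 * every (max_skips + 1)-th candidate selection is actually run; the rest
 * are counted as skipped.  The function-pointer wrapper is illustrative.
 */
#include <stdint.h>

static void
maybe_run_subtest(int *skips, int max_skips,
                  int64_t *tests_run, int64_t *tests_skipped,
                  void (*run_subtest)(void *), void *args)
{
    if ((*skips)++ < max_skips) {
        (*tests_skipped)++;        /* throttled: record the skip and move on */
    }
    else {
        *skips = 0;                /* reset the counter and run this one */
        run_subtest(args);
        (*tests_run)++;
    }
}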
@@ -2023,13 +2021,13 @@ contig_hs_dr_pio_test__run_test(const int test_num,
int64_t * tests_run_ptr,
int64_t * tests_skipped_ptr)
{
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__run_test()";
#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
- int mpi_rank;
- struct hs_dr_pio_test_vars_t test_vars =
+ int mpi_rank;
+ struct hs_dr_pio_test_vars_t test_vars =
{
- /* int mpi_size = */ -1,
+ /* int mpi_size = */ -1,
/* int mpi_rank = */ -1,
/* MPI_Comm mpi_comm = */ MPI_COMM_NULL,
/* MPI_Inf mpi_info = */ MPI_INFO_NULL,
@@ -2045,7 +2043,7 @@ contig_hs_dr_pio_test__run_test(const int test_num,
/* uint32_t * small_ds_buf_2 = */ NULL,
/* uint32_t * small_ds_slice_buf = */ NULL,
/* uint32_t * large_ds_buf_0 = */ NULL,
- /* uint32_t * large_ds_buf_1 = */ NULL,
+ /* uint32_t * large_ds_buf_1 = */ NULL,
/* uint32_t * large_ds_buf_2 = */ NULL,
/* uint32_t * large_ds_slice_buf = */ NULL,
/* int small_ds_offset = */ -1,
@@ -2082,8 +2080,8 @@ contig_hs_dr_pio_test__run_test(const int test_num,
/* hsize_t * stride_ptr = */ NULL,
/* hsize_t * count_ptr = */ NULL,
/* hsize_t * block_ptr = */ NULL,
- /* int skips = */ 0,
- /* int max_skips = */ 0,
+ /* int skips = */ 0,
+ /* int max_skips = */ 0,
/* int64_t total_tests = */ 0,
/* int64_t tests_run = */ 0,
/* int64_t tests_skipped = */ 0
@@ -2101,7 +2099,7 @@ contig_hs_dr_pio_test__run_test(const int test_num,
tv_ptr->skips = *skips_ptr;
tv_ptr->max_skips = max_skips;
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
if ( MAINPROCESS ) {
HDfprintf(stdout, "test %d: small rank = %d, large rank = %d.\n",
test_num, small_rank, large_rank);
@@ -2113,12 +2111,12 @@ contig_hs_dr_pio_test__run_test(const int test_num,
* of different rank that H5S_select_shape_same() views as being of the
* same shape.
*
- * Start by reading small_rank - 1 dimensional slice from the on disk
- * large cube, and verifying that the data read is correct. Verify that
+ * Start by reading small_rank - 1 dimensional slice from the on disk
+ * large cube, and verifying that the data read is correct. Verify that
* H5S_select_shape_same() returns true on the memory and file selections.
*/
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
if ( MAINPROCESS ) {
HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__d2m_l2s.\n", test_num);
}
@@ -2126,12 +2124,12 @@ contig_hs_dr_pio_test__run_test(const int test_num,
contig_hs_dr_pio_test__d2m_l2s(tv_ptr);
- /* Second, read slices of the on disk small data set into slices
- * through the in memory large data set, and verify that the correct
+ /* Second, read slices of the on disk small data set into slices
+ * through the in memory large data set, and verify that the correct
* data (and only the correct data) is read.
*/
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
if ( MAINPROCESS ) {
HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__d2m_s2l.\n", test_num);
}
@@ -2144,13 +2142,13 @@ contig_hs_dr_pio_test__run_test(const int test_num,
* H5S_select_shape_same() views as being of the same shape.
*
* Start by writing small_rank - 1 D slices from the in memory large data
- * set to the on disk small cube dataset. After each write, read the
- * slice of the small dataset back from disk, and verify that it contains
- * the expected data. Verify that H5S_select_shape_same() returns true on
+ * set to the on disk small cube dataset. After each write, read the
+ * slice of the small dataset back from disk, and verify that it contains
+ * the expected data. Verify that H5S_select_shape_same() returns true on
* the memory and file selections.
*/
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
if ( MAINPROCESS ) {
HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__m2d_l2s.\n", test_num);
}
@@ -2158,25 +2156,25 @@ contig_hs_dr_pio_test__run_test(const int test_num,
contig_hs_dr_pio_test__m2d_l2s(tv_ptr);
- /* Now write the contents of the process's slice of the in memory
- * small data set to slices of the on disk large data set. After
+ /* Now write the contents of the process's slice of the in memory
+ * small data set to slices of the on disk large data set. After
* each write, read the process's slice of the large data set back
- * into memory, and verify that it contains the expected data.
- * Verify that H5S_select_shape_same() returns true on the memory
+ * into memory, and verify that it contains the expected data.
+ * Verify that H5S_select_shape_same() returns true on the memory
* and file selections.
*/
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
if ( MAINPROCESS ) {
HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__m2d_s2l.\n", test_num);
}
#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
contig_hs_dr_pio_test__m2d_s2l(tv_ptr);
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
if ( MAINPROCESS ) {
- HDfprintf(stdout,
- "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n",
+ HDfprintf(stdout,
+ "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n",
test_num, (long long)(tv_ptr->tests_run), (long long)(tv_ptr->tests_skipped),
(long long)(tv_ptr->total_tests));
}
@@ -2184,7 +2182,7 @@ contig_hs_dr_pio_test__run_test(const int test_num,
hs_dr_pio_test__takedown(tv_ptr);
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
if ( MAINPROCESS ) {
HDfprintf(stdout, "test %d: Takedown complete.\n", test_num);
}
@@ -2201,28 +2199,28 @@ contig_hs_dr_pio_test__run_test(const int test_num,
/*-------------------------------------------------------------------------
- * Function: contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
+ * Function: contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
*
- * Purpose: Test I/O to/from hyperslab selections of different rank in
- * the parallel case.
+ * Purpose: Test I/O to/from hyperslab selections of different rank in
+ * the parallel case.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 9/18/09
+ * Programmer: JRM -- 9/18/09
*
* Modifications:
*
- * Modified function to take a sample of the run times
- * of the different tests, and skip some of them if
- * run times are too long.
+ * Modified function to take a sample of the run times
+ * of the different tests, and skip some of them if
+ * run times are too long.
*
- * We need to do this because Lustre runns very slowly
- * if two or more processes are banging on the same
- * block of memory.
- * JRM -- 9/10/10
+ * We need to do this because Lustre runs very slowly
+ * if two or more processes are banging on the same
+ * block of memory.
+ * JRM -- 9/10/10
* Break this one big test into 4 smaller tests according
* to {independent,collective}x{contiguous,chunked} datasets.
- * AKC -- 2010/01/14
+ * AKC -- 2010/01/14
*
*-------------------------------------------------------------------------
*/
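/*
 * Hedged sketch of how the TRUE/FALSE use_collective_io flag passed to
 * contig_hs_dr_pio_test__run_test() below typically maps onto a transfer
 * property list: collective vs. independent MPI-IO is selected with
 * H5Pset_dxpl_mpio().  The test keeps its xfer_plist in
 * struct hs_dr_pio_test_vars_t; this helper is illustrative only.
 */
#include "hdf5.h"

static hid_t
make_xfer_plist(hbool_t use_collective_io)
{
    hid_t xfer_plist = H5Pcreate(H5P_DATASET_XFER);

    if (xfer_plist >= 0 &&
        H5Pset_dxpl_mpio(xfer_plist,
                         use_collective_io ? H5FD_MPIO_COLLECTIVE
                                           : H5FD_MPIO_INDEPENDENT) < 0) {
        H5Pclose(xfer_plist);
        xfer_plist = -1;
    }
    return xfer_plist;
}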
@@ -2236,23 +2234,23 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
int local_express_test;
int mpi_rank = -1;
int mpi_size;
- int test_num = 0;
- int edge_size;
- int chunk_edge_size = 0;
- int small_rank;
- int large_rank;
- int mpi_result;
- int skips = 0;
- int max_skips = 0;
- /* The following table list the number of sub-tests skipped between
- * each test that is actually executed as a function of the express
+ int test_num = 0;
+ int edge_size;
+ int chunk_edge_size = 0;
+ int small_rank;
+ int large_rank;
+ int mpi_result;
+ int skips = 0;
+ int max_skips = 0;
+ /* The following table lists the number of sub-tests skipped between
+ * each test that is actually executed as a function of the express
* test level. Note that any value in excess of 4880 will cause all
* sub tests to be skipped.
*/
int max_skips_tbl[4] = {0, 4, 64, 1024};
- hid_t dset_type = H5T_NATIVE_UINT;
- int64_t total_tests = 0;
- int64_t tests_run = 0;
+ hid_t dset_type = H5T_NATIVE_UINT;
+ int64_t total_tests = 0;
+ int64_t tests_run = 0;
int64_t tests_skipped = 0;
HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned));
@@ -2295,7 +2293,7 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
chunk_edge_size,
small_rank,
large_rank,
- FALSE,
+ FALSE,
dset_type,
express_test,
&skips,
@@ -2316,7 +2314,7 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
chunk_edge_size,
small_rank,
large_rank,
- TRUE,
+ TRUE,
dset_type,
express_test,
&skips,
@@ -2337,7 +2335,7 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
chunk_edge_size,
small_rank,
large_rank,
- FALSE,
+ FALSE,
dset_type,
express_test,
&skips,
@@ -2358,7 +2356,7 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
chunk_edge_size,
small_rank,
large_rank,
- TRUE,
+ TRUE,
dset_type,
express_test,
&skips,
@@ -2377,7 +2375,7 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
} /* end of switch(sstest_type) */
#if CONTIG_HS_DR_PIO_TEST__DEBUG
if ( ( MAINPROCESS ) && ( tests_skipped > 0 ) ) {
- HDfprintf(stdout, " run/skipped/total = %lld/%lld/%lld.\n",
+ HDfprintf(stdout, " run/skipped/total = %lld/%lld/%lld.\n",
tests_run, tests_skipped, total_tests);
}
#endif /* CONTIG_HS_DR_PIO_TEST__DEBUG */
@@ -2385,7 +2383,7 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
}
if ( ( MAINPROCESS ) && ( tests_skipped > 0 ) ) {
- HDfprintf(stdout, " %lld of %lld subtests skipped to expedite testing.\n",
+ HDfprintf(stdout, " %lld of %lld subtests skipped to expedite testing.\n",
tests_skipped, total_tests);
}
@@ -2396,24 +2394,24 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
/****************************************************************
**
-** ckrbrd_hs_dr_pio_test__slct_ckrbrd():
-** Given a data space of tgt_rank, and dimensions:
+** ckrbrd_hs_dr_pio_test__slct_ckrbrd():
+** Given a data space of tgt_rank, and dimensions:
**
-** (mpi_size + 1), edge_size, ... , edge_size
+** (mpi_size + 1), edge_size, ... , edge_size
**
-** edge_size, and a checker_edge_size, select a checker
-** board selection of a sel_rank (sel_rank < tgt_rank)
-** dimensional slice through the data space parallel to the
+** edge_size, and a checker_edge_size, select a checker
+** board selection of a sel_rank (sel_rank < tgt_rank)
+** dimensional slice through the data space parallel to the
** sel_rank fastest changing indices, with origin (in the
-** higher indicies) as indicated by the start array.
+** higher indices) as indicated by the start array.
**
-** Note that this function, like all its relatives, is
-** hard coded to presume a maximum data space rank of 5.
-** While this maximum is declared as a constant, increasing
-** it will require extensive coding in addition to changing
+** Note that this function, like all its relatives, is
+** hard coded to presume a maximum data space rank of 5.
+** While this maximum is declared as a constant, increasing
+** it will require extensive coding in addition to changing
** the value of the constant.
**
-** JRM -- 10/8/09
+** JRM -- 10/8/09
**
****************************************************************/
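/*
 * Hedged sketch of the two-pass checker board selection described above:
 * the first pass establishes the selection with H5S_SELECT_SET, and every
 * later pass ORs the complementary blocks in with H5S_SELECT_OR -- exactly
 * the first_selection logic in the loops below.  The start/stride/count/
 * block arrays are assumed to already describe one "color" of the board.
 */
#include "hdf5.h"

static herr_t
apply_checker_pass(hid_t tgt_sid, hbool_t *first_selection,
                   const hsize_t start[], const hsize_t stride[],
                   const hsize_t count[], const hsize_t block[])
{
    herr_t ret;

    if (*first_selection) {
        *first_selection = FALSE;
        ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_SET,
                                  start, stride, count, block);
    }
    else {
        /* OR each subsequent pass into the existing selection */
        ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_OR,
                                  start, stride, count, block);
    }
    return ret;
}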
@@ -2428,22 +2426,22 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
const int sel_rank,
hsize_t sel_start[])
{
-#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
- const char * fcnName = "ckrbrd_hs_dr_pio_test__slct_ckrbrd():";
-#endif
- hbool_t first_selection = TRUE;
+#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+ const char * fcnName = "ckrbrd_hs_dr_pio_test__slct_ckrbrd():";
+#endif
+ hbool_t first_selection = TRUE;
int i, j, k, l, m;
- int n_cube_offset;
- int sel_offset;
- const int test_max_rank = PAR_SS_DR_MAX_RANK; /* must update code if */
+ int n_cube_offset;
+ int sel_offset;
+ const int test_max_rank = PAR_SS_DR_MAX_RANK; /* must update code if */
/* this changes */
- hsize_t base_count;
+ hsize_t base_count;
hsize_t offset_count;
- hsize_t start[PAR_SS_DR_MAX_RANK];
- hsize_t stride[PAR_SS_DR_MAX_RANK];
- hsize_t count[PAR_SS_DR_MAX_RANK];
- hsize_t block[PAR_SS_DR_MAX_RANK];
- herr_t ret; /* Generic return value */
+ hsize_t start[PAR_SS_DR_MAX_RANK];
+ hsize_t stride[PAR_SS_DR_MAX_RANK];
+ hsize_t count[PAR_SS_DR_MAX_RANK];
+ hsize_t block[PAR_SS_DR_MAX_RANK];
+ herr_t ret; /* Generic return value */
HDassert( edge_size >= 6 );
HDassert( 0 < checker_edge_size );
@@ -2460,14 +2458,14 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
HDassert( n_cube_offset >= 0 );
HDassert( n_cube_offset <= sel_offset );
-#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
HDfprintf(stdout, "%s:%d: edge_size/checker_edge_size = %d/%d\n",
fcnName, mpi_rank, edge_size, checker_edge_size);
- HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n",
+ HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n",
fcnName, mpi_rank, sel_rank, sel_offset);
- HDfprintf(stdout, "%s:%d: tgt_rank/n_cube_offset = %d/%d.\n",
+ HDfprintf(stdout, "%s:%d: tgt_rank/n_cube_offset = %d/%d.\n",
fcnName, mpi_rank, tgt_rank, n_cube_offset);
-#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */
+#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */
/* First, compute the base count (which assumes start == 0
* for the associated offset) and offset_count (which
@@ -2497,7 +2495,7 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
}
/* Now set up the stride and block arrays, and portions of the start
- * and count arrays that will not be altered during the selection of
+ * and count arrays that will not be altered during the selection of
* the checker board.
*/
i = 0;
@@ -2529,7 +2527,7 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
i++;
}
-
+
i = 0;
do {
if ( 0 >= sel_offset ) {
@@ -2548,7 +2546,7 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
}
j = 0;
- do {
+ do {
if ( 1 >= sel_offset ) {
if ( j == 0 ) {
@@ -2617,62 +2615,62 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
if ( ((i + j + k + l + m) % 2) == 0 ) {
-#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
- HDfprintf(stdout, "%s%d: *** first_selection = %d ***\n",
+#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+ HDfprintf(stdout, "%s%d: *** first_selection = %d ***\n",
fcnName, mpi_rank, (int)first_selection);
HDfprintf(stdout, "%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n",
fcnName, mpi_rank, i, j, k, l, m);
- HDfprintf(stdout,
- "%s:%d: start = %d %d %d %d %d.\n",
- fcnName, mpi_rank, (int)start[0], (int)start[1],
+ HDfprintf(stdout,
+ "%s:%d: start = %d %d %d %d %d.\n",
+ fcnName, mpi_rank, (int)start[0], (int)start[1],
(int)start[2], (int)start[3], (int)start[4]);
- HDfprintf(stdout,
- "%s:%d: stride = %d %d %d %d %d.\n",
- fcnName, mpi_rank, (int)stride[0], (int)stride[1],
+ HDfprintf(stdout,
+ "%s:%d: stride = %d %d %d %d %d.\n",
+ fcnName, mpi_rank, (int)stride[0], (int)stride[1],
(int)stride[2], (int)stride[3], (int)stride[4]);
- HDfprintf(stdout,
- "%s:%d: count = %d %d %d %d %d.\n",
- fcnName, mpi_rank, (int)count[0], (int)count[1],
+ HDfprintf(stdout,
+ "%s:%d: count = %d %d %d %d %d.\n",
+ fcnName, mpi_rank, (int)count[0], (int)count[1],
(int)count[2], (int)count[3], (int)count[4]);
- HDfprintf(stdout,
- "%s:%d: block = %d %d %d %d %d.\n",
- fcnName, mpi_rank, (int)block[0], (int)block[1],
+ HDfprintf(stdout,
+ "%s:%d: block = %d %d %d %d %d.\n",
+ fcnName, mpi_rank, (int)block[0], (int)block[1],
(int)block[2], (int)block[3], (int)block[4]);
- HDfprintf(stdout, "%s:%d: n-cube extent dims = %d.\n",
+ HDfprintf(stdout, "%s:%d: n-cube extent dims = %d.\n",
fcnName, mpi_rank,
H5Sget_simple_extent_ndims(tgt_sid));
- HDfprintf(stdout, "%s:%d: selection rank = %d.\n",
+ HDfprintf(stdout, "%s:%d: selection rank = %d.\n",
fcnName, mpi_rank, sel_rank);
#endif
if ( first_selection ) {
- first_selection = FALSE;
+ first_selection = FALSE;
ret = H5Sselect_hyperslab
(
- tgt_sid,
+ tgt_sid,
H5S_SELECT_SET,
- &(start[n_cube_offset]),
- &(stride[n_cube_offset]),
- &(count[n_cube_offset]),
+ &(start[n_cube_offset]),
+ &(stride[n_cube_offset]),
+ &(count[n_cube_offset]),
&(block[n_cube_offset])
);
-
+
VRFY((ret != FAIL), "H5Sselect_hyperslab(SET) succeeded");
} else {
ret = H5Sselect_hyperslab
(
- tgt_sid,
+ tgt_sid,
H5S_SELECT_OR,
- &(start[n_cube_offset]),
- &(stride[n_cube_offset]),
- &(count[n_cube_offset]),
+ &(start[n_cube_offset]),
+ &(stride[n_cube_offset]),
+ &(count[n_cube_offset]),
&(block[n_cube_offset])
);
-
+
VRFY((ret != FAIL), "H5Sselect_hyperslab(OR) succeeded");
}
@@ -2704,7 +2702,7 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
} while ( ( i <= 1 ) &&
( 0 >= sel_offset ) );
-#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n",
fcnName, mpi_rank, (int)H5Sget_select_npoints(tgt_sid));
#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */
@@ -2724,7 +2722,7 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
VRFY((ret != FAIL), "H5Sselect_hyperslab(AND) succeeded");
-#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n",
fcnName, mpi_rank, (int)H5Sget_select_npoints(tgt_sid));
HDfprintf(stdout, "%s%d: done.\n", fcnName, mpi_rank);
@@ -2737,57 +2735,57 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
/****************************************************************
**
-** ckrbrd_hs_dr_pio_test__verify_data():
+** ckrbrd_hs_dr_pio_test__verify_data():
**
-** Examine the supplied buffer to see if it contains the
-** expected data. Return TRUE if it does, and FALSE
+** Examine the supplied buffer to see if it contains the
+** expected data. Return TRUE if it does, and FALSE
** otherwise.
**
-** The supplied buffer is presumed to this process's slice
-** of the target data set. Each such slice will be an
-** n-cube of rank (rank -1) and the supplied edge_size with
-** origin (mpi_rank, 0, ... , 0) in the target data set.
+** The supplied buffer is presumed to be this process's slice
+** of the target data set. Each such slice will be an
+** n-cube of rank (rank -1) and the supplied edge_size with
+** origin (mpi_rank, 0, ... , 0) in the target data set.
**
-** Further, the buffer is presumed to be the result of reading
-** or writing a checker board selection of an m (1 <= m <
+** Further, the buffer is presumed to be the result of reading
+** or writing a checker board selection of an m (1 <= m <
** rank) dimensional slice through this process's slice
-** of the target data set. Also, this slice must be parallel
-** to the fastest changing indicies.
+** of the target data set. Also, this slice must be parallel
+** to the fastest changing indices.
**
-** It is further presumed that the buffer was zeroed before
-** the read/write, and that the full target data set (i.e.
-** the buffer/data set for all processes) was initialized
-** with the natural numbers listed in order from the origin
-** along the fastest changing axis.
+** It is further presumed that the buffer was zeroed before
+** the read/write, and that the full target data set (i.e.
+** the buffer/data set for all processes) was initialized
+** with the natural numbers listed in order from the origin
+** along the fastest changing axis.
**
** Thus for a 20x10x10 dataset, the value stored in location
-** (x, y, z) (assuming that z is the fastest changing index
-** and x the slowest) is assumed to be:
+** (x, y, z) (assuming that z is the fastest changing index
+** and x the slowest) is assumed to be:
**
-** (10 * 10 * x) + (10 * y) + z
+** (10 * 10 * x) + (10 * y) + z
**
-** Further, supposing that this is process 10, this process's
-** slice of the dataset would be a 10 x 10 2-cube with origin
-** (10, 0, 0) in the data set, and would be initialize (prior
-** to the checkerboard selection) as follows:
+** Further, supposing that this is process 10, this process's
+** slice of the dataset would be a 10 x 10 2-cube with origin
+** (10, 0, 0) in the data set, and would be initialized (prior
+** to the checkerboard selection) as follows:
**
-** 1000, 1001, 1002, ... 1008, 1009
-** 1010, 1011, 1012, ... 1018, 1019
-** . . . . .
-** . . . . .
-** . . . . .
-** 1090, 1091, 1092, ... 1098, 1099
+** 1000, 1001, 1002, ... 1008, 1009
+** 1010, 1011, 1012, ... 1018, 1019
+** . . . . .
+** . . . . .
+** . . . . .
+** 1090, 1091, 1092, ... 1098, 1099
**
-** In the case of a read from the processors slice of another
-** data set of different rank, the values expected will have
-** to be adjusted accordingly. This is done via the
-** first_expected_val parameter.
+** In the case of a read from the process's slice of another
+** data set of different rank, the values expected will have
+** to be adjusted accordingly. This is done via the
+** first_expected_val parameter.
**
-** Finally, the function presumes that the first element
-** of the buffer resides either at the origin of either
-** a selected or an unselected checker. (Translation:
-** if partial checkers appear in the buffer, they will
-** intersect the edges of the n-cube oposite the origin.)
+** Finally, the function presumes that the first element
+** of the buffer resides at the origin of either
+** a selected or an unselected checker. (Translation:
+** if partial checkers appear in the buffer, they will
+** intersect the edges of the n-cube opposite the origin.)
**
****************************************************************/
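The value formula above is just the row-major linear index; a quick worked example (purely illustrative, not part of the diff) for the 20x10x10 case described in the comment:

    #include <stdint.h>

    /* Illustration only: expected value at (x, y, z) in a 20 x 10 x 10 dataset
     * initialized with the natural numbers in row-major (z fastest) order. */
    static uint32_t
    expected_at_20x10x10(uint32_t x, uint32_t y, uint32_t z)
    {
        return (10 * 10 * x) + (10 * y) + z;   /* (10, 0, 0) -> 1000, (10, 9, 9) -> 1099 */
    }

The 1000 and 1099 endpoints match the slice shown for process 10 in the comment above.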
@@ -2802,7 +2800,7 @@ ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr,
hbool_t buf_starts_in_checker)
{
#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
- const char * fcnName = "ckrbrd_hs_dr_pio_test__verify_data():";
+ const char * fcnName = "ckrbrd_hs_dr_pio_test__verify_data():";
#endif
hbool_t good_data = TRUE;
hbool_t in_checker;
@@ -2821,9 +2819,9 @@ ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr,
HDassert( checker_edge_size <= edge_size );
HDassert( test_max_rank <= PAR_SS_DR_MAX_RANK );
-#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
+#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
- int mpi_rank;
+ int mpi_rank;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
HDfprintf(stdout, "%s mpi_rank = %d.\n", fcnName, mpi_rank);
@@ -2875,7 +2873,7 @@ ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr,
y = 0;
start_in_checker[3] = start_in_checker[2];
do
- {
+ {
if ( y >= checker_edge_size ) {
start_in_checker[3] = ! start_in_checker[3];
@@ -2884,13 +2882,13 @@ ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr,
m = 0;
z = 0;
-#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
+#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
HDfprintf(stdout, "%d, %d, %d, %d, %d:", i, j, k, l, m);
#endif
in_checker = start_in_checker[3];
do
{
-#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
+#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
HDfprintf(stdout, " %d", (int)(*val_ptr));
#endif
if ( z >= checker_edge_size ) {
@@ -2898,21 +2896,21 @@ ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr,
in_checker = ! in_checker;
z = 0;
}
-
+
if ( in_checker ) {
-
+
if ( *val_ptr != expected_value ) {
good_data = FALSE;
}
-
+
/* zero out buffer for re-use */
*val_ptr = 0;
} else if ( *val_ptr != 0 ) {
good_data = FALSE;
-
+
/* zero out buffer for re-use */
*val_ptr = 0;
@@ -2922,10 +2920,10 @@ ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr,
expected_value++;
m++;
z++;
-
+
} while ( ( rank >= (test_max_rank - 4) ) &&
( m < edge_size ) );
-#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
+#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
HDfprintf(stdout, "\n");
#endif
l++;
@@ -2951,28 +2949,28 @@ ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr,
/*-------------------------------------------------------------------------
- * Function: ckrbrd_hs_dr_pio_test__d2m_l2s()
+ * Function: ckrbrd_hs_dr_pio_test__d2m_l2s()
*
- * Purpose: Part one of a series of tests of I/O to/from hyperslab
- * selections of different rank in the parallel.
+ * Purpose: Part one of a series of tests of I/O to/from hyperslab
+ * selections of different rank in the parallel case.
*
- * Verify that we can read from disk correctly using checker
- * board selections of different rank that
+ * Verify that we can read from disk correctly using checker
+ * board selections of different rank that
* H5S_select_shape_same() views as being of the same shape.
*
- * In this function, we test this by reading small_rank - 1
- * checker board slices from the on disk large cube, and
- * verifying that the data read is correct. Verify that
- * H5S_select_shape_same() returns true on the memory and
- * file selections.
+ * In this function, we test this by reading small_rank - 1
+ * checker board slices from the on disk large cube, and
+ * verifying that the data read is correct. Verify that
+ * H5S_select_shape_same() returns true on the memory and
+ * file selections.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 9/15/11
+ * Programmer: JRM -- 9/15/11
*
* Modifications:
*
- * None.
+ * None.
*
*-------------------------------------------------------------------------
*/
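As a rough sketch of the read step this part of the test performs (all identifiers here -- read_checker_slice, dset, mem_sid, file_sid, dxpl, slice_size -- are assumed names for illustration, not taken from the test):

    #include <stdint.h>
    #include <stdlib.h>
    #include "hdf5.h"

    /* Sketch only: read a checker board selection from the file dataspace into
     * a zeroed buffer through the matching memory-side selection.  The real
     * test then hands the buffer to ckrbrd_hs_dr_pio_test__verify_data(). */
    static int
    read_checker_slice(hid_t dset, hid_t mem_sid, hid_t file_sid, hid_t dxpl,
                       size_t slice_size)
    {
        uint32_t *buf = (uint32_t *)calloc(slice_size, sizeof(uint32_t));
        herr_t    status;

        if (buf == NULL)
            return -1;
        status = H5Dread(dset, H5T_NATIVE_UINT32, mem_sid, file_sid, dxpl, buf);
        free(buf);
        return (status < 0) ? -1 : 0;
    }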
@@ -2982,17 +2980,17 @@ ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr,
static void
ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
{
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__d2m_l2s()";
uint32_t * ptr_0;
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
- hbool_t data_ok = FALSE;
- int i, j, k, l;
- uint32_t expected_value;
- int mpi_rank; /* needed by VRFY */
+ hbool_t data_ok = FALSE;
+ int i, j, k, l;
+ uint32_t expected_value;
+ int mpi_rank; /* needed by VRFY */
hsize_t sel_start[PAR_SS_DR_MAX_RANK];
htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
@@ -3002,9 +3000,9 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
* of different rank that H5S_select_shape_same() views as being of the
* same shape.
*
- * Start by reading a (small_rank - 1)-D checker board slice from this
- * processes slice of the on disk large data set, and verifying that the
- * data read is correct. Verify that H5S_select_shape_same() returns
+ * Start by reading a (small_rank - 1)-D checker board slice from this
+ * process's slice of the on disk large data set, and verifying that the
+ * data read is correct. Verify that H5S_select_shape_same() returns
* true on the memory and file selections.
*
* The first step is to set up the needed checker board selection in the
@@ -3025,7 +3023,7 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/* zero out the buffer we will be reading into */
HDmemset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
HDfprintf(stdout, "%s:%d: initial small_ds_slice_buf = ",
fcnName, tv_ptr->mpi_rank);
ptr_0 = tv_ptr->small_ds_slice_buf;
@@ -3034,7 +3032,7 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
ptr_0++;
}
HDfprintf(stdout, "\n");
-#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
/* set up start, stride, count, and block -- note that we will
* change start[] so as to read slices of the large cube.
@@ -3054,15 +3052,15 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
}
}
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
- HDfprintf(stdout,
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ HDfprintf(stdout,
"%s:%d: reading slice from big ds on disk into small ds slice.\n",
fcnName, tv_ptr->mpi_rank);
-#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
/* in serial versions of this test, we loop through all the dimensions
- * of the large data set. However, in the parallel version, each
+ * of the large data set. However, in the parallel version, each
* process only works with that slice of the large cube indicated
- * by its rank -- hence we set the most slowly changing index to
+ * by its rank -- hence we set the most slowly changing index to
* mpi_rank, and don't iterate over it.
*/
@@ -3075,9 +3073,9 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
i = 0;
}
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
* loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
+ * we are setting it to zero. It will not change during the
* test.
*/
@@ -3101,7 +3099,7 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
}
do {
- /* since small rank >= 2 and large_rank > small_rank, we
+ /* since small rank >= 2 and large_rank > small_rank, we
* have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
* (barring major re-organization), this gives us:
*
@@ -3121,8 +3119,8 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->skips = 0; /* reset the skips counter */
- /* we know that small_rank - 1 >= 1 and that
- * large_rank > small_rank by the assertions at the head
+ /* we know that small_rank - 1 >= 1 and that
+ * large_rank > small_rank by the assertions at the head
* of this function. Thus no need for another inner loop.
*/
tv_ptr->start[0] = (hsize_t)i;
@@ -3157,15 +3155,15 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/* Read selection from disk */
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName,
- tv_ptr->mpi_rank, tv_ptr->start[0], tv_ptr->start[1],
+ tv_ptr->mpi_rank, tv_ptr->start[0], tv_ptr->start[1],
tv_ptr->start[2], tv_ptr->start[3], tv_ptr->start[4]);
HDfprintf(stdout, "%s slice/file extent dims = %d/%d.\n",
fcnName,
H5Sget_simple_extent_ndims(tv_ptr->small_ds_slice_sid),
H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0));
-#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
ret = H5Dread(tv_ptr->large_dataset,
H5T_NATIVE_UINT32,
@@ -3175,15 +3173,15 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->small_ds_slice_buf);
VRFY((ret >= 0), "H5Dread() slice from large ds succeeded.");
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
- HDfprintf(stdout, "%s:%d: H5Dread() returns.\n",
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ HDfprintf(stdout, "%s:%d: H5Dread() returns.\n",
fcnName, tv_ptr->mpi_rank);
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
/* verify that expected data is retrieved */
expected_value = (uint32_t)
- ((i * tv_ptr->edge_size * tv_ptr->edge_size *
+ ((i * tv_ptr->edge_size * tv_ptr->edge_size *
tv_ptr->edge_size * tv_ptr->edge_size) +
(j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
(k * tv_ptr->edge_size * tv_ptr->edge_size) +
@@ -3199,7 +3197,7 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
(hbool_t)TRUE
);
- VRFY((data_ok == TRUE),
+ VRFY((data_ok == TRUE),
"small slice read from large ds data good.");
(tv_ptr->tests_run)++;
@@ -3227,27 +3225,27 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/*-------------------------------------------------------------------------
- * Function: ckrbrd_hs_dr_pio_test__d2m_s2l()
+ * Function: ckrbrd_hs_dr_pio_test__d2m_s2l()
*
- * Purpose: Part two of a series of tests of I/O to/from hyperslab
- * selections of different rank in the parallel.
+ * Purpose: Part two of a series of tests of I/O to/from hyperslab
+ * selections of different rank in the parallel case.
*
- * Verify that we can read from disk correctly using
- * selections of different rank that H5S_select_shape_same()
- * views as being of the same shape.
+ * Verify that we can read from disk correctly using
+ * selections of different rank that H5S_select_shape_same()
+ * views as being of the same shape.
*
- * In this function, we test this by reading checker board
- * slices of the on disk small data set into slices through
- * the in memory large data set, and verify that the correct
- * data (and only the correct data) is read.
+ * In this function, we test this by reading checker board
+ * slices of the on disk small data set into slices through
+ * the in memory large data set, and verify that the correct
+ * data (and only the correct data) is read.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 8/15/11
+ * Programmer: JRM -- 8/15/11
*
* Modifications:
*
- * None.
+ * None.
*
*-------------------------------------------------------------------------
*/
@@ -3257,27 +3255,27 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
static void
ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
{
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__d2m_s2l()";
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
- hbool_t data_ok = FALSE;
- int i, j, k, l;
+ hbool_t data_ok = FALSE;
+ int i, j, k, l;
size_t u;
size_t start_index;
size_t stop_index;
- uint32_t expected_value;
+ uint32_t expected_value;
uint32_t * ptr_1;
- int mpi_rank; /* needed by VRFY */
+ int mpi_rank; /* needed by VRFY */
hsize_t sel_start[PAR_SS_DR_MAX_RANK];
htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
- /* similarly, read slices of the on disk small data set into slices
- * through the in memory large data set, and verify that the correct
+ /* similarly, read slices of the on disk small data set into slices
+ * through the in memory large data set, and verify that the correct
* data (and only the correct data) is read.
*/
@@ -3292,8 +3290,8 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->small_rank - 1,
sel_start);
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
- HDfprintf(stdout,
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+ HDfprintf(stdout,
"%s reading slices of on disk small data set into slices of big data set.\n",
fcnName);
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
@@ -3303,7 +3301,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
/* set up start, stride, count, and block -- note that we will
* change start[] so as to read the slice of the small data set
- * into different slices of the process slice of the large data
+ * into different slices of the process slice of the large data
* set.
*/
for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) {
@@ -3322,11 +3320,11 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
}
/* in serial versions of this test, we loop through all the dimensions
- * of the large data set that don't appear in the small data set.
+ * of the large data set that don't appear in the small data set.
*
- * However, in the parallel version, each process only works with that
- * slice of the large (and small) data set indicated by its rank -- hence
- * we set the most slowly changing index to mpi_rank, and don't itterate
+ * However, in the parallel version, each process only works with that
+ * slice of the large (and small) data set indicated by its rank -- hence
+ * we set the most slowly changing index to mpi_rank, and don't iterate
* over it.
*/
@@ -3340,9 +3338,9 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
i = 0;
}
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
* loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
+ * we are setting it to zero. It will not change during the
* test.
*/
@@ -3366,7 +3364,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
}
do {
- /* since small rank >= 2 and large_rank > small_rank, we
+ /* since small rank >= 2 and large_rank > small_rank, we
* have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
* (barring major re-organization), this gives us:
*
@@ -3423,11 +3421,11 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
/* Read selection from disk */
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
- HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
- fcnName, tv_ptr->mpi_rank,
- tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2],
- tv_ptr->start[3], tv_ptr->start[4]);
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
+ fcnName, tv_ptr->mpi_rank,
+ tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2],
+ tv_ptr->start[3], tv_ptr->start[4]);
HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
fcnName, tv_ptr->mpi_rank,
H5Sget_simple_extent_ndims(tv_ptr->large_ds_slice_sid),
@@ -3446,21 +3444,21 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
*/
data_ok = TRUE;
ptr_1 = tv_ptr->large_ds_buf_1;
- expected_value =
+ expected_value =
(uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
start_index = (size_t)(
- (i * tv_ptr->edge_size * tv_ptr->edge_size *
+ (i * tv_ptr->edge_size * tv_ptr->edge_size *
tv_ptr->edge_size * tv_ptr->edge_size) +
(j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
(k * tv_ptr->edge_size * tv_ptr->edge_size) +
(l * tv_ptr->edge_size));
stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
{
int m, n;
- HDfprintf(stdout, "%s:%d: expected_value = %d.\n",
+ HDfprintf(stdout, "%s:%d: expected_value = %d.\n",
fcnName, tv_ptr->mpi_rank, expected_value);
HDfprintf(stdout, "%s:%d: start/stop index = %d/%d.\n",
fcnName, tv_ptr->mpi_rank, start_index, stop_index);
@@ -3495,7 +3493,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
ptr_1++;
}
- VRFY((data_ok == TRUE),
+ VRFY((data_ok == TRUE),
"slice read from small to large ds data good(1).");
data_ok = ckrbrd_hs_dr_pio_test__verify_data
@@ -3508,7 +3506,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
(hbool_t)TRUE
);
- VRFY((data_ok == TRUE),
+ VRFY((data_ok == TRUE),
"slice read from small to large ds data good(2).");
@@ -3527,7 +3525,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
ptr_1++;
}
- VRFY((data_ok == TRUE),
+ VRFY((data_ok == TRUE),
"slice read from small to large ds data good(3).");
(tv_ptr->tests_run)++;
@@ -3555,31 +3553,31 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
/*-------------------------------------------------------------------------
- * Function: ckrbrd_hs_dr_pio_test__m2d_l2s()
+ * Function: ckrbrd_hs_dr_pio_test__m2d_l2s()
*
- * Purpose: Part three of a series of tests of I/O to/from checker
- * board hyperslab selections of different rank in the
- * parallel.
+ * Purpose: Part three of a series of tests of I/O to/from checker
+ * board hyperslab selections of different rank in the
+ * parallel case.
*
- * Verify that we can write from memory to file using checker
- * board selections of different rank that
- * H5S_select_shape_same() views as being of the same shape.
+ * Verify that we can write from memory to file using checker
+ * board selections of different rank that
+ * H5S_select_shape_same() views as being of the same shape.
*
- * Do this by writing small_rank - 1 dimensional checker
- * board slices from the in memory large data set to the on
- * disk small cube dataset. After each write, read the
- * slice of the small dataset back from disk, and verify
- * that it contains the expected data. Verify that
- * H5S_select_shape_same() returns true on the memory and
- * file selections.
+ * Do this by writing small_rank - 1 dimensional checker
+ * board slices from the in memory large data set to the on
+ * disk small cube dataset. After each write, read the
+ * slice of the small dataset back from disk, and verify
+ * that it contains the expected data. Verify that
+ * H5S_select_shape_same() returns true on the memory and
+ * file selections.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 8/15/11
+ * Programmer: JRM -- 8/15/11
*
* Modifications:
*
- * None.
+ * None.
*
*-------------------------------------------------------------------------
*/
@@ -3589,21 +3587,21 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
static void
ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
{
-#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__m2d_l2s()";
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
- hbool_t data_ok = FALSE;
- hbool_t mis_match = FALSE;
- int i, j, k, l;
+ hbool_t data_ok = FALSE;
+ hbool_t mis_match = FALSE;
+ int i, j, k, l;
size_t u;
size_t start_index;
size_t stop_index;
- uint32_t expected_value;
+ uint32_t expected_value;
uint32_t * ptr_1;
- int mpi_rank; /* needed by VRFY */
+ int mpi_rank; /* needed by VRFY */
hsize_t sel_start[PAR_SS_DR_MAX_RANK];
htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
@@ -3614,9 +3612,9 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
* H5S_select_shape_same() views as being of the same shape.
*
* Start by writing small_rank - 1 D slices from the in memory large data
- * set to the on disk small dataset. After each write, read the slice of
- * the small dataset back from disk, and verify that it contains the
- * expected data. Verify that H5S_select_shape_same() returns true on
+ * set to the on disk small dataset. After each write, read the slice of
+ * the small dataset back from disk, and verify that it contains the
+ * expected data. Verify that H5S_select_shape_same() returns true on
* the memory and file selections.
*/
@@ -3684,18 +3682,18 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
HDmemset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size);
-#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
- HDfprintf(stdout,
+#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+ HDfprintf(stdout,
"%s writing checker boards selections of slices from big ds to slices of small ds on disk.\n",
fcnName);
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
/* in serial versions of this test, we loop through all the dimensions
- * of the large data set that don't appear in the small data set.
+ * of the large data set that don't appear in the small data set.
*
- * However, in the parallel version, each process only works with that
- * slice of the large (and small) data set indicated by its rank -- hence
- * we set the most slowly changing index to mpi_rank, and don't itterate
+ * However, in the parallel version, each process only works with that
+ * slice of the large (and small) data set indicated by its rank -- hence
+ * we set the most slowly changing index to mpi_rank, and don't iterate
* over it.
*/
@@ -3709,9 +3707,9 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
i = 0;
}
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
* loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
+ * we are setting it to zero. It will not change during the
* test.
*/
@@ -3736,7 +3734,7 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
}
do {
- /* since small rank >= 2 and large_rank > small_rank, we
+ /* since small rank >= 2 and large_rank > small_rank, we
* have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
* (barring major re-organization), this gives us:
*
@@ -3760,7 +3758,7 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
* by the assertions at the head of this function. Thus no
* need for another inner loop.
*/
-
+
/* zero out this rank's slice of the on disk small data set */
ret = H5Dwrite(tv_ptr->small_dataset,
H5T_NATIVE_UINT32,
@@ -3769,7 +3767,7 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->xfer_plist,
tv_ptr->small_ds_buf_2);
VRFY((ret >= 0), "H5Dwrite() zero slice to small ds succeeded.");
-
+
/* select the portion of the in memory large cube from which we
* are going to write data.
*/
@@ -3778,13 +3776,13 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->start[2] = (hsize_t)k;
tv_ptr->start[3] = (hsize_t)l;
tv_ptr->start[4] = 0;
-
+
HDassert((tv_ptr->start[0] == 0)||(0 < tv_ptr->small_ds_offset + 1));
HDassert((tv_ptr->start[1] == 0)||(1 < tv_ptr->small_ds_offset + 1));
HDassert((tv_ptr->start[2] == 0)||(2 < tv_ptr->small_ds_offset + 1));
HDassert((tv_ptr->start[3] == 0)||(3 < tv_ptr->small_ds_offset + 1));
HDassert((tv_ptr->start[4] == 0)||(4 < tv_ptr->small_ds_offset + 1));
-
+
ckrbrd_hs_dr_pio_test__slct_ckrbrd
(
tv_ptr->mpi_rank,
@@ -3795,26 +3793,26 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->small_rank - 1,
tv_ptr->start
);
-
-
+
+
/* verify that H5S_select_shape_same() reports the in
- * memory checkerboard selection of the slice through the
+ * memory checkerboard selection of the slice through the
* large dataset and the checkerboard selection of the process
* slice of the small data set as having the same shape.
*/
check = H5S_select_shape_same_test(tv_ptr->file_small_ds_sid_1,
tv_ptr->mem_large_ds_sid);
VRFY((check == TRUE), "H5S_select_shape_same_test passed.");
-
-
- /* write the checker board selection of the slice from the in
- * memory large data set to the slice of the on disk small
- * dataset.
+
+
+ /* write the checker board selection of the slice from the in
+ * memory large data set to the slice of the on disk small
+ * dataset.
*/
-#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
- HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
+#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
fcnName, tv_ptr->mpi_rank,
- tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2],
+ tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2],
tv_ptr->start[3], tv_ptr->start[4]);
HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
fcnName, tv_ptr->mpi_rank,
@@ -3828,8 +3826,8 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->xfer_plist,
tv_ptr->large_ds_buf_0);
VRFY((ret >= 0), "H5Dwrite() slice to large ds succeeded.");
-
-
+
+
/* read the on disk process slice of the small dataset into memory */
ret = H5Dread(tv_ptr->small_dataset,
H5T_NATIVE_UINT32,
@@ -3838,30 +3836,30 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->xfer_plist,
tv_ptr->small_ds_buf_1);
VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
-
-
+
+
/* verify that expected data is retrieved */
-
+
mis_match = FALSE;
-
+
expected_value = (uint32_t)(
- (i * tv_ptr->edge_size * tv_ptr->edge_size *
+ (i * tv_ptr->edge_size * tv_ptr->edge_size *
tv_ptr->edge_size * tv_ptr->edge_size) +
(j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
(k * tv_ptr->edge_size * tv_ptr->edge_size) +
(l * tv_ptr->edge_size));
-
+
start_index = (size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size;
stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
-
+
HDassert( start_index < stop_index );
HDassert( stop_index <= tv_ptr->small_ds_size );
-
+
data_ok = TRUE;
-
+
ptr_1 = tv_ptr->small_ds_buf_1;
for ( u = 0; u < start_index; u++, ptr_1++ ) {
-
+
if ( *ptr_1 != 0 ) {
data_ok = FALSE;
@@ -3890,7 +3888,7 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
}
}
- VRFY((data_ok == TRUE),
+ VRFY((data_ok == TRUE),
"large slice write slice to small slice data good.");
(tv_ptr->tests_run)++;
@@ -3918,31 +3916,31 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/*-------------------------------------------------------------------------
- * Function: ckrbrd_hs_dr_pio_test__m2d_s2l()
+ * Function: ckrbrd_hs_dr_pio_test__m2d_s2l()
*
- * Purpose: Part four of a series of tests of I/O to/from checker
- * board hyperslab selections of different rank in the parallel.
+ * Purpose: Part four of a series of tests of I/O to/from checker
+ * board hyperslab selections of different rank in the parallel case.
*
- * Verify that we can write from memory to file using
- * selections of different rank that H5S_select_shape_same()
- * views as being of the same shape.
+ * Verify that we can write from memory to file using
+ * selections of different rank that H5S_select_shape_same()
+ * views as being of the same shape.
*
- * Do this by writing checker board selections of the contents
- * of the process's slice of the in memory small data set to
- * slices of the on disk large data set. After each write,
- * read the process's slice of the large data set back into
- * memory, and verify that it contains the expected data.
+ * Do this by writing checker board selections of the contents
+ * of the process's slice of the in memory small data set to
+ * slices of the on disk large data set. After each write,
+ * read the process's slice of the large data set back into
+ * memory, and verify that it contains the expected data.
*
- * Verify that H5S_select_shape_same() returns true on the
- * memory and file selections.
+ * Verify that H5S_select_shape_same() returns true on the
+ * memory and file selections.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 8/15/11
+ * Programmer: JRM -- 8/15/11
*
* Modifications:
*
- * None
+ * None
*
*-------------------------------------------------------------------------
*/
@@ -3952,31 +3950,31 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
static void
ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
{
-#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__m2d_s2l()";
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
- hbool_t data_ok = FALSE;
- hbool_t mis_match = FALSE;
- int i, j, k, l;
+ hbool_t data_ok = FALSE;
+ hbool_t mis_match = FALSE;
+ int i, j, k, l;
size_t u;
size_t start_index;
size_t stop_index;
- uint32_t expected_value;
+ uint32_t expected_value;
uint32_t * ptr_1;
- int mpi_rank; /* needed by VRFY */
+ int mpi_rank; /* needed by VRFY */
hsize_t sel_start[PAR_SS_DR_MAX_RANK];
htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
- /* Now write the contents of the process's slice of the in memory
- * small data set to slices of the on disk large data set. After
+ /* Now write the contents of the process's slice of the in memory
+ * small data set to slices of the on disk large data set. After
* each write, read the process's slice of the large data set back
- * into memory, and verify that it contains the expected data.
- * Verify that H5S_select_shape_same() returns true on the memory
+ * into memory, and verify that it contains the expected data.
+ * Verify that H5S_select_shape_same() returns true on the memory
* and file selections.
*/
@@ -4009,7 +4007,7 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->block);
VRFY((ret >= 0), "H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, set) suceeded");
- /* setup a checkerboard selection of the slice of the in memory small
+ /* setup a checkerboard selection of the slice of the in memory small
* data set associated with the process's mpi rank.
*/
@@ -4025,7 +4023,7 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
sel_start);
/* set up start, stride, count, and block -- note that we will
- * change start[] so as to write checkerboard selections of slices
+ * change start[] so as to write checkerboard selections of slices
* of the small data set to slices of the large data set.
*/
for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) {
@@ -4047,7 +4045,7 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s writing process checkerboard selections of slices of small ds to process slices of large ds on disk.\n",
fcnName);
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
@@ -4061,9 +4059,9 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
i = 0;
}
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
* loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
+ * we are setting it to zero. It will not change during the
* test.
*/
@@ -4087,7 +4085,7 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
}
do {
- /* since small rank >= 2 and large_rank > small_rank, we
+ /* since small rank >= 2 and large_rank > small_rank, we
* have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
* (barring major re-organization), this gives us:
*
@@ -4162,13 +4160,13 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
VRFY((check == TRUE), "H5S_select_shape_same_test passed");
- /* write the small data set slice from memory to the
- * target slice of the disk data set
+ /* write the small data set slice from memory to the
+ * target slice of the disk data set
*/
-#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
- HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
+#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
fcnName, tv_ptr->mpi_rank,
- tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2],
+ tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2],
tv_ptr->start[3], tv_ptr->start[4]);
HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
fcnName, tv_ptr->mpi_rank,
@@ -4181,11 +4179,11 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->file_large_ds_sid_1,
tv_ptr->xfer_plist,
tv_ptr->small_ds_buf_0);
- VRFY((ret != FAIL),
+ VRFY((ret != FAIL),
"H5Dwrite of small ds slice to large ds succeeded");
- /* read this processes slice on the on disk large
+ /* read this process's slice of the on disk large
* data set into memory.
*/
@@ -4195,18 +4193,18 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->file_large_ds_sid_0,
tv_ptr->xfer_plist,
tv_ptr->large_ds_buf_1);
- VRFY((ret != FAIL),
+ VRFY((ret != FAIL),
"H5Dread() of process slice of large ds succeeded");
/* verify that the expected data and only the
* expected data was read.
*/
- expected_value =
+ expected_value =
(uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
start_index = (size_t)
- ((i * tv_ptr->edge_size * tv_ptr->edge_size *
+ ((i * tv_ptr->edge_size * tv_ptr->edge_size *
tv_ptr->edge_size * tv_ptr->edge_size) +
(j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
(k * tv_ptr->edge_size * tv_ptr->edge_size) +
@@ -4252,7 +4250,7 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
}
}
- VRFY((data_ok == TRUE),
+ VRFY((data_ok == TRUE),
"small ds cb slice write to large ds slice data good.");
(tv_ptr->tests_run)++;
@@ -4280,22 +4278,22 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
/*-------------------------------------------------------------------------
- * Function: ckrbrd_hs_dr_pio_test__run_test()
+ * Function: ckrbrd_hs_dr_pio_test__run_test()
*
- * Purpose: Test I/O to/from checkerboard selections of hyperslabs of
- * different rank in the parallel.
+ * Purpose: Test I/O to/from checkerboard selections of hyperslabs of
+ * different rank in the parallel case.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 10/10/09
+ * Programmer: JRM -- 10/10/09
*
* Modifications:
*
- * JRM -- 9/16/10
- * Added the express_test parameter. Use it to control
- * whether we set an alignment, and whether we allocate
- * chunks such that no two processes will normally touch
- * the same chunk.
+ * JRM -- 9/16/10
+ * Added the express_test parameter. Use it to control
+ * whether we set an alignment, and whether we allocate
+ * chunks such that no two processes will normally touch
+ * the same chunk.
*
*-------------------------------------------------------------------------
*/
@@ -4322,10 +4320,10 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__run_test()";
#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
- int mpi_rank; /* needed by VRFY */
- struct hs_dr_pio_test_vars_t test_vars =
+ int mpi_rank; /* needed by VRFY */
+ struct hs_dr_pio_test_vars_t test_vars =
{
- /* int mpi_size = */ -1,
+ /* int mpi_size = */ -1,
/* int mpi_rank = */ -1,
/* MPI_Comm mpi_comm = */ MPI_COMM_NULL,
/* MPI_Inf mpi_info = */ MPI_INFO_NULL,
@@ -4341,7 +4339,7 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
/* uint32_t * small_ds_buf_2 = */ NULL,
/* uint32_t * small_ds_slice_buf = */ NULL,
/* uint32_t * large_ds_buf_0 = */ NULL,
- /* uint32_t * large_ds_buf_1 = */ NULL,
+ /* uint32_t * large_ds_buf_1 = */ NULL,
/* uint32_t * large_ds_buf_2 = */ NULL,
/* uint32_t * large_ds_slice_buf = */ NULL,
/* int small_ds_offset = */ -1,
@@ -4378,17 +4376,17 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
/* hsize_t * stride_ptr = */ NULL,
/* hsize_t * count_ptr = */ NULL,
/* hsize_t * block_ptr = */ NULL,
- /* int skips = */ 0,
- /* int max_skips = */ 0,
+ /* int skips = */ 0,
+ /* int max_skips = */ 0,
/* int64_t total_tests = */ 0,
/* int64_t tests_run = */ 0,
/* int64_t tests_skipped = */ 0
};
struct hs_dr_pio_test_vars_t * tv_ptr = &test_vars;
- hs_dr_pio_test__setup(test_num, edge_size, checker_edge_size,
- chunk_edge_size, small_rank, large_rank,
- use_collective_io, dset_type, express_test,
+ hs_dr_pio_test__setup(test_num, edge_size, checker_edge_size,
+ chunk_edge_size, small_rank, large_rank,
+ use_collective_io, dset_type, express_test,
tv_ptr);
@@ -4414,9 +4412,9 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
* of different rank that H5S_select_shape_same() views as being of the
* same shape.
*
- * Start by reading a (small_rank - 1)-D slice from this processes slice
- * of the on disk large data set, and verifying that the data read is
- * correct. Verify that H5S_select_shape_same() returns true on the
+ * Start by reading a (small_rank - 1)-D slice from this process's slice
+ * of the on disk large data set, and verifying that the data read is
+ * correct. Verify that H5S_select_shape_same() returns true on the
* memory and file selections.
*
* The first step is to set up the needed checker board selection in the
@@ -4426,8 +4424,8 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
ckrbrd_hs_dr_pio_test__d2m_l2s(tv_ptr);
- /* similarly, read slices of the on disk small data set into slices
- * through the in memory large data set, and verify that the correct
+ /* similarly, read slices of the on disk small data set into slices
+ * through the in memory large data set, and verify that the correct
* data (and only the correct data) is read.
*/
@@ -4439,20 +4437,20 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
* H5S_select_shape_same() views as being of the same shape.
*
* Start by writing small_rank - 1 D slices from the in memory large data
- * set to the on disk small dataset. After each write, read the slice of
- * the small dataset back from disk, and verify that it contains the
- * expected data. Verify that H5S_select_shape_same() returns true on
+ * set to the on disk small dataset. After each write, read the slice of
+ * the small dataset back from disk, and verify that it contains the
+ * expected data. Verify that H5S_select_shape_same() returns true on
* the memory and file selections.
*/
ckrbrd_hs_dr_pio_test__m2d_l2s(tv_ptr);
- /* Now write the contents of the process's slice of the in memory
- * small data set to slices of the on disk large data set. After
+ /* Now write the contents of the process's slice of the in memory
+ * small data set to slices of the on disk large data set. After
* each write, read the process's slice of the large data set back
- * into memory, and verify that it contains the expected data.
- * Verify that H5S_select_shape_same() returns true on the memory
+ * into memory, and verify that it contains the expected data.
+ * Verify that H5S_select_shape_same() returns true on the memory
* and file selections.
*/
@@ -4461,8 +4459,8 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
if ( MAINPROCESS ) {
- HDfprintf(stdout,
- "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n",
+ HDfprintf(stdout,
+ "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n",
test_num, (long long)(tv_ptr->tests_run), (long long)(tv_ptr->tests_skipped),
(long long)(tv_ptr->total_tests));
}
@@ -4487,28 +4485,28 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
/*-------------------------------------------------------------------------
- * Function: ckrbrd_hs_dr_pio_test()
+ * Function: ckrbrd_hs_dr_pio_test()
*
- * Purpose: Test I/O to/from hyperslab selections of different rank in
- * the parallel case.
+ * Purpose: Test I/O to/from hyperslab selections of different rank in
+ * the parallel case.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 9/18/09
+ * Programmer: JRM -- 9/18/09
*
* Modifications:
*
- * Modified function to take a sample of the run times
- * of the different tests, and skip some of them if
- * run times are too long.
+ * Modified function to take a sample of the run times
+ * of the different tests, and skip some of them if
+ * run times are too long.
*
- * We need to do this because Lustre runns very slowly
- * if two or more processes are banging on the same
- * block of memory.
- * JRM -- 9/10/10
- * Break this one big test into 4 smaller tests according
- * to {independent,collective}x{contigous,chunked} datasets.
- * AKC -- 2010/01/17
+ * We need to do this because Lustre runs very slowly
+ * if two or more processes are banging on the same
+ * block of memory.
+ * JRM -- 9/10/10
+ * Break this one big test into 4 smaller tests according
+ * to {independent,collective}x{contiguous,chunked} datasets.
+ * AKC -- 2010/01/17
*
*-------------------------------------------------------------------------
*/
@@ -4518,16 +4516,16 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
{
int express_test;
int local_express_test;
- int mpi_size = -1;
+ int mpi_size = -1;
int mpi_rank = -1;
- int test_num = 0;
- int edge_size;
+ int test_num = 0;
+ int edge_size;
int checker_edge_size = 3;
- int chunk_edge_size = 0;
- int small_rank = 3;
- int large_rank = 4;
- int mpi_result;
- hid_t dset_type = H5T_NATIVE_UINT;
+ int chunk_edge_size = 0;
+ int small_rank = 3;
+ int large_rank = 4;
+ int mpi_result;
+ hid_t dset_type = H5T_NATIVE_UINT;
int skips = 0;
int max_skips = 0;
/* The following table lists the number of sub-tests skipped between
@@ -4566,13 +4564,13 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
max_skips = max_skips_tbl[local_express_test];
}
-#if 0
+#if 0
{
int DebugWait = 1;
-
+
while (DebugWait) ;
}
-#endif
+#endif
for ( large_rank = 3; large_rank <= PAR_SS_DR_MAX_RANK; large_rank++ ) {
@@ -4702,15 +4700,15 @@ int dim0;
int dim1;
int chunkdim0;
int chunkdim1;
-int nerrors = 0; /* errors count */
-int ndatasets = 300; /* number of datasets to create*/
+int nerrors = 0; /* errors count */
+int ndatasets = 300; /* number of datasets to create*/
int ngroups = 512; /* number of groups to create in root
* group. */
-int facc_type = FACC_MPIO; /*Test file access type */
+int facc_type = FACC_MPIO; /*Test file access type */
int dxfer_coll_type = DXFER_COLLECTIVE_IO;
-H5E_auto2_t old_func; /* previous error handler */
-void *old_client_data; /* previous error handler arg.*/
+H5E_auto2_t old_func; /* previous error handler */
+void *old_client_data; /* previous error handler arg.*/
/* other option flags */
@@ -4722,10 +4720,10 @@ void *old_client_data; /* previous error handler arg.*/
#define NFILENAME 2
#define PARATESTFILE filenames[0]
const char *FILENAME[NFILENAME]={
- "ShapeSameTest",
- NULL};
-char filenames[NFILENAME][PATH_MAX];
-hid_t fapl; /* file access property list */
+ "ShapeSameTest",
+ NULL};
+char filenames[NFILENAME][PATH_MAX];
+hid_t fapl; /* file access property list */
#ifdef USE_PAUSE
/* pause the process for a moment to allow debugger to attach if desired. */
@@ -4738,7 +4736,7 @@ void pause_proc(void)
{
int pid;
- h5_stat_t statbuf;
+ h5_stat_t statbuf;
char greenlight[] = "go";
int maxloop = 10;
int loops = 0;
@@ -4755,15 +4753,15 @@ void pause_proc(void)
MPI_Get_processor_name(mpi_name, &mpi_namelen);
if (MAINPROCESS)
- while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop){
- if (!loops++){
- printf("Proc %d (%*s, %d): to debug, attach %d\n",
- mpi_rank, mpi_namelen, mpi_name, pid, pid);
- }
- printf("waiting(%ds) for file %s ...\n", time_int, greenlight);
- fflush(stdout);
+ while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop){
+ if (!loops++){
+ printf("Proc %d (%*s, %d): to debug, attach %d\n",
+ mpi_rank, mpi_namelen, mpi_name, pid, pid);
+ }
+ printf("waiting(%ds) for file %s ...\n", time_int, greenlight);
+ fflush(stdout);
HDsleep(time_int);
- }
+ }
MPI_Barrier(MPI_COMM_WORLD);
}
@@ -4775,7 +4773,7 @@ int MPI_Init(int *argc, char ***argv)
pause_proc();
return (ret_code);
}
-#endif /* USE_PAUSE */
+#endif /* USE_PAUSE */
/*
@@ -4785,15 +4783,15 @@ static void
usage(void)
{
printf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] "
- "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n");
+ "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n");
printf("\t-m<n_datasets>"
- "\tset number of datasets for the multiple dataset test\n");
+ "\tset number of datasets for the multiple dataset test\n");
printf("\t-n<n_groups>"
"\tset number of groups for the multiple group test\n");
printf("\t-f <prefix>\tfilename prefix\n");
printf("\t-2\t\tuse Split-file together with MPIO\n");
printf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n",
- ROW_FACTOR, COL_FACTOR);
+ ROW_FACTOR, COL_FACTOR);
printf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n");
printf("\n");
}
@@ -4805,7 +4803,7 @@ usage(void)
static int
parse_options(int argc, char **argv)
{
- int mpi_size, mpi_rank; /* mpi variables */
+ int mpi_size, mpi_rank; /* mpi variables */
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -4816,107 +4814,107 @@ parse_options(int argc, char **argv)
chunkdim1 = (dim1+9)/10;
while (--argc){
- if (**(++argv) != '-'){
- break;
- }else{
- switch(*(*argv+1)){
- case 'm': ndatasets = atoi((*argv+1)+1);
- if (ndatasets < 0){
- nerrors++;
- return(1);
- }
- break;
- case 'n': ngroups = atoi((*argv+1)+1);
- if (ngroups < 0){
+ if (**(++argv) != '-'){
+ break;
+ }else{
+ switch(*(*argv+1)){
+ case 'm': ndatasets = atoi((*argv+1)+1);
+ if (ndatasets < 0){
+ nerrors++;
+ return(1);
+ }
+ break;
+ case 'n': ngroups = atoi((*argv+1)+1);
+ if (ngroups < 0){
nerrors++;
return(1);
- }
+ }
break;
- case 'f': if (--argc < 1) {
- nerrors++;
- return(1);
- }
- if (**(++argv) == '-') {
- nerrors++;
- return(1);
- }
- paraprefix = *argv;
- break;
- case 'i': /* Collective MPI-IO access with independent IO */
- dxfer_coll_type = DXFER_INDEPENDENT_IO;
- break;
- case '2': /* Use the split-file driver with MPIO access */
- /* Can use $HDF5_METAPREFIX to define the */
- /* meta-file-prefix. */
- facc_type = FACC_MPIO | FACC_SPLIT;
- break;
- case 'd': /* dimensizes */
- if (--argc < 2){
- nerrors++;
- return(1);
- }
- dim0 = atoi(*(++argv))*mpi_size;
- argc--;
- dim1 = atoi(*(++argv))*mpi_size;
- /* set default chunkdim sizes too */
- chunkdim0 = (dim0+9)/10;
- chunkdim1 = (dim1+9)/10;
- break;
- case 'c': /* chunk dimensions */
- if (--argc < 2){
- nerrors++;
- return(1);
- }
- chunkdim0 = atoi(*(++argv));
- argc--;
- chunkdim1 = atoi(*(++argv));
- break;
- case 'h': /* print help message--return with nerrors set */
- return(1);
- default: printf("Illegal option(%s)\n", *argv);
- nerrors++;
- return(1);
- }
- }
+ case 'f': if (--argc < 1) {
+ nerrors++;
+ return(1);
+ }
+ if (**(++argv) == '-') {
+ nerrors++;
+ return(1);
+ }
+ paraprefix = *argv;
+ break;
+ case 'i': /* Collective MPI-IO access with independent IO */
+ dxfer_coll_type = DXFER_INDEPENDENT_IO;
+ break;
+ case '2': /* Use the split-file driver with MPIO access */
+ /* Can use $HDF5_METAPREFIX to define the */
+ /* meta-file-prefix. */
+ facc_type = FACC_MPIO | FACC_SPLIT;
+ break;
+ case 'd': /* dimension sizes */
+ if (--argc < 2){
+ nerrors++;
+ return(1);
+ }
+ dim0 = atoi(*(++argv))*mpi_size;
+ argc--;
+ dim1 = atoi(*(++argv))*mpi_size;
+ /* set default chunkdim sizes too */
+ chunkdim0 = (dim0+9)/10;
+ chunkdim1 = (dim1+9)/10;
+ break;
+ case 'c': /* chunk dimensions */
+ if (--argc < 2){
+ nerrors++;
+ return(1);
+ }
+ chunkdim0 = atoi(*(++argv));
+ argc--;
+ chunkdim1 = atoi(*(++argv));
+ break;
+ case 'h': /* print help message--return with nerrors set */
+ return(1);
+ default: printf("Illegal option(%s)\n", *argv);
+ nerrors++;
+ return(1);
+ }
+ }
} /*while*/
/* check validity of dimension and chunk sizes */
if (dim0 <= 0 || dim1 <= 0){
- printf("Illegal dim sizes (%d, %d)\n", dim0, dim1);
- nerrors++;
- return(1);
+ printf("Illegal dim sizes (%d, %d)\n", dim0, dim1);
+ nerrors++;
+ return(1);
}
if (chunkdim0 <= 0 || chunkdim1 <= 0){
- printf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1);
- nerrors++;
- return(1);
+ printf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1);
+ nerrors++;
+ return(1);
}
/* Make sure datasets can be divided into equal portions by the processes */
if ((dim0 % mpi_size) || (dim1 % mpi_size)){
- if (MAINPROCESS)
- printf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n",
- dim0, dim1, mpi_size);
- nerrors++;
- return(1);
+ if (MAINPROCESS)
+ printf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n",
+ dim0, dim1, mpi_size);
+ nerrors++;
+ return(1);
}
/* compose the test filenames */
{
- int i, n;
-
- n = sizeof(FILENAME)/sizeof(FILENAME[0]) - 1; /* exclude the NULL */
-
- for (i=0; i < n; i++)
- if (h5_fixname(FILENAME[i],fapl,filenames[i],sizeof(filenames[i]))
- == NULL){
- printf("h5_fixname failed\n");
- nerrors++;
- return(1);
- }
- printf("Test filenames are:\n");
- for (i=0; i < n; i++)
- printf(" %s\n", filenames[i]);
+ int i, n;
+
+ n = sizeof(FILENAME)/sizeof(FILENAME[0]) - 1; /* exclude the NULL */
+
+ for (i=0; i < n; i++)
+ if (h5_fixname(FILENAME[i],fapl,filenames[i],sizeof(filenames[i]))
+ == NULL){
+ printf("h5_fixname failed\n");
+ nerrors++;
+ return(1);
+ }
+ printf("Test filenames are:\n");
+ for (i=0; i < n; i++)
+ printf(" %s\n", filenames[i]);
}
return(0);
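
For readers skimming the hunk above: the -d factors are multiplied by the MPI communicator size, and the default chunk dimensions are the resulting dims divided by 10, rounded up. A minimal stand-alone sketch of that arithmetic (the rank count and factor values are made up for illustration, not taken from the test):

    #include <stdio.h>

    int main(void)
    {
        int mpi_size = 4;                 /* assumed number of MPI ranks  */
        int factor0  = 2, factor1 = 3;    /* values passed with "-d 2 3"  */
        int dim0 = factor0 * mpi_size;    /* 8                            */
        int dim1 = factor1 * mpi_size;    /* 12                           */
        int chunkdim0 = (dim0 + 9) / 10;  /* ceil(dim0 / 10) = 1          */
        int chunkdim1 = (dim1 + 9) / 10;  /* ceil(dim1 / 10) = 2          */

        printf("dims = (%d, %d), default chunk dims = (%d, %d)\n",
               dim0, dim1, chunkdim0, chunkdim1);
        return 0;
    }
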
@@ -4931,7 +4929,7 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
{
hid_t ret_pl = -1;
herr_t ret; /* generic return value */
- int mpi_rank; /* mpi variables */
+ int mpi_rank; /* mpi variables */
/* need the rank for error checking macros */
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -4940,36 +4938,36 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
VRFY((ret_pl >= 0), "H5P_FILE_ACCESS");
if (l_facc_type == FACC_DEFAULT)
- return (ret_pl);
+ return (ret_pl);
if (l_facc_type == FACC_MPIO){
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(ret_pl, comm, info);
- VRFY((ret >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(ret_pl, comm, info);
+ VRFY((ret >= 0), "");
ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
- VRFY((ret >= 0), "");
+ VRFY((ret >= 0), "");
ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
- VRFY((ret >= 0), "");
- return(ret_pl);
+ VRFY((ret >= 0), "");
+ return(ret_pl);
}
if (l_facc_type == (FACC_MPIO | FACC_SPLIT)){
- hid_t mpio_pl;
-
- mpio_pl = H5Pcreate (H5P_FILE_ACCESS);
- VRFY((mpio_pl >= 0), "");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
- VRFY((ret >= 0), "");
-
- /* setup file access template */
- ret_pl = H5Pcreate (H5P_FILE_ACCESS);
- VRFY((ret_pl >= 0), "");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
- VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
- H5Pclose(mpio_pl);
- return(ret_pl);
+ hid_t mpio_pl;
+
+ mpio_pl = H5Pcreate (H5P_FILE_ACCESS);
+ VRFY((mpio_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
+ VRFY((ret >= 0), "");
+
+ /* setup file access template */
+ ret_pl = H5Pcreate (H5P_FILE_ACCESS);
+ VRFY((ret_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
+ VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
+ H5Pclose(mpio_pl);
+ return(ret_pl);
}
/* unknown file access types */
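
The hunk above only re-indents create_faccess_plist(), but the calls it contains are the heart of the test's file-access setup. A condensed sketch of the same two configurations, using only HDF5 calls that appear in the hunk (the helper name and the reduced error handling are illustrative, not from the test):

    #include "hdf5.h"
    #include <mpi.h>

    /* Build either a plain MPI-IO fapl or a split-file fapl whose .meta and
     * .raw members both use MPI-IO.  Returns a negative value on failure. */
    static hid_t make_mpio_fapl(MPI_Comm comm, MPI_Info info, int use_split)
    {
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

        if (fapl < 0)
            return -1;

        if (!use_split) {
            if (H5Pset_fapl_mpio(fapl, comm, info) < 0)
                return -1;
            return fapl;
        }

        /* split driver: one child fapl drives both the meta and raw files */
        {
            hid_t mpio_pl = H5Pcreate(H5P_FILE_ACCESS);

            if (mpio_pl < 0 || H5Pset_fapl_mpio(mpio_pl, comm, info) < 0)
                return -1;
            if (H5Pset_fapl_split(fapl, ".meta", mpio_pl, ".raw", mpio_pl) < 0)
                return -1;
            H5Pclose(mpio_pl);
        }
        return fapl;
    }
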
@@ -5037,7 +5035,7 @@ sschecker4(void)
int main(int argc, char **argv)
{
- int mpi_size, mpi_rank; /* mpi variables */
+ int mpi_size, mpi_rank; /* mpi variables */
#ifndef H5_HAVE_WIN32_API
/* Un-buffer the stdout and stderr */
@@ -5053,10 +5051,10 @@ int main(int argc, char **argv)
dim1 = COL_FACTOR*mpi_size;
if (MAINPROCESS){
- printf("===================================\n");
- printf("Shape Same Tests Start\n");
- printf(" express_test = %d.\n", GetTestExpress());
- printf("===================================\n");
+ printf("===================================\n");
+ printf("Shape Same Tests Start\n");
+ printf(" express_test = %d.\n", GetTestExpress());
+ printf("===================================\n");
}
/* Attempt to turn off atexit post processing so that in case errors
@@ -5065,7 +5063,7 @@ int main(int argc, char **argv)
* calls. By then, MPI calls may not work.
*/
if (H5dont_atexit() < 0){
- printf("%d: Failed to turn off atexit processing. Continue.\n", mpi_rank);
+ printf("%d: Failed to turn off atexit processing. Continue.\n", mpi_rank);
};
H5open();
h5_show_hostname();
@@ -5075,23 +5073,23 @@ int main(int argc, char **argv)
/* Shape Same tests using contigous hyperslab */
AddTest("sscontig1", sscontig1, NULL,
- "Cntg hslab, ind IO, cntg dsets", PARATESTFILE);
+ "Cntg hslab, ind IO, cntg dsets", PARATESTFILE);
AddTest("sscontig2", sscontig2, NULL,
- "Cntg hslab, col IO, cntg dsets", PARATESTFILE);
+ "Cntg hslab, col IO, cntg dsets", PARATESTFILE);
AddTest("sscontig3", sscontig3, NULL,
- "Cntg hslab, ind IO, chnk dsets", PARATESTFILE);
+ "Cntg hslab, ind IO, chnk dsets", PARATESTFILE);
AddTest("sscontig4", sscontig4, NULL,
- "Cntg hslab, col IO, chnk dsets", PARATESTFILE);
+ "Cntg hslab, col IO, chnk dsets", PARATESTFILE);
/* Shape Same tests using checker board hyperslab */
AddTest("sschecker1", sschecker1, NULL,
- "Check hslab, ind IO, cntg dsets", PARATESTFILE);
+ "Check hslab, ind IO, cntg dsets", PARATESTFILE);
AddTest("sschecker2", sschecker2, NULL,
- "Check hslab, col IO, cntg dsets", PARATESTFILE);
+ "Check hslab, col IO, cntg dsets", PARATESTFILE);
AddTest("sschecker3", sschecker3, NULL,
- "Check hslab, ind IO, chnk dsets", PARATESTFILE);
+ "Check hslab, ind IO, chnk dsets", PARATESTFILE);
AddTest("sschecker4", sschecker4, NULL,
- "Check hslab, col IO, chnk dsets", PARATESTFILE);
+ "Check hslab, col IO, chnk dsets", PARATESTFILE);
/* Display testing information */
TestInfo(argv[0]);
@@ -5104,9 +5102,9 @@ int main(int argc, char **argv)
TestParseCmdLine(argc, argv);
if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS){
- printf("===================================\n"
- " Using Independent I/O with file set view to replace collective I/O \n"
- "===================================\n");
+ printf("===================================\n"
+ " Using Independent I/O with file set view to replace collective I/O \n"
+ "===================================\n");
}
@@ -5131,16 +5129,16 @@ int main(int argc, char **argv)
{
int temp;
MPI_Allreduce(&nerrors, &temp, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
- nerrors=temp;
+ nerrors=temp;
}
- if (MAINPROCESS){ /* only process 0 reports */
- printf("===================================\n");
- if (nerrors)
- printf("***Shape Same tests detected %d errors***\n", nerrors);
- else
- printf("Shape Same tests finished with no errors\n");
- printf("===================================\n");
+ if (MAINPROCESS){ /* only process 0 reports */
+ printf("===================================\n");
+ if (nerrors)
+ printf("***Shape Same tests detected %d errors***\n", nerrors);
+ else
+ printf("Shape Same tests finished with no errors\n");
+ printf("===================================\n");
}
/* close HDF5 library */
diff --git a/testpar/t_span_tree.c b/testpar/t_span_tree.c
index a42df95..20bc4ac 100644
--- a/testpar/t_span_tree.c
+++ b/testpar/t_span_tree.c
@@ -23,17 +23,15 @@
2) We will read two datasets with the same hyperslab selection settings,
1. independent read to read independent output,
independent read to read collective output,
- Compare the result,
- If the result is the same, then collective write succeeds.
+ Compare the result,
+ If the result is the same, then collective write succeeds.
2. collective read to read independent output,
independent read to read independent output,
- Compare the result,
- If the result is the same, then collective read succeeds.
+ Compare the result,
+ If the result is the same, then collective read succeeds.
*/
-#include "hdf5.h"
-#include "H5private.h"
#include "testphdf5.h"
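
The strategy spelled out in the file comment above boils down to reading both outputs back with independent I/O and comparing the buffers. A minimal sketch of that comparison step (the dataset handles, buffer size, and helper name are assumptions for illustration; the real test uses VRFY and per-element checks):

    #include "hdf5.h"
    #include <string.h>

    /* Return nonzero when the independently written and collectively written
     * datasets hold identical data for the same memory/file selections. */
    static int outputs_match(hid_t indep_dset, hid_t coll_dset,
                             hid_t mspace, hid_t fspace, size_t npoints)
    {
        int buf_indep[1024];   /* assumes npoints <= 1024 */
        int buf_coll[1024];

        /* H5P_DEFAULT => independent reads of both outputs */
        if (H5Dread(indep_dset, H5T_NATIVE_INT, mspace, fspace,
                    H5P_DEFAULT, buf_indep) < 0)
            return 0;
        if (H5Dread(coll_dset, H5T_NATIVE_INT, mspace, fspace,
                    H5P_DEFAULT, buf_coll) < 0)
            return 0;

        return memcmp(buf_indep, buf_coll, npoints * sizeof(int)) == 0;
    }
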
@@ -42,17 +40,17 @@ static void coll_read_test(int chunk_factor);
/*-------------------------------------------------------------------------
- * Function: coll_irregular_cont_write
+ * Function: coll_irregular_cont_write
*
- * Purpose: Wrapper to test the collectively irregular hyperslab write in
+ * Purpose: Wrapper to test the collectively irregular hyperslab write in
contiguous storage
*
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Unknown
- * Dec 2nd, 2004
+ * Programmer: Unknown
+ * Dec 2nd, 2004
*
* Modifications:
*
@@ -69,17 +67,17 @@ coll_irregular_cont_write(void)
/*-------------------------------------------------------------------------
- * Function: coll_irregular_cont_read
+ * Function: coll_irregular_cont_read
*
- * Purpose: Wrapper to test the collectively irregular hyperslab read in
+ * Purpose: Wrapper to test the collectively irregular hyperslab read in
contiguous storage
*
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Unknown
- * Dec 2nd, 2004
+ * Programmer: Unknown
+ * Dec 2nd, 2004
*
* Modifications:
*
@@ -95,17 +93,17 @@ coll_irregular_cont_read(void)
/*-------------------------------------------------------------------------
- * Function: coll_irregular_simple_chunk_write
+ * Function: coll_irregular_simple_chunk_write
*
- * Purpose: Wrapper to test the collectively irregular hyperslab write in
+ * Purpose: Wrapper to test the collectively irregular hyperslab write in
chunk storage(1 chunk)
*
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Unknown
- * Dec 2nd, 2004
+ * Programmer: Unknown
+ * Dec 2nd, 2004
*
* Modifications:
*
@@ -122,17 +120,17 @@ coll_irregular_simple_chunk_write(void)
/*-------------------------------------------------------------------------
- * Function: coll_irregular_simple_chunk_read
+ * Function: coll_irregular_simple_chunk_read
*
- * Purpose: Wrapper to test the collectively irregular hyperslab read in chunk
+ * Purpose: Wrapper to test the collectively irregular hyperslab read in chunk
storage(1 chunk)
*
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Unknown
- * Dec 2nd, 2004
+ * Programmer: Unknown
+ * Dec 2nd, 2004
*
* Modifications:
*
@@ -147,17 +145,17 @@ coll_irregular_simple_chunk_read(void)
}
/*-------------------------------------------------------------------------
- * Function: coll_irregular_complex_chunk_write
+ * Function: coll_irregular_complex_chunk_write
*
- * Purpose: Wrapper to test the collectively irregular hyperslab write in chunk
+ * Purpose: Wrapper to test the collectively irregular hyperslab write in chunk
storage(4 chunks)
*
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Unknown
- * Dec 2nd, 2004
+ * Programmer: Unknown
+ * Dec 2nd, 2004
*
* Modifications:
*
@@ -174,17 +172,17 @@ coll_irregular_complex_chunk_write(void)
/*-------------------------------------------------------------------------
- * Function: coll_irregular_complex_chunk_read
+ * Function: coll_irregular_complex_chunk_read
*
- * Purpose: Wrapper to test the collectively irregular hyperslab read in chunk
+ * Purpose: Wrapper to test the collectively irregular hyperslab read in chunk
storage(4 chunks)
*
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Unknown
- * Dec 2nd, 2004
+ * Programmer: Unknown
+ * Dec 2nd, 2004
*
* Modifications:
*
@@ -200,18 +198,18 @@ coll_irregular_complex_chunk_read(void)
/*-------------------------------------------------------------------------
- * Function: coll_write_test
+ * Function: coll_write_test
*
- * Purpose: To test the collectively irregular hyperslab write in chunk
+ * Purpose: To test the collectively irregular hyperslab write in chunk
storage
* Input: number of chunks on each dimension
if the number is 0, contiguous storage is used
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Unknown
- * Dec 2nd, 2004
+ * Programmer: Unknown
+ * Dec 2nd, 2004
*
* Modifications: Oct 18th, 2005
*
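
As the comment above notes, coll_write_test() takes the number of chunks per dimension and falls back to contiguous storage when that number is zero. A sketch of how such a chunk_factor can drive the dataset creation property list (names are illustrative; it assumes the dataset dimensions are multiples of chunk_factor):

    #include "hdf5.h"

    static hid_t make_dcpl(int chunk_factor, hsize_t dim0, hsize_t dim1)
    {
        hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);

        if (dcpl < 0)
            return -1;

        if (chunk_factor != 0) {
            /* chunk_factor chunks along each dimension */
            hsize_t chunk_dims[2];

            chunk_dims[0] = dim0 / (hsize_t)chunk_factor;
            chunk_dims[1] = dim1 / (hsize_t)chunk_factor;
            if (H5Pset_chunk(dcpl, 2, chunk_dims) < 0) {
                H5Pclose(dcpl);
                return -1;
            }
        }
        /* chunk_factor == 0: keep the default contiguous layout */
        return dcpl;
    }
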
@@ -229,14 +227,14 @@ void coll_write_test(int chunk_factor)
#if 0
hsize_t mdim1[] = {MSPACE1_DIM}; /* Dimension size of the first dataset
- (in memory) */
+ (in memory) */
hsize_t fsdim[] = {FSPACE_DIM1, FSPACE_DIM2}; /* Dimension sizes of the dataset
(on disk) */
hsize_t mdim[] = {MSPACE_DIM1, MSPACE_DIM2}; /* Dimension sizes of the
- dataset in memory when we
- read selection from the
- dataset on the disk */
+ dataset in memory when we
+ read selection from the
+ dataset on the disk */
#endif
hsize_t start[2]; /* Start of hyperslab */
@@ -252,7 +250,7 @@ void coll_write_test(int chunk_factor)
#if 0
int matrix_out[MSPACE_DIM1][MSPACE_DIM2];
int matrix_out1[MSPACE_DIM1][MSPACE_DIM2]; /* Buffer to read from the
- dataset */
+ dataset */
int vector[MSPACE1_DIM];
#endif
@@ -616,12 +614,12 @@ void coll_write_test(int chunk_factor)
*/
ret = H5Dread(datasetc, H5T_NATIVE_INT, mspaceid, fspaceid,
- H5P_DEFAULT, matrix_out);
+ H5P_DEFAULT, matrix_out);
VRFY((ret >= 0),"H5D independent read succeed");
ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid,
- H5P_DEFAULT, matrix_out1);
+ H5P_DEFAULT, matrix_out1);
VRFY((ret >= 0),"H5D independent read succeed");
ret = 0;
@@ -668,18 +666,18 @@ void coll_write_test(int chunk_factor)
}
/*-------------------------------------------------------------------------
- * Function: coll_read_test
+ * Function: coll_read_test
*
- * Purpose: To test the collectively irregular hyperslab read in chunk
+ * Purpose: To test the collectively irregular hyperslab read in chunk
storage
* Input: number of chunks on each dimension
if the number is 0, contiguous storage is used
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Unknown
- * Dec 2nd, 2004
+ * Programmer: Unknown
+ * Dec 2nd, 2004
*
* Modifications: Oct 18th, 2005
* Note: This test must be used with the corresponding
@@ -699,9 +697,9 @@ coll_read_test(int chunk_factor)
/* Dimension sizes of the dataset (on disk) */
#if 0
hsize_t mdim[] = {MSPACE_DIM1, MSPACE_DIM2}; /* Dimension sizes of the
- dataset in memory when we
- read selection from the
- dataset on the disk */
+ dataset in memory when we
+ read selection from the
+ dataset on the disk */
#endif
hsize_t mdim[2];
@@ -718,7 +716,7 @@ coll_read_test(int chunk_factor)
#if 0
int matrix_out[MSPACE_DIM1][MSPACE_DIM2];
int matrix_out1[MSPACE_DIM1][MSPACE_DIM2]; /* Buffer to read from the
- dataset */
+ dataset */
#endif
int mpi_size,mpi_rank;
@@ -882,7 +880,7 @@ coll_read_test(int chunk_factor)
/* Collective read */
ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1,
- dxfer_plist, matrix_out);
+ dxfer_plist, matrix_out);
VRFY((ret >= 0),"H5D collecive read succeed");
ret = H5Pclose(dxfer_plist);
@@ -890,7 +888,7 @@ coll_read_test(int chunk_factor)
/* Independent read */
ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1,
- H5P_DEFAULT, matrix_out1);
+ H5P_DEFAULT, matrix_out1);
VRFY((ret >= 0),"H5D independent read succeed");
ret = 0;
@@ -933,30 +931,30 @@ coll_read_test(int chunk_factor)
/****************************************************************
**
-** lower_dim_size_comp_test__select_checker_board():
+** lower_dim_size_comp_test__select_checker_board():
**
-** Given a data space of tgt_rank, and dimensions:
+** Given a data space of tgt_rank, and dimensions:
**
-** (mpi_size + 1), edge_size, ... , edge_size
+** (mpi_size + 1), edge_size, ... , edge_size
**
-** edge_size, and a checker_edge_size, select a checker
-** board selection of a sel_rank (sel_rank < tgt_rank)
-** dimensional slice through the data space parallel to the
+** edge_size, and a checker_edge_size, select a checker
+** board selection of a sel_rank (sel_rank < tgt_rank)
+** dimensional slice through the data space parallel to the
** sel_rank fastest changing indices, with origin (in the
-** higher indicies) as indicated by the start array.
+** higher indicies) as indicated by the start array.
**
-** Note that this function, is hard coded to presume a
-** maximum data space rank of 5.
+** Note that this function is hard coded to presume a
+** maximum data space rank of 5.
**
-** While this maximum is declared as a constant, increasing
-** it will require extensive coding in addition to changing
+** While this maximum is declared as a constant, increasing
+** it will require extensive coding in addition to changing
** the value of the constant.
**
-** JRM -- 11/11/09
+** JRM -- 11/11/09
**
****************************************************************/
-#define LDSCT_DS_RANK 5
+#define LDSCT_DS_RANK 5
#define LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK 0
#define LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG 0
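
The selection logic described above is easier to see in two dimensions: square blocks of checker_edge_size are OR-ed into the selection wherever the block-grid coordinates have an even sum. A 2-D sketch of that idea (it assumes edge_size is a multiple of checker_edge_size; the real routine handles rank-5 spaces and clips partial checkers at the edges):

    #include "hdf5.h"

    static herr_t select_checker_board_2d(hid_t sid, hsize_t edge_size,
                                          hsize_t checker_edge_size)
    {
        hsize_t start[2], count[2] = {1, 1}, block[2];
        int first = 1;
        herr_t ret = 0;

        block[0] = block[1] = checker_edge_size;

        for (hsize_t i = 0; i < edge_size; i += checker_edge_size)
            for (hsize_t j = 0; j < edge_size; j += checker_edge_size)
                if (((i / checker_edge_size + j / checker_edge_size) % 2) == 0) {
                    start[0] = i;
                    start[1] = j;
                    /* first selected checker uses SET, the rest are OR-ed in;
                     * NULL stride means a stride of 1 in each dimension */
                    ret = H5Sselect_hyperslab(sid,
                                              first ? H5S_SELECT_SET
                                                    : H5S_SELECT_OR,
                                              start, NULL, count, block);
                    if (ret < 0)
                        return ret;
                    first = 0;
                }
        return ret;
    }
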
@@ -971,32 +969,32 @@ lower_dim_size_comp_test__select_checker_board(
const int sel_rank,
hsize_t sel_start[])
{
-#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
- const char * fcnName =
- "lower_dim_size_comp_test__select_checker_board():";
-#endif
- hbool_t first_selection = TRUE;
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ const char * fcnName =
+ "lower_dim_size_comp_test__select_checker_board():";
+#endif
+ hbool_t first_selection = TRUE;
int i, j, k, l, m;
- int ds_offset;
- int sel_offset;
- const int test_max_rank = LDSCT_DS_RANK; /* must update code if */
+ int ds_offset;
+ int sel_offset;
+ const int test_max_rank = LDSCT_DS_RANK; /* must update code if */
/* this changes */
- hsize_t base_count;
+ hsize_t base_count;
hsize_t offset_count;
- hsize_t start[LDSCT_DS_RANK];
- hsize_t stride[LDSCT_DS_RANK];
- hsize_t count[LDSCT_DS_RANK];
- hsize_t block[LDSCT_DS_RANK];
- herr_t ret; /* Generic return value */
+ hsize_t start[LDSCT_DS_RANK];
+ hsize_t stride[LDSCT_DS_RANK];
+ hsize_t count[LDSCT_DS_RANK];
+ hsize_t block[LDSCT_DS_RANK];
+ herr_t ret; /* Generic return value */
-#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s:%d: dims/checker_edge_size = %d %d %d %d %d / %d\n",
fcnName, mpi_rank, (int)dims[0], (int)dims[1], (int)dims[2],
(int)dims[3], (int)dims[4], checker_edge_size);
}
-#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
+#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
HDassert( 0 < checker_edge_size );
HDassert( 0 < sel_rank );
@@ -1014,14 +1012,14 @@ lower_dim_size_comp_test__select_checker_board(
HDassert( (hsize_t)checker_edge_size <= dims[sel_offset] );
HDassert( dims[sel_offset] == 10 );
-#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n",
+ HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n",
fcnName, mpi_rank, sel_rank, sel_offset);
- HDfprintf(stdout, "%s:%d: tgt_rank/ds_offset = %d/%d.\n",
+ HDfprintf(stdout, "%s:%d: tgt_rank/ds_offset = %d/%d.\n",
fcnName, mpi_rank, tgt_rank, ds_offset);
}
-#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
+#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
/* First, compute the base count (which assumes start == 0
* for the associated offset) and offset_count (which
@@ -1043,25 +1041,25 @@ lower_dim_size_comp_test__select_checker_board(
base_count++;
}
- offset_count =
- (hsize_t)((dims[sel_offset] - (hsize_t)checker_edge_size) /
+ offset_count =
+ (hsize_t)((dims[sel_offset] - (hsize_t)checker_edge_size) /
((hsize_t)(checker_edge_size * 2)));
- if ( ((dims[sel_rank] - (hsize_t)checker_edge_size) %
+ if ( ((dims[sel_rank] - (hsize_t)checker_edge_size) %
((hsize_t)(checker_edge_size * 2))) > 0 ) {
offset_count++;
}
-#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout, "%s:%d: base_count/offset_count = %d/%d.\n",
+ HDfprintf(stdout, "%s:%d: base_count/offset_count = %d/%d.\n",
fcnName, mpi_rank, base_count, offset_count);
}
-#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
+#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
/* Now set up the stride and block arrays, and portions of the start
- * and count arrays that will not be altered during the selection of
+ * and count arrays that will not be altered during the selection of
* the checker board.
*/
i = 0;
@@ -1093,7 +1091,7 @@ lower_dim_size_comp_test__select_checker_board(
i++;
}
-
+
i = 0;
do {
if ( 0 >= sel_offset ) {
@@ -1112,7 +1110,7 @@ lower_dim_size_comp_test__select_checker_board(
}
j = 0;
- do {
+ do {
if ( 1 >= sel_offset ) {
if ( j == 0 ) {
@@ -1181,78 +1179,78 @@ lower_dim_size_comp_test__select_checker_board(
if ( ((i + j + k + l + m) % 2) == 0 ) {
-#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
- if ( mpi_rank ==
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ if ( mpi_rank ==
LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
- "%s%d: *** first_selection = %d ***\n",
+ HDfprintf(stdout,
+ "%s%d: *** first_selection = %d ***\n",
fcnName, mpi_rank, (int)first_selection);
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n",
fcnName, mpi_rank, i, j, k, l, m);
- HDfprintf(stdout,
- "%s:%d: start = %d %d %d %d %d.\n",
- fcnName, mpi_rank,
- (int)start[0], (int)start[1],
- (int)start[2], (int)start[3],
+ HDfprintf(stdout,
+ "%s:%d: start = %d %d %d %d %d.\n",
+ fcnName, mpi_rank,
+ (int)start[0], (int)start[1],
+ (int)start[2], (int)start[3],
(int)start[4]);
- HDfprintf(stdout,
- "%s:%d: stride = %d %d %d %d %d.\n",
- fcnName, mpi_rank,
- (int)stride[0], (int)stride[1],
- (int)stride[2], (int)stride[3],
+ HDfprintf(stdout,
+ "%s:%d: stride = %d %d %d %d %d.\n",
+ fcnName, mpi_rank,
+ (int)stride[0], (int)stride[1],
+ (int)stride[2], (int)stride[3],
(int)stride[4]);
- HDfprintf(stdout,
- "%s:%d: count = %d %d %d %d %d.\n",
- fcnName, mpi_rank,
- (int)count[0], (int)count[1],
- (int)count[2], (int)count[3],
+ HDfprintf(stdout,
+ "%s:%d: count = %d %d %d %d %d.\n",
+ fcnName, mpi_rank,
+ (int)count[0], (int)count[1],
+ (int)count[2], (int)count[3],
(int)count[4]);
- HDfprintf(stdout,
- "%s:%d: block = %d %d %d %d %d.\n",
- fcnName, mpi_rank,
- (int)block[0], (int)block[1],
- (int)block[2], (int)block[3],
+ HDfprintf(stdout,
+ "%s:%d: block = %d %d %d %d %d.\n",
+ fcnName, mpi_rank,
+ (int)block[0], (int)block[1],
+ (int)block[2], (int)block[3],
(int)block[4]);
- HDfprintf(stdout,
- "%s:%d: n-cube extent dims = %d.\n",
+ HDfprintf(stdout,
+ "%s:%d: n-cube extent dims = %d.\n",
fcnName, mpi_rank,
H5Sget_simple_extent_ndims(tgt_sid));
- HDfprintf(stdout,
- "%s:%d: selection rank = %d.\n",
+ HDfprintf(stdout,
+ "%s:%d: selection rank = %d.\n",
fcnName, mpi_rank, sel_rank);
}
#endif
if ( first_selection ) {
- first_selection = FALSE;
+ first_selection = FALSE;
ret = H5Sselect_hyperslab
(
- tgt_sid,
+ tgt_sid,
H5S_SELECT_SET,
- &(start[ds_offset]),
- &(stride[ds_offset]),
- &(count[ds_offset]),
+ &(start[ds_offset]),
+ &(stride[ds_offset]),
+ &(count[ds_offset]),
&(block[ds_offset])
);
-
+
VRFY((ret != FAIL), "H5Sselect_hyperslab(SET) succeeded");
} else {
ret = H5Sselect_hyperslab
(
- tgt_sid,
+ tgt_sid,
H5S_SELECT_OR,
- &(start[ds_offset]),
- &(stride[ds_offset]),
- &(count[ds_offset]),
+ &(start[ds_offset]),
+ &(stride[ds_offset]),
+ &(count[ds_offset]),
&(block[ds_offset])
);
-
+
VRFY((ret != FAIL), "H5Sselect_hyperslab(OR) succeeded");
}
@@ -1284,7 +1282,7 @@ lower_dim_size_comp_test__select_checker_board(
} while ( ( i <= 1 ) &&
( 0 >= sel_offset ) );
-#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n",
fcnName, mpi_rank, (int)H5Sget_select_npoints(tgt_sid));
@@ -1306,7 +1304,7 @@ lower_dim_size_comp_test__select_checker_board(
VRFY((ret != FAIL), "H5Sselect_hyperslab(AND) succeeded");
-#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n",
fcnName, mpi_rank, (int)H5Sget_select_npoints(tgt_sid));
@@ -1321,57 +1319,57 @@ lower_dim_size_comp_test__select_checker_board(
/****************************************************************
**
-** lower_dim_size_comp_test__verify_data():
+** lower_dim_size_comp_test__verify_data():
**
-** Examine the supplied buffer to see if it contains the
-** expected data. Return TRUE if it does, and FALSE
+** Examine the supplied buffer to see if it contains the
+** expected data. Return TRUE if it does, and FALSE
** otherwise.
**
-** The supplied buffer is presumed to this process's slice
-** of the target data set. Each such slice will be an
-** n-cube of rank (rank -1) and the supplied edge_size with
-** origin (mpi_rank, 0, ... , 0) in the target data set.
+** The supplied buffer is presumed to be this process's slice
+** of the target data set. Each such slice will be an
+** n-cube of rank (rank -1) and the supplied edge_size with
+** origin (mpi_rank, 0, ... , 0) in the target data set.
**
-** Further, the buffer is presumed to be the result of reading
-** or writing a checker board selection of an m (1 <= m <
+** Further, the buffer is presumed to be the result of reading
+** or writing a checker board selection of an m (1 <= m <
** rank) dimensional slice through this process's slice
-** of the target data set. Also, this slice must be parallel
-** to the fastest changing indicies.
+** of the target data set. Also, this slice must be parallel
+** to the fastest changing indices.
**
-** It is further presumed that the buffer was zeroed before
-** the read/write, and that the full target data set (i.e.
-** the buffer/data set for all processes) was initialized
-** with the natural numbers listed in order from the origin
-** along the fastest changing axis.
+** It is further presumed that the buffer was zeroed before
+** the read/write, and that the full target data set (i.e.
+** the buffer/data set for all processes) was initialized
+** with the natural numbers listed in order from the origin
+** along the fastest changing axis.
**
** Thus for a 20x10x10 dataset, the value stored in location
-** (x, y, z) (assuming that z is the fastest changing index
-** and x the slowest) is assumed to be:
+** (x, y, z) (assuming that z is the fastest changing index
+** and x the slowest) is assumed to be:
**
-** (10 * 10 * x) + (10 * y) + z
+** (10 * 10 * x) + (10 * y) + z
**
-** Further, supposing that this is process 10, this process's
-** slice of the dataset would be a 10 x 10 2-cube with origin
-** (10, 0, 0) in the data set, and would be initialize (prior
-** to the checkerboard selection) as follows:
+** Further, supposing that this is process 10, this process's
+** slice of the dataset would be a 10 x 10 2-cube with origin
+** (10, 0, 0) in the data set, and would be initialized (prior
+** to the checkerboard selection) as follows:
**
-** 1000, 1001, 1002, ... 1008, 1009
-** 1010, 1011, 1012, ... 1018, 1019
-** . . . . .
-** . . . . .
-** . . . . .
-** 1090, 1091, 1092, ... 1098, 1099
+** 1000, 1001, 1002, ... 1008, 1009
+** 1010, 1011, 1012, ... 1018, 1019
+** . . . . .
+** . . . . .
+** . . . . .
+** 1090, 1091, 1092, ... 1098, 1099
**
-** In the case of a read from the processors slice of another
-** data set of different rank, the values expected will have
-** to be adjusted accordingly. This is done via the
-** first_expected_val parameter.
+** In the case of a read from the process's slice of another
+** data set of different rank, the values expected will have
+** to be adjusted accordingly. This is done via the
+** first_expected_val parameter.
**
-** Finally, the function presumes that the first element
-** of the buffer resides either at the origin of either
-** a selected or an unselected checker. (Translation:
-** if partial checkers appear in the buffer, they will
-** intersect the edges of the n-cube oposite the origin.)
+** Finally, the function presumes that the first element
+** of the buffer resides at the origin of either
+** a selected or an unselected checker. (Translation:
+** if partial checkers appear in the buffer, they will
+** intersect the edges of the n-cube opposite the origin.)
**
****************************************************************/
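
Written out for the 20x10x10 example in the comment above: with z fastest and x slowest, the expected value at (x, y, z) is 100*x + 10*y + z, so the slice with origin (10, 0, 0) starts at 1000. A small sketch of that rule (function names are illustrative; the real test works with rank-5 spaces and a first_expected_val offset):

    #include <stdint.h>

    /* value expected at (x, y, z) in the initialized 20x10x10 dataset */
    static uint32_t expected_val(int x, int y, int z)
    {
        return (uint32_t)((10 * 10 * x) + (10 * y) + z);
    }

    /* first value in the slice whose origin is (mpi_rank, 0, 0) */
    static uint32_t slice_first_val(int mpi_rank)
    {
        return expected_val(mpi_rank, 0, 0);   /* e.g. process 10 -> 1000 */
    }
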
@@ -1379,7 +1377,7 @@ lower_dim_size_comp_test__select_checker_board(
static hbool_t
lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr,
-#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
const int mpi_rank,
#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
const int rank,
@@ -1389,8 +1387,8 @@ lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr,
hbool_t buf_starts_in_checker)
{
#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- const char * fcnName =
- "lower_dim_size_comp_test__verify_data():";
+ const char * fcnName =
+ "lower_dim_size_comp_test__verify_data():";
#endif
hbool_t good_data = TRUE;
hbool_t in_checker;
@@ -1409,16 +1407,16 @@ lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr,
HDassert( checker_edge_size <= edge_size );
HDassert( test_max_rank <= LDSCT_DS_RANK );
-#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s mpi_rank = %d.\n", fcnName, mpi_rank);
HDfprintf(stdout, "%s rank = %d.\n", fcnName, rank);
HDfprintf(stdout, "%s edge_size = %d.\n", fcnName, edge_size);
- HDfprintf(stdout, "%s checker_edge_size = %d.\n",
+ HDfprintf(stdout, "%s checker_edge_size = %d.\n",
fcnName, checker_edge_size);
- HDfprintf(stdout, "%s first_expected_val = %d.\n",
+ HDfprintf(stdout, "%s first_expected_val = %d.\n",
fcnName, (int)first_expected_val);
- HDfprintf(stdout, "%s starts_in_checker = %d.\n",
+ HDfprintf(stdout, "%s starts_in_checker = %d.\n",
fcnName, (int)buf_starts_in_checker);
}
#endif
@@ -1463,7 +1461,7 @@ lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr,
y = 0;
start_in_checker[3] = start_in_checker[2];
do
- {
+ {
if ( y >= checker_edge_size ) {
start_in_checker[3] = ! start_in_checker[3];
@@ -1472,8 +1470,8 @@ lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr,
m = 0;
z = 0;
-#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- if ( mpi_rank ==
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ if ( mpi_rank ==
LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%d, %d, %d, %d, %d:", i, j, k, l, m);
}
@@ -1481,8 +1479,8 @@ lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr,
in_checker = start_in_checker[3];
do
{
-#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- if ( mpi_rank ==
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ if ( mpi_rank ==
LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, " %d", (int)(*val_ptr));
}
@@ -1492,21 +1490,21 @@ lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr,
in_checker = ! in_checker;
z = 0;
}
-
+
if ( in_checker ) {
-
+
if ( *val_ptr != expected_value ) {
good_data = FALSE;
}
-
+
/* zero out buffer for re-use */
*val_ptr = 0;
} else if ( *val_ptr != 0 ) {
good_data = FALSE;
-
+
/* zero out buffer for re-use */
*val_ptr = 0;
@@ -1516,11 +1514,11 @@ lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr,
expected_value++;
m++;
z++;
-
+
} while ( ( rank >= (test_max_rank - 4) ) &&
( m < edge_size ) );
-#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- if ( mpi_rank ==
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ if ( mpi_rank ==
LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "\n");
}
@@ -1548,22 +1546,22 @@ lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr,
/*-------------------------------------------------------------------------
- * Function: lower_dim_size_comp_test__run_test()
+ * Function: lower_dim_size_comp_test__run_test()
*
- * Purpose: Verify that a bug in the computation of the size of the
- * lower dimensions of a data space in H5S_obtain_datatype()
- * has been corrected.
+ * Purpose: Verify that a bug in the computation of the size of the
+ * lower dimensions of a data space in H5S_obtain_datatype()
+ * has been corrected.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 11/11/09
+ * Programmer: JRM -- 11/11/09
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
-#define LDSCT_DS_RANK 5
+#define LDSCT_DS_RANK 5
#define LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG 0
static void
@@ -1571,21 +1569,21 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
const hbool_t use_collective_io,
const hid_t dset_type)
{
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
const char *fcnName = "lower_dim_size_comp_test__run_test()";
- int rank;
- hsize_t dims[32];
- hsize_t max_dims[32];
+ int rank;
+ hsize_t dims[32];
+ hsize_t max_dims[32];
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
const char *filename;
- hbool_t data_ok = FALSE;
- hbool_t mis_match = FALSE;
+ hbool_t data_ok = FALSE;
+ hbool_t mis_match = FALSE;
int i;
int start_index;
int stop_index;
- int mrc;
- int mpi_rank;
- int mpi_size;
+ int mrc;
+ int mpi_rank;
+ int mpi_size;
MPI_Comm mpi_comm = MPI_COMM_NULL;
MPI_Info mpi_info = MPI_INFO_NULL;
hid_t fid; /* HDF5 file ID */
@@ -1635,7 +1633,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
mpi_comm = MPI_COMM_WORLD;
mpi_info = MPI_INFO_NULL;
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s:%d: chunk_edge_size = %d.\n",
fcnName, mpi_rank, (int)chunk_edge_size);
@@ -1650,13 +1648,13 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
large_ds_size = (size_t)((mpi_size + 1) * 10 * 10 * 10 * 10);
large_ds_slice_size = (size_t) (10 * 10 * 10 * 10);
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s:%d: small ds size / slice size = %d / %d.\n",
- fcnName, mpi_rank,
+ fcnName, mpi_rank,
(int)small_ds_size, (int)small_ds_slice_size);
HDfprintf(stdout, "%s:%d: large ds size / slice size = %d / %d.\n",
- fcnName, mpi_rank,
+ fcnName, mpi_rank,
(int)large_ds_size, (int)large_ds_slice_size);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
@@ -1739,7 +1737,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
large_dims[3] = 10;
large_dims[4] = 10;
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s:%d: small_dims[] = %d %d %d %d %d\n",
fcnName, mpi_rank, (int)small_dims[0], (int)small_dims[1],
@@ -1748,41 +1746,41 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
fcnName, mpi_rank, (int)large_dims[0], (int)large_dims[1],
(int)large_dims[2], (int)large_dims[3], (int)large_dims[4]);
}
-#endif
+#endif
/* create data spaces */
full_mem_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
- VRFY((full_mem_small_ds_sid != 0),
+ VRFY((full_mem_small_ds_sid != 0),
"H5Screate_simple() full_mem_small_ds_sid succeeded");
full_file_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
- VRFY((full_file_small_ds_sid != 0),
+ VRFY((full_file_small_ds_sid != 0),
"H5Screate_simple() full_file_small_ds_sid succeeded");
mem_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
- VRFY((mem_small_ds_sid != 0),
+ VRFY((mem_small_ds_sid != 0),
"H5Screate_simple() mem_small_ds_sid succeeded");
file_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
- VRFY((file_small_ds_sid != 0),
+ VRFY((file_small_ds_sid != 0),
"H5Screate_simple() file_small_ds_sid succeeded");
full_mem_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
- VRFY((full_mem_large_ds_sid != 0),
+ VRFY((full_mem_large_ds_sid != 0),
"H5Screate_simple() full_mem_large_ds_sid succeeded");
full_file_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
- VRFY((full_file_large_ds_sid != 0),
+ VRFY((full_file_large_ds_sid != 0),
"H5Screate_simple() full_file_large_ds_sid succeeded");
mem_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
- VRFY((mem_large_ds_sid != 0),
+ VRFY((mem_large_ds_sid != 0),
"H5Screate_simple() mem_large_ds_sid succeeded");
file_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
- VRFY((file_large_ds_sid != 0),
+ VRFY((file_large_ds_sid != 0),
"H5Screate_simple() file_large_ds_sid succeeded");
@@ -1812,14 +1810,14 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
small_chunk_dims[1] = small_chunk_dims[2] = (hsize_t)1;
small_chunk_dims[3] = small_chunk_dims[4] = (hsize_t)chunk_edge_size;
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s:%d: small chunk dims[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)small_chunk_dims[0],
- (int)small_chunk_dims[1], (int)small_chunk_dims[2],
+ fcnName, mpi_rank, (int)small_chunk_dims[0],
+ (int)small_chunk_dims[1], (int)small_chunk_dims[2],
(int)small_chunk_dims[3], (int)small_chunk_dims[4]);
}
-#endif
+#endif
small_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((ret != FAIL), "H5Pcreate() small_ds_dcpl_id succeeded");
@@ -1831,18 +1829,18 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded");
large_chunk_dims[0] = (hsize_t)(1);
- large_chunk_dims[1] = large_chunk_dims[2] =
- large_chunk_dims[3] = large_chunk_dims[4] = (hsize_t)chunk_edge_size;
+ large_chunk_dims[1] = large_chunk_dims[2] =
+ large_chunk_dims[3] = large_chunk_dims[4] = (hsize_t)chunk_edge_size;
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s:%d: large chunk dims[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)large_chunk_dims[0],
- (int)large_chunk_dims[1], (int)large_chunk_dims[2],
+ fcnName, mpi_rank, (int)large_chunk_dims[0],
+ (int)large_chunk_dims[1], (int)large_chunk_dims[2],
(int)large_chunk_dims[3], (int)large_chunk_dims[4]);
}
-#endif
+#endif
large_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((ret != FAIL), "H5Pcreate() large_ds_dcpl_id succeeded");
@@ -1868,11 +1866,11 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
large_ds_dcpl_id, H5P_DEFAULT);
VRFY((ret >= 0), "H5Dcreate2() large_dataset succeeded");
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
- "%s:%d: small/large ds id = %d / %d.\n",
- fcnName, mpi_rank, (int)small_dataset,
+ HDfprintf(stdout,
+ "%s:%d: small/large ds id = %d / %d.\n",
+ fcnName, mpi_rank, (int)small_dataset,
(int)large_dataset);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
@@ -1906,10 +1904,10 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
block[0] = block[1] = block[2] = 1;
block[3] = block[4] = 10;
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
- "%s:%d: settings for small data set initialization.\n",
+ HDfprintf(stdout,
+ "%s:%d: settings for small data set initialization.\n",
fcnName, mpi_rank);
HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n",
fcnName, mpi_rank, (int)start[0], (int)start[1],
@@ -1947,10 +1945,10 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
start[0] = 0;
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
- "%s:%d: added settings for main process.\n",
+ HDfprintf(stdout,
+ "%s:%d: added settings for main process.\n",
fcnName, mpi_rank);
HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n",
fcnName, mpi_rank, (int)start[0], (int)start[1],
@@ -1992,23 +1990,23 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
/* write the initial value of the small data set to file */
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s:%d: writing init value of small ds to file.\n",
fcnName, mpi_rank);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
- ret = H5Dwrite(small_dataset,
- dset_type,
- mem_small_ds_sid,
+ ret = H5Dwrite(small_dataset,
+ dset_type,
+ mem_small_ds_sid,
file_small_ds_sid,
- xfer_plist,
+ xfer_plist,
small_ds_buf_0);
VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded");
- /* read the small data set back to verify that it contains the
- * expected data. Note that each process reads in the entire
+ /* read the small data set back to verify that it contains the
+ * expected data. Note that each process reads in the entire
* data set and verifies it.
*/
ret = H5Dread(small_dataset,
@@ -2061,10 +2059,10 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
block[0] = (hsize_t)1;
block[1] = block[2] = block[3] = block[4] = (hsize_t)10;
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
- "%s:%d: settings for large data set initialization.\n",
+ HDfprintf(stdout,
+ "%s:%d: settings for large data set initialization.\n",
fcnName, mpi_rank);
HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n",
fcnName, mpi_rank, (int)start[0], (int)start[1],
@@ -2097,15 +2095,15 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
block);
VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, set) suceeded");
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n",
- fcnName, mpi_rank,
+ fcnName, mpi_rank,
(int)H5Sget_select_npoints(mem_large_ds_sid));
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n",
- fcnName, mpi_rank,
+ fcnName, mpi_rank,
(int)H5Sget_select_npoints(file_large_ds_sid));
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
@@ -2114,10 +2112,10 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
start[0] = (hsize_t)0;
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
- "%s:%d: added settings for main process.\n",
+ HDfprintf(stdout,
+ "%s:%d: added settings for main process.\n",
fcnName, mpi_rank);
HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n",
fcnName, mpi_rank, (int)start[0], (int)start[1],
@@ -2150,15 +2148,15 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
block);
VRFY((ret>= 0), "H5Sselect_hyperslab(file_large_ds_sid, or) suceeded");
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n",
- fcnName, mpi_rank,
+ fcnName, mpi_rank,
(int)H5Sget_select_npoints(mem_large_ds_sid));
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n",
- fcnName, mpi_rank,
+ fcnName, mpi_rank,
(int)H5Sget_select_npoints(file_large_ds_sid));
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
@@ -2183,19 +2181,19 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
start, stride, count, block);
VRFY((ret != FAIL),"H5Sselect_hyperslab(file_large_ds_sid, and) succeeded");
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
rank = H5Sget_simple_extent_dims(mem_large_ds_sid, dims, max_dims);
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s:%d: mem_large_ds_sid dims[%d] = %d %d %d %d %d\n",
- fcnName, mpi_rank, rank, (int)dims[0], (int)dims[1],
+ fcnName, mpi_rank, rank, (int)dims[0], (int)dims[1],
(int)dims[2], (int)dims[3], (int)dims[4]);
rank = H5Sget_simple_extent_dims(file_large_ds_sid, dims, max_dims);
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s:%d: file_large_ds_sid dims[%d] = %d %d %d %d %d\n",
- fcnName, mpi_rank, rank, (int)dims[0], (int)dims[1],
+ fcnName, mpi_rank, rank, (int)dims[0], (int)dims[1],
(int)dims[2], (int)dims[3], (int)dims[4]);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
@@ -2208,26 +2206,26 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
/* write the initial value of the large data set to file */
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s:%d: writing init value of large ds to file.\n",
fcnName, mpi_rank);
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s:%d: large_dataset = %d.\n",
- fcnName, mpi_rank,
+ fcnName, mpi_rank,
(int)large_dataset);
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s:%d: mem_large_ds_sid = %d, file_large_ds_sid = %d.\n",
- fcnName, mpi_rank,
+ fcnName, mpi_rank,
(int)mem_large_ds_sid, (int)file_large_ds_sid);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
- ret = H5Dwrite(large_dataset,
- dset_type,
- mem_large_ds_sid,
+ ret = H5Dwrite(large_dataset,
+ dset_type,
+ mem_large_ds_sid,
file_large_ds_sid,
- xfer_plist,
+ xfer_plist,
large_ds_buf_0);
if ( ret < 0 ) H5Eprint2(H5E_DEFAULT, stderr);
@@ -2238,8 +2236,8 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
mrc = MPI_Barrier(MPI_COMM_WORLD);
VRFY((mrc==MPI_SUCCESS), "Sync after large dataset writes");
- /* read the large data set back to verify that it contains the
- * expected data. Note that each process reads in the entire
+ /* read the large data set back to verify that it contains the
+ * expected data. Note that each process reads in the entire
* data set.
*/
ret = H5Dread(large_dataset,
@@ -2278,13 +2276,13 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
/***********************************/
- /* read a checkerboard selection of the process slice of the
- * small on disk data set into the process slice of the large
+ /* read a checkerboard selection of the process slice of the
+ * small on disk data set into the process slice of the large
* in memory data set, and verify the data read.
*/
small_sel_start[0] = (hsize_t)(mpi_rank + 1);
- small_sel_start[1] = small_sel_start[2] =
+ small_sel_start[1] = small_sel_start[2] =
small_sel_start[3] = small_sel_start[4] = 0;
lower_dim_size_comp_test__select_checker_board(mpi_rank,
@@ -2296,9 +2294,9 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
small_sel_start);
expected_value = (uint32_t)
- ((small_sel_start[0] * small_dims[1] * small_dims[2] *
+ ((small_sel_start[0] * small_dims[1] * small_dims[2] *
small_dims[3] * small_dims[4]) +
- (small_sel_start[1] * small_dims[2] * small_dims[3] *
+ (small_sel_start[1] * small_dims[2] * small_dims[3] *
small_dims[4]) +
(small_sel_start[2] * small_dims[3] * small_dims[4]) +
(small_sel_start[3] * small_dims[4]) +
@@ -2335,7 +2333,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
VRFY((ret >= 0), "H5Sread() slice from small ds succeeded.");
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, mpi_rank);
}
@@ -2345,9 +2343,9 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
data_ok = TRUE;
- start_index = (int)((large_sel_start[0] * large_dims[1] * large_dims[2] *
+ start_index = (int)((large_sel_start[0] * large_dims[1] * large_dims[2] *
large_dims[3] * large_dims[4]) +
- (large_sel_start[1] * large_dims[2] * large_dims[3] *
+ (large_sel_start[1] * large_dims[2] * large_dims[3] *
large_dims[4]) +
(large_sel_start[2] * large_dims[3] * large_dims[4]) +
(large_sel_start[3] * large_dims[4]) +
@@ -2409,13 +2407,13 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
- /* read a checkerboard selection of a slice of the process slice of
- * the large on disk data set into the process slice of the small
+ /* read a checkerboard selection of a slice of the process slice of
+ * the large on disk data set into the process slice of the small
* in memory data set, and verify the data read.
*/
small_sel_start[0] = (hsize_t)(mpi_rank + 1);
- small_sel_start[1] = small_sel_start[2] =
+ small_sel_start[1] = small_sel_start[2] =
small_sel_start[3] = small_sel_start[4] = 0;
lower_dim_size_comp_test__select_checker_board(mpi_rank,
@@ -2456,7 +2454,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
VRFY((ret >= 0), "H5Sread() slice from large ds succeeded.");
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, mpi_rank);
}
@@ -2467,9 +2465,9 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
data_ok = TRUE;
expected_value = (uint32_t)
- ((large_sel_start[0] * large_dims[1] * large_dims[2] *
+ ((large_sel_start[0] * large_dims[1] * large_dims[2] *
large_dims[3] * large_dims[4]) +
- (large_sel_start[1] * large_dims[2] * large_dims[3] *
+ (large_sel_start[1] * large_dims[2] * large_dims[3] *
large_dims[4]) +
(large_sel_start[2] * large_dims[3] * large_dims[4]) +
(large_sel_start[3] * large_dims[4]) +
@@ -2522,7 +2520,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout, "%s:%d: unexpected value at index %d: %d.\n",
+ HDfprintf(stdout, "%s:%d: unexpected value at index %d: %d.\n",
fcnName, mpi_rank, (int)i, (int)(*ptr_1));
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
@@ -2590,15 +2588,15 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
/*-------------------------------------------------------------------------
- * Function: lower_dim_size_comp_test()
+ * Function: lower_dim_size_comp_test()
*
- * Purpose: Test to see if an error in the computation of the size
- * of the lower dimensions in H5S_obtain_datatype() has
- * been corrected.
+ * Purpose: Test to see if an error in the computation of the size
+ * of the lower dimensions in H5S_obtain_datatype() has
+ * been corrected.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 11/11/09
+ * Programmer: JRM -- 11/11/09
*
* Modifications:
*
@@ -2609,15 +2607,15 @@ void
lower_dim_size_comp_test(void)
{
/* const char *fcnName = "lower_dim_size_comp_test()"; */
- int chunk_edge_size = 0;
- int use_collective_io = 1;
- hid_t dset_type = H5T_NATIVE_UINT;
+ int chunk_edge_size = 0;
+ int use_collective_io = 1;
+ hid_t dset_type = H5T_NATIVE_UINT;
#if 0
HDsleep(60);
#endif
HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned));
- for ( use_collective_io = (hbool_t)0;
- (int)use_collective_io <= 1;
+ for ( use_collective_io = (hbool_t)0;
+ (int)use_collective_io <= 1;
(hbool_t)(use_collective_io++) ) {
chunk_edge_size = 0;
@@ -2638,37 +2636,37 @@ lower_dim_size_comp_test(void)
/*-------------------------------------------------------------------------
- * Function: link_chunk_collective_io_test()
+ * Function: link_chunk_collective_io_test()
*
- * Purpose: Test to verify that an error in MPI type management in
- * H5D_link_chunk_collective_io() has been corrected.
- * In this bug, we used to free MPI types regardless of
- * whether they were basic or derived.
+ * Purpose: Test to verify that an error in MPI type management in
+ * H5D_link_chunk_collective_io() has been corrected.
+ * In this bug, we used to free MPI types regardless of
+ * whether they were basic or derived.
*
- * This test is based on a bug report kindly provided by
- * Rob Latham of the MPICH team and ANL.
+ * This test is based on a bug report kindly provided by
+ * Rob Latham of the MPICH team and ANL.
*
- * The basic thrust of the test is to cause a process
- * to participate in a collective I/O in which it:
+ * The basic thrust of the test is to cause a process
+ * to participate in a collective I/O in which it:
*
- * 1) Reads or writes exactly one chunk,
+ * 1) Reads or writes exactly one chunk,
*
- * 2) Has no in memory buffer for any other chunk.
+ * 2) Has no in memory buffer for any other chunk.
*
- * The test differers from Rob Latham's bug report in
- * that is runs with an arbitrary number of proceeses,
- * and uses a 1 dimensional dataset.
+ * The test differs from Rob Latham's bug report in
+ * that it runs with an arbitrary number of processes,
+ * and uses a 1 dimensional dataset.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 12/16/09
+ * Programmer: JRM -- 12/16/09
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
-#define LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE 16
+#define LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE 16
void
link_chunk_collective_io_test(void)
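
As the comment above puts it, the bug freed MPI types regardless of whether they were basic or derived; only derived datatypes that the code itself created and committed may be passed to MPI_Type_free(). A sketch of the usual guard (the flag and function name are illustrative, not taken from the HDF5 sources):

    #include <mpi.h>

    static void build_and_release_type(int count)
    {
        MPI_Datatype type = MPI_DOUBLE;  /* start with a basic type        */
        int is_derived = 0;              /* remember whether we created it */

        if (count > 1) {
            MPI_Type_contiguous(count, MPI_DOUBLE, &type);
            MPI_Type_commit(&type);
            is_derived = 1;
        }

        /* ... use `type` in collective I/O ... */

        if (is_derived)
            MPI_Type_free(&type);        /* never free a predefined type */
    }
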
@@ -2676,8 +2674,8 @@ link_chunk_collective_io_test(void)
/* const char *fcnName = "link_chunk_collective_io_test()"; */
const char *filename;
hbool_t mis_match = FALSE;
- int i;
- int mrc;
+ int i;
+ int mrc;
int mpi_rank;
int mpi_size;
MPI_Comm mpi_comm = MPI_COMM_WORLD;
@@ -2767,8 +2765,8 @@ link_chunk_collective_io_test(void)
for ( i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++ ) {
local_data_written[i] = expected_value;
- local_data_read[i] = 0.0;
- expected_value += 1.0;
+ local_data_read[i] = 0.0;
+ expected_value += 1.0;
}
/* select the file and mem spaces */
@@ -2794,15 +2792,15 @@ link_chunk_collective_io_test(void)
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
/* write the data set */
- ret = H5Dwrite(dset_id,
- H5T_NATIVE_DOUBLE,
- write_mem_ds_sid,
+ ret = H5Dwrite(dset_id,
+ H5T_NATIVE_DOUBLE,
+ write_mem_ds_sid,
file_ds_sid,
- xfer_plist,
+ xfer_plist,
local_data_written);
VRFY((ret >= 0), "H5Dwrite() dataset initial write succeeded");
-
+
/* sync with the other processes before checking data */
mrc = MPI_Barrier(MPI_COMM_WORLD);
VRFY((mrc==MPI_SUCCESS), "Sync after dataset write");
@@ -2830,7 +2828,7 @@ link_chunk_collective_io_test(void)
if ( diff >= 0.001 ) {
mis_match = TRUE;
- }
+ }
}
VRFY( (mis_match == FALSE), "dataset data good.");