author     Christian Chilan <chilan@hdfgroup.org>  2008-07-17 22:45:40 (GMT)
committer  Christian Chilan <chilan@hdfgroup.org>  2008-07-17 22:45:40 (GMT)
commit     8b3ced0b9e36b5b687af56106e8338e00443a106 (patch)
tree       755f86930fd56d42ab363d76d7d320608cadd226 /perform
parent     ba711c9cf5e8cbec45bc0c36ee9fe48636e0296d (diff)
[svn-r15384] Major update of h5perf files including bug fixes and extensions.
Tested on kagiso and abe.
Diffstat (limited to 'perform')
-rw-r--r--  perform/pio_engine.c  4158
-rw-r--r--  perform/pio_perf.c     361
2 files changed, 2378 insertions, 2141 deletions
diff --git a/perform/pio_engine.c b/perform/pio_engine.c
index f42a849..6f300c8 100644
--- a/perform/pio_engine.c
+++ b/perform/pio_engine.c
@@ -50,25 +50,25 @@
#define ELMT_MPI_TYPE MPI_BYTE
#define ELMT_H5_TYPE H5T_NATIVE_UCHAR
-#define GOTOERROR(errcode) { ret_code = errcode; goto done; }
-#define GOTODONE { goto done; }
+#define GOTOERROR(errcode) { ret_code = errcode; goto done; }
+#define GOTODONE { goto done; }
#define ERRMSG(mesg) { \
fprintf(stderr, "Proc %d: ", pio_mpi_rank_g); \
fprintf(stderr, "*** Assertion failed (%s) at line %4d in %s\n", \
- mesg, (int)__LINE__, __FILE__); \
+ mesg, (int)__LINE__, __FILE__); \
}
#define MSG(mesg) { \
fprintf(stderr, "Proc %d: ", pio_mpi_rank_g); \
fprintf(stderr, "(%s) at line %4d in %s\n", \
- mesg, (int)__LINE__, __FILE__); \
+ mesg, (int)__LINE__, __FILE__); \
}
/* verify: if val is false (0), print mesg. */
#define VRFY(val, mesg) do { \
if (!val) { \
- ERRMSG(mesg); \
- GOTOERROR(FAIL); \
+ ERRMSG(mesg); \
+ GOTOERROR(FAIL); \
} \
} while(0)
@@ -88,7 +88,7 @@ enum {
};
/* Global variables */
-static int clean_file_g = -1; /*whether to cleanup temporary test */
+static int clean_file_g = -1; /*whether to cleanup temporary test */
/*files. -1 is not defined; */
/*0 is no cleanup; 1 is do cleanup */
@@ -120,13 +120,13 @@ typedef union _file_descr {
/* local functions */
static char *pio_create_filename(iotype iot, const char *base_name,
- char *fullname, size_t size);
+ char *fullname, size_t size);
static herr_t do_write(results *res, file_descr *fd, parameters *parms,
- long ndsets, off_t nelmts, size_t buf_size, void *buffer);
+ long ndsets, off_t nelmts, size_t buf_size, void *buffer);
static herr_t do_read(results *res, file_descr *fd, parameters *parms,
- long ndsets, off_t nelmts, size_t buf_size, void *buffer /*out*/);
+ long ndsets, off_t nelmts, size_t buf_size, void *buffer /*out*/);
static herr_t do_fopen(parameters *param, char *fname, file_descr *fd /*out*/,
- int flags);
+ int flags);
static herr_t do_fclose(iotype iot, file_descr *fd);
static void do_cleanupfile(iotype iot, char *fname);
@@ -138,16 +138,16 @@ static void gpfs_clear_file_cache(int handle);
static void gpfs_cancel_hints(int handle);
static void gpfs_start_data_shipping(int handle, int num_insts);
static void gpfs_start_data_ship_map(int handle, int partition_size,
- int agent_count, int *agent_node_num);
+ int agent_count, int *agent_node_num);
static void gpfs_stop_data_shipping(int handle);
static void gpfs_invalidate_file_cache(const char *filename);
#endif /* H5_HAVE_GPFS */
/*
- * Function: do_pio
- * Purpose: PIO Engine where Parallel IO are executed.
- * Return: results
- * Programmer: Albert Cheng, Bill Wendling 2001/12/12
+ * Function: do_pio
+ * Purpose: PIO Engine where Parallel IO are executed.
+ * Return: results
+ * Programmer: Albert Cheng, Bill Wendling 2001/12/12
* Modifications:
* Added 2D testing (Christian Chilan, 10. August 2005)
*/
@@ -158,28 +158,25 @@ do_pio(parameters param)
herr_t ret_code = 0; /*return code */
results res;
- file_descr fd;
+ file_descr fd;
iotype iot;
char fname[FILENAME_MAX];
- long nf;
+ long nf;
long ndsets;
- off_t nbytes; /*number of bytes per dataset */
- off_t snbytes; /*general dataset size */
- /*for 1D, it is the actual dataset size */
- /*for 2D, it is the size of a side of the dataset square */
-
- char *buffer = NULL; /*data buffer pointer */
-
- size_t buf_size; /*general buffer size in bytes */
- /*for 1D, it is the actual buffer size */
- /*for 2D, it is the length of the buffer rectangle */
-
- size_t blk_size; /*data block size in bytes */
- size_t bsize; /*actual buffer size */
+ off_t nbytes; /*number of bytes per dataset */
+ off_t snbytes; /*general dataset size */
+ /*for 1D, it is the actual dataset size */
+ /*for 2D, it is the size of a side of the dataset square */
+ char *buffer = NULL; /*data buffer pointer */
+ size_t buf_size; /*general buffer size in bytes */
+ /*for 1D, it is the actual buffer size */
+ /*for 2D, it is the length of the buffer rectangle */
+ size_t blk_size; /*data block size in bytes */
+ size_t bsize; /*actual buffer size */
/* HDF5 variables */
- herr_t hrc; /*HDF5 return code */
+ herr_t hrc; /*HDF5 return code */
/* Sanity check parameters */
@@ -187,22 +184,22 @@ do_pio(parameters param)
iot = param.io_type;
switch (iot) {
- case MPIO:
- fd.mpifd = MPI_FILE_NULL;
- res.timers = pio_time_new(MPI_TIMER);
- break;
- case POSIXIO:
- fd.posixfd = -1;
- res.timers = pio_time_new(MPI_TIMER);
- break;
- case PHDF5:
- fd.h5fd = -1;
- res.timers = pio_time_new(MPI_TIMER);
- break;
- default:
- /* unknown request */
- fprintf(stderr, "Unknown IO type request (%d)\n", iot);
- GOTOERROR(FAIL);
+ case MPIO:
+ fd.mpifd = MPI_FILE_NULL;
+ res.timers = pio_time_new(MPI_TIMER);
+ break;
+ case POSIXIO:
+ fd.posixfd = -1;
+ res.timers = pio_time_new(MPI_TIMER);
+ break;
+ case PHDF5:
+ fd.h5fd = -1;
+ res.timers = pio_time_new(MPI_TIMER);
+ break;
+ default:
+ /* unknown request */
+ fprintf(stderr, "Unknown IO type request (%d)\n", iot);
+ GOTOERROR(FAIL);
}
ndsets = param.num_dsets; /* number of datasets per file */
@@ -210,152 +207,164 @@ do_pio(parameters param)
buf_size = param.buf_size;
blk_size = param.blk_size;
- if (param.dim2d==0){
- snbytes = nbytes; /* General dataset size */
- bsize = buf_size; /* Actual buffer size */
+ if (!param.dim2d){
+ snbytes = nbytes; /* General dataset size */
+ bsize = buf_size; /* Actual buffer size */
}
else {
- snbytes = (off_t)sqrt(nbytes); /* General dataset size */
- bsize = buf_size * blk_size; /* Actual buffer size */
+ snbytes = (off_t)sqrt(nbytes); /* General dataset size */
+ bsize = buf_size * blk_size; /* Actual buffer size */
}
if (param.num_files < 0 ) {
- fprintf(stderr,
- "number of files must be >= 0 (%ld)\n",
- param.num_files);
- GOTOERROR(FAIL);
+ fprintf(stderr,
+ "number of files must be >= 0 (%ld)\n",
+ param.num_files);
+ GOTOERROR(FAIL);
}
if (ndsets < 0 ) {
- fprintf(stderr,
- "number of datasets per file must be >= 0 (%ld)\n",
- ndsets);
- GOTOERROR(FAIL);
+ fprintf(stderr,
+ "number of datasets per file must be >= 0 (%ld)\n",
+ ndsets);
+ GOTOERROR(FAIL);
}
if (param.num_procs <= 0 ) {
- fprintf(stderr,
- "maximum number of process to use must be > 0 (%d)\n",
- param.num_procs);
- GOTOERROR(FAIL);
+ fprintf(stderr,
+ "maximum number of process to use must be > 0 (%d)\n",
+ param.num_procs);
+ GOTOERROR(FAIL);
}
/* Validate transfer buffer size & block size*/
if(blk_size<=0) {
- HDfprintf(stderr,
- "Transfer block size (%Hd) must be > 0\n", (long_long)blk_size);
- GOTOERROR(FAIL);
+ HDfprintf(stderr,
+ "Transfer block size (%Hd) must be > 0\n", (long_long)blk_size);
+ GOTOERROR(FAIL);
}
if(buf_size<=0) {
- HDfprintf(stderr,
- "Transfer buffer size (%Hd) must be > 0\n", (long_long)buf_size);
- GOTOERROR(FAIL);
+ HDfprintf(stderr,
+ "Transfer buffer size (%Hd) must be > 0\n", (long_long)buf_size);
+ GOTOERROR(FAIL);
}
if ((buf_size % blk_size) != 0){
- HDfprintf(stderr,
- "Transfer buffer size (%Hd) must be a multiple of the "
- "interleaved I/O block size (%Hd)\n",
- (long_long)buf_size, (long_long)blk_size);
- GOTOERROR(FAIL);
+ HDfprintf(stderr,
+ "Transfer buffer size (%Hd) must be a multiple of the "
+ "interleaved I/O block size (%Hd)\n",
+ (long_long)buf_size, (long_long)blk_size);
+ GOTOERROR(FAIL);
}
if((snbytes%pio_mpi_nprocs_g)!=0) {
- HDfprintf(stderr,
- "Dataset size (%Hd) must be a multiple of the "
- "number of processes (%d)\n",
- (long_long)nbytes, pio_mpi_nprocs_g);
- GOTOERROR(FAIL);
+ HDfprintf(stderr,
+ "Dataset size (%Hd) must be a multiple of the "
+ "number of processes (%d)\n",
+ (long_long)snbytes, pio_mpi_nprocs_g);
+ GOTOERROR(FAIL);
}
- if(((snbytes/pio_mpi_nprocs_g)%buf_size)!=0) {
- HDfprintf(stderr,
- "Dataset size/process (%Hd) must be a multiple of the "
- "trasfer buffer size (%Hd)\n",
- (long_long)(nbytes/pio_mpi_nprocs_g), (long_long)buf_size);
- GOTOERROR(FAIL);
+
+ if (!param.dim2d){
+ if(((snbytes/pio_mpi_nprocs_g)%buf_size)!=0) {
+ HDfprintf(stderr,
+ "Dataset size/process (%Hd) must be a multiple of the "
+ "trasfer buffer size (%Hd)\n",
+ (long_long)(snbytes/pio_mpi_nprocs_g), (long_long)buf_size);
+ GOTOERROR(FAIL);
+ }
+ }
+ else {
+ if((snbytes%buf_size)!=0) {
+ HDfprintf(stderr,
+ "Dataset side size (%Hd) must be a multiple of the "
+ "trasfer buffer size (%Hd)\n",
+ (long_long)snbytes, (long_long)buf_size);
+ GOTOERROR(FAIL);
+ }
}
/* Allocate transfer buffer */
if ((buffer = malloc(bsize)) == NULL){
- HDfprintf(stderr, "malloc for transfer buffer size (%Hd) failed\n",
- (long_long)(bsize));
- GOTOERROR(FAIL);
+ HDfprintf(stderr, "malloc for transfer buffer size (%Hd) failed\n",
+ (long_long)(bsize));
+ GOTOERROR(FAIL);
}
if (pio_debug_level >= 4) {
- int myrank;
+ int myrank;
- MPI_Comm_rank(pio_comm_g, &myrank);
+ MPI_Comm_rank(pio_comm_g, &myrank);
- /* output all of the times for all iterations */
- if (myrank == 0)
- fprintf(output, "Timer details:\n");
+ /* output all of the times for all iterations */
+ if (myrank == 0)
+ fprintf(output, "Timer details:\n");
}
for (nf = 1; nf <= param.num_files; nf++) {
- /*
- * Write performance measurement
- */
- /* Open file for write */
- char base_name[256];
-
- sprintf(base_name, "#pio_tmp_%lu", nf);
- pio_create_filename(iot, base_name, fname, sizeof(fname));
- if (pio_debug_level > 0)
- HDfprintf(output, "rank %d: data filename=%s\n",
- pio_mpi_rank_g, fname);
-
- /* Need barrier to make sure everyone starts at the same time */
- MPI_Barrier(pio_comm_g);
-
- set_time(res.timers, HDF5_GROSS_WRITE_FIXED_DIMS, START);
- hrc = do_fopen(&param, fname, &fd, PIO_CREATE | PIO_WRITE);
-
- VRFY((hrc == SUCCESS), "do_fopen failed");
-
- set_time(res.timers, HDF5_FINE_WRITE_FIXED_DIMS, START);
- hrc = do_write(&res, &fd, &param, ndsets, nbytes, buf_size, buffer);
- hrc == SUCCESS;
- set_time(res.timers, HDF5_FINE_WRITE_FIXED_DIMS, STOP);
-
- VRFY((hrc == SUCCESS), "do_write failed");
-
- /* Close file for write */
- hrc = do_fclose(iot, &fd);
-
- set_time(res.timers, HDF5_GROSS_WRITE_FIXED_DIMS, STOP);
- VRFY((hrc == SUCCESS), "do_fclose failed");
-
- if (!param.h5_write_only) {
- /*
- * Read performance measurement
- */
- /* Need barrier to make sure everyone is done writing and has
- * closed the file. Also to make sure everyone starts reading
- * at the same time.
- */
- MPI_Barrier(pio_comm_g);
-
- /* Open file for read */
- set_time(res.timers, HDF5_GROSS_READ_FIXED_DIMS, START);
- hrc = do_fopen(&param, fname, &fd, PIO_READ);
-
- VRFY((hrc == SUCCESS), "do_fopen failed");
-
- set_time(res.timers, HDF5_FINE_READ_FIXED_DIMS, START);
- hrc = do_read(&res, &fd, &param, ndsets, nbytes, buf_size, buffer);
- set_time(res.timers, HDF5_FINE_READ_FIXED_DIMS, STOP);
- VRFY((hrc == SUCCESS), "do_read failed");
-
- /* Close file for read */
- hrc = do_fclose(iot, &fd);
-
- set_time(res.timers, HDF5_GROSS_READ_FIXED_DIMS, STOP);
- VRFY((hrc == SUCCESS), "do_fclose failed");
- }
-
- /* Need barrier to make sure everyone is done with the file */
- /* before it may be removed by do_cleanupfile */
- MPI_Barrier(pio_comm_g);
- do_cleanupfile(iot, fname);
+ /*
+ * Write performance measurement
+ */
+ /* Open file for write */
+ char base_name[256];
+
+ sprintf(base_name, "#pio_tmp_%lu", nf);
+ pio_create_filename(iot, base_name, fname, sizeof(fname));
+ if (pio_debug_level > 0)
+ HDfprintf(output, "rank %d: data filename=%s\n",
+ pio_mpi_rank_g, fname);
+
+ /* Need barrier to make sure everyone starts at the same time */
+ MPI_Barrier(pio_comm_g);
+
+ set_time(res.timers, HDF5_GROSS_WRITE_FIXED_DIMS, START);
+ hrc = do_fopen(&param, fname, &fd, PIO_CREATE | PIO_WRITE);
+
+ VRFY((hrc == SUCCESS), "do_fopen failed");
+
+ set_time(res.timers, HDF5_FINE_WRITE_FIXED_DIMS, START);
+ hrc = do_write(&res, &fd, &param, ndsets, nbytes, buf_size, buffer);
+ hrc == SUCCESS;
+ set_time(res.timers, HDF5_FINE_WRITE_FIXED_DIMS, STOP);
+
+ VRFY((hrc == SUCCESS), "do_write failed");
+
+ /* Close file for write */
+ hrc = do_fclose(iot, &fd);
+
+ set_time(res.timers, HDF5_GROSS_WRITE_FIXED_DIMS, STOP);
+ VRFY((hrc == SUCCESS), "do_fclose failed");
+
+ if (!param.h5_write_only) {
+ /*
+ * Read performance measurement
+ */
+ /* Need barrier to make sure everyone is done writing and has
+ * closed the file. Also to make sure everyone starts reading
+ * at the same time.
+ */
+ MPI_Barrier(pio_comm_g);
+
+ /* Open file for read */
+ set_time(res.timers, HDF5_GROSS_READ_FIXED_DIMS, START);
+ hrc = do_fopen(&param, fname, &fd, PIO_READ);
+
+ VRFY((hrc == SUCCESS), "do_fopen failed");
+
+ set_time(res.timers, HDF5_FINE_READ_FIXED_DIMS, START);
+ hrc = do_read(&res, &fd, &param, ndsets, nbytes, buf_size, buffer);
+ set_time(res.timers, HDF5_FINE_READ_FIXED_DIMS, STOP);
+ VRFY((hrc == SUCCESS), "do_read failed");
+
+ /* Close file for read */
+ hrc = do_fclose(iot, &fd);
+
+ set_time(res.timers, HDF5_GROSS_READ_FIXED_DIMS, STOP);
+ VRFY((hrc == SUCCESS), "do_fclose failed");
+ }
+
+ /* Need barrier to make sure everyone is done with the file */
+ /* before it may be removed by do_cleanupfile */
+ MPI_Barrier(pio_comm_g);
+ do_cleanupfile(iot, fname);
}
done:
@@ -365,23 +374,23 @@ done:
/* close any opened files */
/* no remove(fname) because that should have happened normally. */
switch (iot) {
- case POSIXIO:
- if (fd.posixfd != -1)
- hrc = do_fclose(iot, &fd);
- break;
- case MPIO:
- if (fd.mpifd != MPI_FILE_NULL)
- hrc = do_fclose(iot, &fd);
- break;
- case PHDF5:
- if (fd.h5fd != -1)
- hrc = do_fclose(iot, &fd);
- break;
+ case POSIXIO:
+ if (fd.posixfd != -1)
+ hrc = do_fclose(iot, &fd);
+ break;
+ case MPIO:
+ if (fd.mpifd != MPI_FILE_NULL)
+ hrc = do_fclose(iot, &fd);
+ break;
+ case PHDF5:
+ if (fd.h5fd != -1)
+ hrc = do_fclose(iot, &fd);
+ break;
}
/* release generic resources */
if(buffer)
- free(buffer);
+ free(buffer);
res.ret_code = ret_code;
return res;
}
@@ -404,20 +413,20 @@ pio_create_filename(iotype iot, const char *base_name, char *fullname, size_t si
size_t i, j;
if (!base_name || !fullname || size < 1)
- return NULL;
+ return NULL;
memset(fullname, 0, size);
switch (iot) {
- case POSIXIO:
- suffix = ".posix";
- break;
- case MPIO:
- suffix = ".mpio";
- break;
- case PHDF5:
- suffix = ".h5";
- break;
+ case POSIXIO:
+ suffix = ".posix";
+ break;
+ case MPIO:
+ suffix = ".mpio";
+ break;
+ case PHDF5:
+ suffix = ".h5";
+ break;
}
/* First use the environment variable and then try the constant */
@@ -425,118 +434,125 @@ pio_create_filename(iotype iot, const char *base_name, char *fullname, size_t si
#ifdef HDF5_PARAPREFIX
if (!prefix)
- prefix = HDF5_PARAPREFIX;
+ prefix = HDF5_PARAPREFIX;
#endif /* HDF5_PARAPREFIX */
/* Prepend the prefix value to the base name */
if (prefix && *prefix) {
- /* If the prefix specifies the HDF5_PARAPREFIX directory, then
- * default to using the "/tmp/$USER" or "/tmp/$LOGIN"
- * directory instead. */
- register char *user, *login, *subdir;
-
- user = getenv("USER");
- login = getenv("LOGIN");
- subdir = (user ? user : login);
-
- if (subdir) {
- for (i = 0; i < size && prefix[i]; i++)
- fullname[i] = prefix[i];
-
- fullname[i++] = '/';
-
- for (j = 0; i < size && subdir[j]; i++, j++)
- fullname[i] = subdir[j];
- } else {
- /* We didn't append the prefix yet */
- strncpy(fullname, prefix, MIN(strlen(prefix), size));
- }
-
- if ((strlen(fullname) + strlen(base_name) + 1) < size) {
- /* Append the base_name with a slash first. Multiple slashes are
- * handled below. */
- h5_stat_t buf;
-
- if (HDstat(fullname, &buf) < 0)
- /* The directory doesn't exist just yet */
- if (mkdir(fullname, (mode_t)0755) < 0 && errno != EEXIST) {
- /* We couldn't make the "/tmp/${USER,LOGIN}" subdirectory.
- * Default to PREFIX's original prefix value. */
- strcpy(fullname, prefix);
- }
-
- strcat(fullname, "/");
- strcat(fullname, base_name);
- } else {
- /* Buffer is too small */
- return NULL;
- }
+ /* If the prefix specifies the HDF5_PARAPREFIX directory, then
+ * default to using the "/tmp/$USER" or "/tmp/$LOGIN"
+ * directory instead. */
+ register char *user, *login, *subdir;
+
+ user = getenv("USER");
+ login = getenv("LOGIN");
+ subdir = (user ? user : login);
+
+ if (subdir) {
+ for (i = 0; i < size && prefix[i]; i++)
+ fullname[i] = prefix[i];
+
+ fullname[i++] = '/';
+
+ for (j = 0; i < size && subdir[j]; i++, j++)
+ fullname[i] = subdir[j];
+ } else {
+ /* We didn't append the prefix yet */
+ strncpy(fullname, prefix, MIN(strlen(prefix), size));
+ }
+
+ if ((strlen(fullname) + strlen(base_name) + 1) < size) {
+ /* Append the base_name with a slash first. Multiple slashes are
+ * handled below. */
+ h5_stat_t buf;
+
+ if (HDstat(fullname, &buf) < 0)
+ /* The directory doesn't exist just yet */
+ if (mkdir(fullname, (mode_t)0755) < 0 && errno != EEXIST) {
+ /* We couldn't make the "/tmp/${USER,LOGIN}" subdirectory.
+ * Default to PREFIX's original prefix value. */
+ strcpy(fullname, prefix);
+ }
+
+ strcat(fullname, "/");
+ strcat(fullname, base_name);
+ } else {
+ /* Buffer is too small */
+ return NULL;
+ }
} else if (strlen(base_name) >= size) {
- /* Buffer is too small */
- return NULL;
+ /* Buffer is too small */
+ return NULL;
} else {
- strcpy(fullname, base_name);
+ strcpy(fullname, base_name);
}
/* Append a suffix */
if (suffix) {
- if (strlen(fullname) + strlen(suffix) >= size)
- return NULL;
+ if (strlen(fullname) + strlen(suffix) >= size)
+ return NULL;
- strcat(fullname, suffix);
+ strcat(fullname, suffix);
}
/* Remove any double slashes in the filename */
for (ptr = fullname, i = j = 0; ptr && i < size; i++, ptr++) {
- if (*ptr != '/' || last != '/')
- fullname[j++] = *ptr;
+ if (*ptr != '/' || last != '/')
+ fullname[j++] = *ptr;
- last = *ptr;
+ last = *ptr;
}
return fullname;
}
/*
- * Function: do_write
- * Purpose: Write the required amount of data to the file.
- * Return: SUCCESS or FAIL
- * Programmer: Albert Cheng, Bill Wendling, 2001/12/13
+ * Function: do_write
+ * Purpose: Write the required amount of data to the file.
+ * Return: SUCCESS or FAIL
+ * Programmer: Albert Cheng, Bill Wendling, 2001/12/13
* Modifications:
* Added 2D testing (Christian Chilan, 10. August 2005)
*/
static herr_t
do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
- off_t nbytes, size_t buf_size, void *buffer)
+ off_t nbytes, size_t buf_size, void *buffer)
{
int ret_code = SUCCESS;
int rc; /*routine return code */
long ndset;
size_t blk_size; /* The block size to subdivide the xfer buffer into */
off_t nbytes_xfer; /* Total number of bytes transferred so far */
+ size_t nbytes_xfer_advance; /* Number of bytes transferred in a single I/O operation */
size_t nbytes_toxfer; /* Number of bytes to transfer a particular time */
char dname[64];
off_t dset_offset=0; /*dataset offset in a file */
off_t bytes_begin[2]; /*first elmt this process transfer */
off_t bytes_count; /*number of elmts this process transfer */
- off_t snbytes=0; /*size of a side of the dataset square */
+ off_t snbytes=0; /*size of a side of the dataset square */
unsigned char *buf_p; /* Current buffer pointer */
/* POSIX variables */
off_t file_offset; /* File offset of the next transfer */
+ off_t file_offset_advance; /* File offset advance after each I/O operation */
off_t posix_file_offset; /* Base file offset of the next transfer */
/* MPI variables */
- MPI_Offset mpi_file_offset;/* Base file offset of the next transfer*/
- MPI_Offset mpi_offset; /* Offset in MPI file */
- MPI_Datatype mpi_file_type; /* MPI derived type for 1D file */
- MPI_Datatype mpi_blk_type; /* MPI derived type for 1D buffer */
- MPI_Datatype mpi_cont_type; /* MPI derived type for 2D contiguous file */
- MPI_Datatype contig_cont; /* MPI derived type for 2D contiguous buffer */
- MPI_Datatype mpi_inter_type;/* MPI derived type for 2D interleaved file */
- MPI_Datatype contig_inter; /* MPI derived type for 2D interleaved buffer*/
- MPI_Status mpi_status;
- int mrc; /* MPI return code */
+ MPI_Offset mpi_file_offset; /* Base file offset of the next transfer*/
+ MPI_Offset mpi_offset; /* Offset in MPI file */
+ MPI_Offset mpi_offset_advance; /* Offset advance after each I/O operation */
+ MPI_Datatype mpi_file_type; /* MPI derived type for 1D file */
+ MPI_Datatype mpi_blk_type; /* MPI derived type for 1D buffer */
+ MPI_Datatype mpi_cont_type; /* MPI derived type for 2D contiguous file */
+ MPI_Datatype mpi_partial_buffer_cont; /* MPI derived type for partial 2D contiguous buffer */
+ MPI_Datatype mpi_inter_type; /* MPI derived type for 2D interleaved file */
+ MPI_Datatype mpi_partial_buffer_inter; /* MPI derived type for partial 2D interleaved buffer */
+ MPI_Datatype mpi_full_buffer; /* MPI derived type for 2D full buffer */
+ MPI_Datatype mpi_full_chunk; /* MPI derived type for 2D full chunk */
+ MPI_Datatype mpi_chunk_inter_type; /* MPI derived type for 2D chunk interleaved file */
+ MPI_Datatype mpi_collective_type; /* Generic MPI derived type for 2D collective access */
+ MPI_Status mpi_status;
+ int mrc; /* MPI return code */
/* HDF5 variables */
herr_t hrc; /*HDF5 return code */
@@ -544,11 +560,11 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
hid_t h5dset_space_id = -1; /*dataset space ID */
hid_t h5mem_space_id = -1; /*memory dataspace ID */
hid_t h5ds_id = -1; /*dataset handle */
- hsize_t h5block[2]; /*dataspace selection */
- hsize_t h5stride[2];
- hsize_t h5count[2];
- hsize_t h5start[2];
- hssize_t h5offset[2]; /* Selection offset within dataspace */
+ hsize_t h5block[2]; /*dataspace selection */
+ hsize_t h5stride[2];
+ hsize_t h5count[2];
+ hsize_t h5start[2];
+ hssize_t h5offset[2]; /* Selection offset within dataspace */
hid_t h5dcpl = -1; /* Dataset creation property list */
hid_t h5dxpl = -1; /* Dataset transfer property list */
@@ -566,41 +582,47 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
*/
/* 1D dataspace */
- if (parms->dim2d==0){
- if (parms->interleaved==0) {
- /* Contiguous Pattern: */
- bytes_begin[0] = (off_t)(((double)nbytes*pio_mpi_rank_g)/pio_mpi_nprocs_g);
- } /* end if */
- else {
- /* Interleaved Pattern: */
- bytes_begin[0] = (off_t)(blk_size*pio_mpi_rank_g);
- } /* end else */
- /* Prepare buffer for verifying data */
- if (parms->verify)
- memset(buffer,pio_mpi_rank_g+1,buf_size);
+ if (!parms->dim2d){
+ /* Contiguous Pattern: */
+ if (!parms->interleaved) {
+ bytes_begin[0] = (off_t)(((double)nbytes*pio_mpi_rank_g)/pio_mpi_nprocs_g);
+ } /* end if */
+ /* Interleaved Pattern: */
+ else {
+ bytes_begin[0] = (off_t)(blk_size*pio_mpi_rank_g);
+ } /* end else */
+
+ /* Prepare buffer for verifying data */
+ if (parms->verify)
+ memset(buffer,pio_mpi_rank_g+1,buf_size);
}/* end if */
/* 2D dataspace */
else {
- snbytes = (off_t)sqrt(nbytes);
- /* nbytes is always the number of bytes per dataset (1D or 2D). If the
- dataspace is 2D, snbytes is the size of a side of the dataset square.
- */
- if (parms->interleaved==0) {
- /* Contiguous Pattern: */
- bytes_begin[0] = (off_t)((double)snbytes*pio_mpi_rank_g / pio_mpi_nprocs_g);
- bytes_begin[1] = 0;
- } /* end if */
- else {
- /* Interleaved Pattern: */
- bytes_begin[0] = 0;
- bytes_begin[1] = (off_t)(blk_size*pio_mpi_rank_g);
- } /* end else */
- /* Prepare buffer for verifying data */
- if (parms->verify)
- memset(buffer,pio_mpi_rank_g+1,buf_size*blk_size);
- }
+ /* nbytes is always the number of bytes per dataset (1D or 2D). If the
+ dataspace is 2D, snbytes is the size of a side of the dataset square.
+ */
+ snbytes = (off_t)sqrt(nbytes);
+
+ /* Contiguous Pattern: */
+ if (!parms->interleaved) {
+ bytes_begin[0] = (off_t)((double)snbytes*pio_mpi_rank_g / pio_mpi_nprocs_g);
+ bytes_begin[1] = 0;
+ } /* end if */
+ /* Interleaved Pattern: */
+ else {
+ bytes_begin[0] = 0;
+
+ if(!parms->h5_use_chunks || parms->io_type==PHDF5)
+ bytes_begin[1] = (off_t)(blk_size*pio_mpi_rank_g);
+ else
+ bytes_begin[1] = (off_t)(blk_size*blk_size*pio_mpi_rank_g);
+ } /* end else */
+
+ /* Prepare buffer for verifying data */
+ if (parms->verify)
+ memset(buffer,pio_mpi_rank_g+1,buf_size*blk_size);
+ } /* end else */
- /* end else */
/* Calculate the total number of bytes (bytes_count) to be
* transferred by this process. It may be different for different
@@ -613,798 +635,897 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
* (This is tricky, don't mess with the formula, rounding errors
* can easily get introduced) */
bytes_count = (off_t)(((double)nbytes*(pio_mpi_rank_g+1)) / pio_mpi_nprocs_g)
- - (off_t)(((double)nbytes*pio_mpi_rank_g) / pio_mpi_nprocs_g);
+ - (off_t)(((double)nbytes*pio_mpi_rank_g) / pio_mpi_nprocs_g);
/* debug */
if (pio_debug_level >= 4) {
- HDprint_rank(output);
- HDfprintf(output, "Debug(do_write): "
- "buf_size=%Hd, bytes_begin=%Hd, bytes_count=%Hd\n",
- (long_long)buf_size, (long_long)bytes_begin,
- (long_long)bytes_count);
+ HDprint_rank(output);
+ HDfprintf(output, "Debug(do_write): "
+ "buf_size=%Hd, bytes_begin=%Hd, bytes_count=%Hd\n",
+ (long_long)buf_size, (long_long)bytes_begin,
+ (long_long)bytes_count);
}
/* I/O Access specific setup */
switch (parms->io_type) {
- case POSIXIO:
- /* No extra setup */
- break;
-
- case MPIO: /* MPI-I/O setup */
- /* 1D dataspace */
- if (parms->dim2d==0){
- /* Build block's derived type */
- mrc = MPI_Type_contiguous((int)blk_size,
- MPI_BYTE, &mpi_blk_type);
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
-
- /* Build file's derived type */
- mrc = MPI_Type_vector((int)(buf_size/blk_size), (int)1,
- (int)pio_mpi_nprocs_g, mpi_blk_type, &mpi_file_type);
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
-
- /* Commit file type */
- mrc = MPI_Type_commit( &mpi_file_type );
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
-
- /* Commit buffer type */
- mrc = MPI_Type_commit( &mpi_blk_type );
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
- } /* end if */
- /* 2D dataspace */
- else {
- /* Build partial buffer derived type for contiguous access */
- mrc = MPI_Type_contiguous((int)buf_size, MPI_BYTE,
- &contig_cont);
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
-
- /* Build contiguous file's derived type */
- mrc = MPI_Type_vector((int)blk_size, (int)1, (int)(snbytes/buf_size),
- contig_cont, &mpi_cont_type);
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
-
- /* Commit contiguous file type */
- mrc = MPI_Type_commit(&mpi_cont_type);
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
-
- /* Build partial buffer derived type for interleaved access */
- mrc = MPI_Type_contiguous((int)blk_size, MPI_BYTE,
- &contig_inter);
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
-
- /* Build interleaved file's derived type */
- mrc = MPI_Type_vector((int)buf_size, (int)1, (int)(snbytes/blk_size),
- contig_inter, &mpi_inter_type);
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
-
- /* Commit interleaved file type */
- mrc = MPI_Type_commit(&mpi_inter_type);
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
- } /* end else */
- break;
-
- case PHDF5: /* HDF5 setup */
- /* 1D dataspace */
- if (parms->dim2d==0){
- if(nbytes>0) {
- /* define a contiguous dataset of nbytes native bytes */
- h5dims[0] = nbytes;
- h5dset_space_id = H5Screate_simple(1, h5dims, NULL);
- VRFY((h5dset_space_id >= 0), "H5Screate_simple");
-
- /* Set up the file dset space id to select the pattern to access */
- if (parms->interleaved==0){
- /* Contiguous pattern */
- h5start[0] = bytes_begin[0];
- h5stride[0] = h5block[0] = blk_size;
- h5count[0] = buf_size/blk_size;
- } /* end if */
- else {
- /* Interleaved access pattern */
- /* Skip offset over blocks of other processes */
- h5start[0] = bytes_begin[0];
- h5stride[0] = blk_size*pio_mpi_nprocs_g;
- h5block[0] = blk_size;
- h5count[0] = buf_size/blk_size;
- } /* end else */
- hrc = H5Sselect_hyperslab(h5dset_space_id, H5S_SELECT_SET,
- h5start, h5stride, h5count, h5block);
- VRFY((hrc >= 0), "H5Sselect_hyperslab");
- } /* end if */
- else {
- h5dset_space_id = H5Screate(H5S_SCALAR);
- VRFY((h5dset_space_id >= 0), "H5Screate");
- } /* end else */
-
- /* Create the memory dataspace that corresponds to the xfer buffer */
- if(buf_size>0) {
- h5dims[0] = buf_size;
- h5mem_space_id = H5Screate_simple(1, h5dims, NULL);
- VRFY((h5mem_space_id >= 0), "H5Screate_simple");
- } /* end if */
- else {
- h5mem_space_id = H5Screate(H5S_SCALAR);
- VRFY((h5mem_space_id >= 0), "H5Screate");
- } /* end else */
- } /* end if */
- /* 2D dataspace */
- else {
- if(nbytes>0) {
- /* define a contiguous dataset of nbytes native bytes */
- h5dims[0] = snbytes;
- h5dims[1] = snbytes;
- h5dset_space_id = H5Screate_simple(2, h5dims, NULL);
- VRFY((h5dset_space_id >= 0), "H5Screate_simple");
-
- /* Set up the file dset space id to select the pattern to access */
- if (parms->interleaved==0){
- /* Contiguous pattern */
- h5start[0] = bytes_begin[0];
- h5start[1] = bytes_begin[1];
- h5stride[0] = 1;
- h5stride[1] = h5block[0] = h5block[1] = blk_size;
- h5count[0] = 1;
- h5count[1] = buf_size/blk_size;
- } /* end if */
- else {
- /* Interleaved access pattern */
- /* Skip offset over blocks of other processes */
- h5start[0] = bytes_begin[0];
- h5start[1] = bytes_begin[1];
- h5stride[0] = blk_size;
- h5stride[1] = blk_size*pio_mpi_nprocs_g;
- h5block[0] = h5block[1] = blk_size;
- h5count[0] = buf_size/blk_size;
- h5count[1] = 1;
- } /* end else */
- hrc = H5Sselect_hyperslab(h5dset_space_id, H5S_SELECT_SET,
- h5start, h5stride, h5count, h5block);
- VRFY((hrc >= 0), "H5Sselect_hyperslab");
- } /* end if */
- else {
- h5dset_space_id = H5Screate(H5S_SCALAR);
- VRFY((h5dset_space_id >= 0), "H5Screate");
- } /* end else */
-
- /* Create the memory dataspace that corresponds to the xfer buffer */
- if(buf_size>0) {
- if (parms->interleaved==0){
- h5dims[0] = blk_size;
- h5dims[1] = buf_size;
- }else{
- h5dims[0] = buf_size;
- h5dims[1] = blk_size;
- }
- h5mem_space_id = H5Screate_simple(2, h5dims, NULL);
- VRFY((h5mem_space_id >= 0), "H5Screate_simple");
- } /* end if */
- else {
- h5mem_space_id = H5Screate(H5S_SCALAR);
- VRFY((h5mem_space_id >= 0), "H5Screate");
- } /* end else */
- } /* end else */
-
- /* Create the dataset transfer property list */
- h5dxpl = H5Pcreate(H5P_DATASET_XFER);
- if (h5dxpl < 0) {
- fprintf(stderr, "HDF5 Property List Create failed\n");
- GOTOERROR(FAIL);
- }
-
- /* Change to collective I/O, if asked */
- if(parms->collective) {
- hrc = H5Pset_dxpl_mpio(h5dxpl, H5FD_MPIO_COLLECTIVE);
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Property List Set failed\n");
- GOTOERROR(FAIL);
- } /* end if */
- } /* end if */
- break;
+ case POSIXIO:
+ /* No extra setup */
+ break;
+
+ case MPIO: /* MPI-I/O setup */
+ /* 1D dataspace */
+ if (!parms->dim2d){
+ /* Build block's derived type */
+ mrc = MPI_Type_contiguous((int)blk_size,
+ MPI_BYTE, &mpi_blk_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Build file's derived type */
+ mrc = MPI_Type_vector((int)(buf_size/blk_size), (int)1,
+ (int)pio_mpi_nprocs_g, mpi_blk_type, &mpi_file_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit file type */
+ mrc = MPI_Type_commit( &mpi_file_type );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Commit buffer type */
+ mrc = MPI_Type_commit( &mpi_blk_type );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ /* Build partial buffer derived type for contiguous access */
+
+ mrc = MPI_Type_contiguous((int)buf_size, MPI_BYTE,
+ &mpi_partial_buffer_cont);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit partial buffer derived type */
+ mrc = MPI_Type_commit(&mpi_partial_buffer_cont);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build contiguous file's derived type */
+ mrc = MPI_Type_vector((int)blk_size, (int)1, (int)(snbytes/buf_size),
+ mpi_partial_buffer_cont, &mpi_cont_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit contiguous file type */
+ mrc = MPI_Type_commit(&mpi_cont_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build partial buffer derived type for interleaved access */
+ mrc = MPI_Type_contiguous((int)blk_size, MPI_BYTE,
+ &mpi_partial_buffer_inter);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit partial buffer derived type */
+ mrc = MPI_Type_commit(&mpi_partial_buffer_inter);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build interleaved file's derived type */
+ mrc = MPI_Type_vector((int)buf_size, (int)1, (int)(snbytes/blk_size),
+ mpi_partial_buffer_inter, &mpi_inter_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit interleaved file type */
+ mrc = MPI_Type_commit(&mpi_inter_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build full buffer derived type */
+ mrc = MPI_Type_contiguous((int)(blk_size*buf_size), MPI_BYTE,
+ &mpi_full_buffer);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit full buffer derived type */
+ mrc = MPI_Type_commit(&mpi_full_buffer);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build full chunk derived type */
+ mrc = MPI_Type_contiguous((int)(blk_size*blk_size), MPI_BYTE,
+ &mpi_full_chunk);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit full chunk derived type */
+ mrc = MPI_Type_commit(&mpi_full_chunk);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build chunk interleaved file's derived type */
+ mrc = MPI_Type_vector((int)(buf_size/blk_size), (int)1, (int)(snbytes/blk_size),
+ mpi_full_chunk, &mpi_chunk_inter_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit chunk interleaved file type */
+ mrc = MPI_Type_commit(&mpi_chunk_inter_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ } /* end else */
+ break;
+
+ case PHDF5: /* HDF5 setup */
+ /* 1D dataspace */
+ if (!parms->dim2d){
+ if(nbytes>0) {
+ /* define a contiguous dataset of nbytes native bytes */
+ h5dims[0] = nbytes;
+ h5dset_space_id = H5Screate_simple(1, h5dims, NULL);
+ VRFY((h5dset_space_id >= 0), "H5Screate_simple");
+
+ /* Set up the file dset space id to select the pattern to access */
+ if (!parms->interleaved){
+ /* Contiguous pattern */
+ h5start[0] = bytes_begin[0];
+ h5stride[0] = h5block[0] = blk_size;
+ h5count[0] = buf_size/blk_size;
+ } /* end if */
+ else {
+ /* Interleaved access pattern */
+ /* Skip offset over blocks of other processes */
+ h5start[0] = bytes_begin[0];
+ h5stride[0] = blk_size*pio_mpi_nprocs_g;
+ h5block[0] = blk_size;
+ h5count[0] = buf_size/blk_size;
+ } /* end else */
+ hrc = H5Sselect_hyperslab(h5dset_space_id, H5S_SELECT_SET,
+ h5start, h5stride, h5count, h5block);
+ VRFY((hrc >= 0), "H5Sselect_hyperslab");
+ } /* end if */
+ else {
+ h5dset_space_id = H5Screate(H5S_SCALAR);
+ VRFY((h5dset_space_id >= 0), "H5Screate");
+ } /* end else */
+
+ /* Create the memory dataspace that corresponds to the xfer buffer */
+ if(buf_size>0) {
+ h5dims[0] = buf_size;
+ h5mem_space_id = H5Screate_simple(1, h5dims, NULL);
+ VRFY((h5mem_space_id >= 0), "H5Screate_simple");
+ } /* end if */
+ else {
+ h5mem_space_id = H5Screate(H5S_SCALAR);
+ VRFY((h5mem_space_id >= 0), "H5Screate");
+ } /* end else */
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ if(nbytes>0) {
+ /* define a contiguous dataset of nbytes native bytes */
+ h5dims[0] = snbytes;
+ h5dims[1] = snbytes;
+ h5dset_space_id = H5Screate_simple(2, h5dims, NULL);
+ VRFY((h5dset_space_id >= 0), "H5Screate_simple");
+
+ /* Set up the file dset space id to select the pattern to access */
+ if (!parms->interleaved){
+ /* Contiguous pattern */
+ h5start[0] = bytes_begin[0];
+ h5start[1] = bytes_begin[1];
+ h5stride[0] = 1;
+ h5stride[1] = h5block[0] = h5block[1] = blk_size;
+ h5count[0] = 1;
+ h5count[1] = buf_size/blk_size;
+ } /* end if */
+ else {
+ /* Interleaved access pattern */
+ /* Skip offset over blocks of other processes */
+ h5start[0] = bytes_begin[0];
+ h5start[1] = bytes_begin[1];
+ h5stride[0] = blk_size;
+ h5stride[1] = blk_size*pio_mpi_nprocs_g;
+ h5block[0] = h5block[1] = blk_size;
+ h5count[0] = buf_size/blk_size;
+ h5count[1] = 1;
+ } /* end else */
+ hrc = H5Sselect_hyperslab(h5dset_space_id, H5S_SELECT_SET,
+ h5start, h5stride, h5count, h5block);
+ VRFY((hrc >= 0), "H5Sselect_hyperslab");
+ } /* end if */
+ else {
+ h5dset_space_id = H5Screate(H5S_SCALAR);
+ VRFY((h5dset_space_id >= 0), "H5Screate");
+ } /* end else */
+
+ /* Create the memory dataspace that corresponds to the xfer buffer */
+ if(buf_size>0) {
+ if (!parms->interleaved){
+ h5dims[0] = blk_size;
+ h5dims[1] = buf_size;
+ }else{
+ h5dims[0] = buf_size;
+ h5dims[1] = blk_size;
+ }
+ h5mem_space_id = H5Screate_simple(2, h5dims, NULL);
+ VRFY((h5mem_space_id >= 0), "H5Screate_simple");
+ } /* end if */
+ else {
+ h5mem_space_id = H5Screate(H5S_SCALAR);
+ VRFY((h5mem_space_id >= 0), "H5Screate");
+ } /* end else */
+ } /* end else */
+
+ /* Create the dataset transfer property list */
+ h5dxpl = H5Pcreate(H5P_DATASET_XFER);
+ if (h5dxpl < 0) {
+ fprintf(stderr, "HDF5 Property List Create failed\n");
+ GOTOERROR(FAIL);
+ }
+
+ /* Change to collective I/O, if asked */
+ if(parms->collective) {
+ hrc = H5Pset_dxpl_mpio(h5dxpl, H5FD_MPIO_COLLECTIVE);
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Property List Set failed\n");
+ GOTOERROR(FAIL);
+ } /* end if */
+ } /* end if */
+ break;
} /* end switch */
for (ndset = 1; ndset <= ndsets; ++ndset) {
- /* Calculate dataset offset within a file */
-
- /* create dataset */
- switch (parms->io_type) {
- case POSIXIO:
- case MPIO:
- /* both posix and mpi io just need dataset offset in file*/
- dset_offset = (ndset - 1) * nbytes;
- break;
-
- case PHDF5:
- h5dcpl = H5Pcreate(H5P_DATASET_CREATE);
- if (h5dcpl < 0) {
- fprintf(stderr, "HDF5 Property List Create failed\n");
- GOTOERROR(FAIL);
- }
- /* 1D dataspace */
- if (parms->dim2d==0){
- /* Make the dataset chunked if asked */
- if(parms->h5_use_chunks) {
- /* Set the chunk size to be the same as the buffer size */
- h5dims[0] = blk_size;
- hrc = H5Pset_chunk(h5dcpl, 1, h5dims);
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Property List Set failed\n");
- GOTOERROR(FAIL);
- } /* end if */
- } /* end if */
- }/* end if */
- else{
- /* 2D dataspace */
- if(parms->h5_use_chunks) {
- /* Set the chunk size to be the same as the block size */
- h5dims[0] = blk_size;
- h5dims[1] = blk_size;
- hrc = H5Pset_chunk(h5dcpl, 2, h5dims);
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Property List Set failed\n");
- GOTOERROR(FAIL);
- } /* end if */
- } /* end if */
- }/* end else */
-
- sprintf(dname, "Dataset_%ld", ndset);
- h5ds_id = H5Dcreate2(fd->h5fd, dname, ELMT_H5_TYPE,
- h5dset_space_id, H5P_DEFAULT, h5dcpl, H5P_DEFAULT);
-
- if(h5ds_id < 0) {
- fprintf(stderr, "HDF5 Dataset Create failed\n");
- GOTOERROR(FAIL);
- }
-
- hrc = H5Pclose(h5dcpl);
- /* verifying the close of the dcpl */
- if(hrc < 0) {
- fprintf(stderr, "HDF5 Property List Close failed\n");
- GOTOERROR(FAIL);
- }
-
- break;
- }
-
- /* The task is to transfer bytes_count bytes, starting at
- * bytes_begin position, using transfer buffer of buf_size bytes.
- * If interleaved, select buf_size at a time, in round robin
- * fashion, according to number of process. Otherwise, select
- * all bytes_count in contiguous.
- */
- nbytes_xfer = 0 ;
-
- /* 1D dataspace */
- if (parms->dim2d==0){
- /* Set base file offset for all I/O patterns and POSIX access */
- posix_file_offset = dset_offset + bytes_begin[0];
-
- /* Set base file offset for all I/O patterns and MPI access */
- mpi_file_offset = (MPI_Offset)(dset_offset + bytes_begin[0]);
- } /* end if */
- else {
- /* Set base file offset for all I/O patterns and POSIX access */
- posix_file_offset=dset_offset + bytes_begin[0]*snbytes+
- bytes_begin[1];
-
- /* Set base file offset for all I/O patterns and MPI access */
- mpi_file_offset=(MPI_Offset)(dset_offset + bytes_begin[0]*snbytes+
- bytes_begin[1]);
- } /* end else */
-
- /* Start "raw data" write timer */
- set_time(res->timers, HDF5_RAW_WRITE_FIXED_DIMS, START);
-
- while (nbytes_xfer < bytes_count){
- /* Write */
- /* Calculate offset of write within a dataset/file */
- switch (parms->io_type) {
- case POSIXIO:
- /* 1D dataspace */
- if (parms->dim2d==0){
- /* Contiguous pattern */
- if (parms->interleaved==0) {
- /* Compute file offset */
- file_offset = posix_file_offset + (off_t)nbytes_xfer;
-
- /* only care if seek returns error */
- rc = POSIXSEEK(fd->posixfd, file_offset) < 0 ? -1 : 0;
- VRFY((rc==0), "POSIXSEEK");
-
- /* check if all bytes are written */
- rc = ((ssize_t)buf_size ==
- POSIXWRITE(fd->posixfd, buffer, buf_size));
- VRFY((rc != 0), "POSIXWRITE");
-
- /* Advance global offset in dataset */
- nbytes_xfer+=buf_size;
- } /* end if */
- /* Interleaved access pattern */
- else {
- /* Set the base of user's buffer */
- buf_p=(unsigned char *)buffer;
-
- /* Set the number of bytes to transfer this time */
- nbytes_toxfer = buf_size;
-
- /* Loop over the buffers to write */
- while(nbytes_toxfer>0) {
- /* Skip offset over blocks of other processes */
- file_offset = posix_file_offset +
- (off_t)(nbytes_xfer*pio_mpi_nprocs_g);
-
- /* only care if seek returns error */
- rc = POSIXSEEK(fd->posixfd, file_offset) < 0 ? -1 : 0;
- VRFY((rc==0), "POSIXSEEK");
-
- /* check if all bytes are written */
- rc = ((ssize_t)blk_size ==
- POSIXWRITE(fd->posixfd, buf_p, blk_size));
- VRFY((rc != 0), "POSIXWRITE");
-
- /* Advance location in buffer */
- buf_p+=blk_size;
-
- /* Advance global offset in dataset */
- nbytes_xfer+=blk_size;
-
- /* Decrement number of bytes left this time */
- nbytes_toxfer-=blk_size;
- } /* end while */
- } /* end else */
- } /* end if */
- /* 2D dataspace */
- else {
- /* Contiguous pattern */
- if (parms->interleaved==0){
- /* Set the base of user's buffer */
- buf_p = (unsigned char *)buffer;
-
- /* Set the number of bytes to transfer this time */
- nbytes_toxfer = buf_size*blk_size;
-
- /* Compute file offset */
- file_offset=posix_file_offset+(off_t)(((nbytes_xfer/blk_size)/(snbytes))*
- (blk_size*snbytes)+((nbytes_xfer/blk_size)%(snbytes)));
-
- /* Loop over portions of the buffer to write */
- while(nbytes_toxfer>0){
- /* only care if seek returns error */
- rc = POSIXSEEK(fd->posixfd, file_offset) < 0 ? -1 : 0;
- VRFY((rc==0), "POSIXSEEK");
-
- /* check if all bytes are written */
- rc = ((ssize_t)buf_size ==
- POSIXWRITE(fd->posixfd, buffer, buf_size));
- VRFY((rc != 0), "POSIXWRITE");
-
- /* Advance location in buffer */
- buf_p+=buf_size;
-
- /* Advance global offset in dataset */
- nbytes_xfer+=buf_size;
-
- /* Decrement number of bytes left this time */
- nbytes_toxfer-=buf_size;
-
- /* Partially advance file offset */
- file_offset+=(off_t)(snbytes);
- } /* end while */
- } /* end if */
- /* Interleaved access pattern */
- else{
- /* Set the base of user's buffer */
- buf_p=(unsigned char *)buffer;
-
- /* Set the number of bytes to transfer this time */
- nbytes_toxfer=buf_size*blk_size;
-
- /* Compute file offset */
- file_offset=posix_file_offset+(off_t)(((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)/(snbytes)*
- (buf_size*snbytes)+((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)%(snbytes));
-
- /* Loop over portions of the buffer to write */
- while(nbytes_toxfer>0){
- /* only care if seek returns error */
- rc = POSIXSEEK(fd->posixfd, file_offset) < 0 ? -1 : 0;
- VRFY((rc==0), "POSIXSEEK");
-
- /* check if all bytes are written */
- rc = ((ssize_t)blk_size ==
- POSIXWRITE(fd->posixfd, buffer, blk_size));
- VRFY((rc != 0), "POSIXWRITE");
-
- /* Advance location in buffer */
- buf_p+=blk_size;
-
- /* Advance global offset in dataset */
- nbytes_xfer+=blk_size;
-
- /* Decrement number of bytes left this time */
- nbytes_toxfer-=blk_size;
-
- /* Partially advance file offset */
- file_offset+=(off_t)(snbytes);
- } /* end while */
- }/* end else */
- } /* end else */
- break;
-
- case MPIO:
- /* 1D dataspace */
- if (parms->dim2d==0){
- /* Independent file access */
- if(parms->collective==0) {
- /* Contiguous pattern */
- if (parms->interleaved==0){
- /* Compute offset in file */
- mpi_offset = mpi_file_offset +
- nbytes_xfer;
-
- /* Perform independent write */
- mrc = MPI_File_write_at(fd->mpifd, mpi_offset, buffer,
- (int)(buf_size/blk_size), mpi_blk_type,
- &mpi_status);
- VRFY((mrc==MPI_SUCCESS), "MPIO_WRITE");
-
- /* Advance global offset in dataset */
- nbytes_xfer+=buf_size;
- } /* end if */
- /* Interleaved access pattern */
- else {
- /* Set the base of user's buffer */
- buf_p=(unsigned char *)buffer;
-
- /* Set the number of bytes to transfer this time */
- nbytes_toxfer = buf_size;
-
- /* Loop over the buffers to write */
- while(nbytes_toxfer>0) {
- /* Skip offset over blocks of other processes */
- mpi_offset = mpi_file_offset +
- (nbytes_xfer*pio_mpi_nprocs_g);
-
- /* Perform independent write */
- mrc = MPI_File_write_at(fd->mpifd, mpi_offset, buf_p,
- (int)1, mpi_blk_type, &mpi_status);
- VRFY((mrc==MPI_SUCCESS), "MPIO_WRITE");
-
- /* Advance location in buffer */
- buf_p+=blk_size;
-
- /* Advance global offset in dataset */
- nbytes_xfer+=blk_size;
-
- /* Decrement number of bytes left this time */
- nbytes_toxfer-=blk_size;
- } /* end while */
- } /* end else */
- } /* end if */
- /* Collective file access */
- else {
- /* Contiguous access pattern */
- if (parms->interleaved==0){
- /* Compute offset in file */
- mpi_offset = mpi_file_offset +
- nbytes_xfer;
-
- /* Perform independent write */
- mrc = MPI_File_write_at_all(fd->mpifd, mpi_offset, buffer,
- (int)(buf_size/blk_size), mpi_blk_type, &mpi_status);
- VRFY((mrc==MPI_SUCCESS), "MPIO_WRITE");
-
- /* Advance global offset in dataset */
- nbytes_xfer+=buf_size;
- } /* end if */
- /* Interleaved access pattern */
- else {
- /* Compute offset in file */
- mpi_offset = mpi_file_offset +
- (nbytes_xfer*pio_mpi_nprocs_g);
-
- /* Set the file view */
- mrc = MPI_File_set_view(fd->mpifd, mpi_offset, mpi_blk_type,
- mpi_file_type, (char*)"native", h5_io_info_g);
- VRFY((mrc==MPI_SUCCESS), "MPIO_VIEW");
-
- /* Perform write */
- mrc = MPI_File_write_at_all(fd->mpifd, 0, buffer,
- (int)(buf_size/blk_size), mpi_blk_type, &mpi_status);
- VRFY((mrc==MPI_SUCCESS), "MPIO_WRITE");
-
- /* Advance global offset in dataset */
- nbytes_xfer+=buf_size;
- } /* end else */
- } /* end else */
- } /* end if */
- /* 2D dataspace */
- else {
- /* Independent file access */
- if (parms->collective==0)
- /* Contiguous pattern */
- if (parms->interleaved==0){
- /* Set the base of user's buffer */
- buf_p = (unsigned char *)buffer;
-
- /* Set the number of bytes to transfer this time */
- nbytes_toxfer = buf_size*blk_size;
-
- /* Compute offset in file */
- mpi_offset=mpi_file_offset+((nbytes_xfer/blk_size)/(snbytes))*
- (blk_size*snbytes)+((nbytes_xfer/blk_size)%(snbytes));
-
- /* Loop over portions of the buffer to write */
- while(nbytes_toxfer>0){
-
- /* Perform independent write */
- mrc = MPI_File_write_at(fd->mpifd, mpi_offset, buf_p, (int)buf_size, MPI_BYTE, &mpi_status);
- VRFY((mrc==MPI_SUCCESS), "MPIO_WRITE");
-
- /* Advance location in buffer */
- buf_p+=buf_size;
-
- /* Advance global offset in dataset */
- nbytes_xfer+=buf_size;
-
- /* Decrement number of bytes left this time */
- nbytes_toxfer-=buf_size;
-
- /* Partially advance global offset in dataset */
- mpi_offset+=snbytes;
- } /* end while */
- } /* end if */
- /* Interleaved access pattern */
- else{
- /* Set the base of user's buffer */
- buf_p=(unsigned char *)buffer;
-
- /* Set the number of bytes to transfer this time */
- nbytes_toxfer=buf_size*blk_size;
-
- /* Compute offset in file */
- mpi_offset=mpi_file_offset+((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)/(snbytes)*
- (buf_size*snbytes)+((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)%(snbytes);
-
- /* Loop over portions of the buffer to write */
- while(nbytes_toxfer>0){
- /* Perform independent write */
- mrc = MPI_File_write_at(fd->mpifd, mpi_offset, buf_p, (int)blk_size, MPI_BYTE, &mpi_status);
- VRFY((mrc==MPI_SUCCESS), "MPIO_WRITE");
-
- /* Advance location in buffer */
- buf_p+=blk_size;
-
- /* Advance global offset in dataset */
- nbytes_xfer+=blk_size;
-
- /* Decrement number of bytes left this time */
- nbytes_toxfer-=blk_size;
-
- /* Partially advance global offset in dataset */
- mpi_offset+=snbytes;
- } /* end while */
- } /* end else */
- /* end if */
- /* Collective file access */
- else
- /* Contiguous access pattern */
- if (parms->interleaved==0){
- /* Compute offset in file */
- mpi_offset=mpi_file_offset+((nbytes_xfer/blk_size)/(snbytes))*
- (blk_size*snbytes)+((nbytes_xfer/blk_size)%(snbytes));
-
- /* Set the file view */
- mrc = MPI_File_set_view(fd->mpifd, mpi_offset, MPI_BYTE, mpi_cont_type, (char *)"native", h5_io_info_g);
- VRFY((mrc==MPI_SUCCESS), "MPIO_VIEW");
-
- /* Perform write */
- MPI_File_write_at_all(fd->mpifd, 0, buffer,(int)(buf_size*blk_size),MPI_BYTE, &mpi_status);
- VRFY((mrc==MPI_SUCCESS), "MPIO_WRITE");
-
- nbytes_xfer+=buf_size*blk_size;
- } /* end if */
- /* Interleaved access pattern */
- else{
- /* Compute offset in file */
- mpi_offset=mpi_file_offset+((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)/(snbytes)*
- (buf_size*snbytes)+((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)%(snbytes);
-
- /* Set the file view */
- mrc = MPI_File_set_view(fd->mpifd, mpi_offset, MPI_BYTE, mpi_inter_type, (char *)"native", h5_io_info_g);
- VRFY((mrc==MPI_SUCCESS), "MPIO_VIEW");
-
- /* Perform write */
- MPI_File_write_at_all(fd->mpifd, 0, buffer, (int)(buf_size*blk_size), MPI_BYTE, &mpi_status);
- VRFY((mrc==MPI_SUCCESS), "MPIO_WRITE");
-
- nbytes_xfer+=buf_size*blk_size;
- } /* end else */
- /* end else */
- } /* end else */
- break;
-
- case PHDF5:
- /* 1D dataspace */
- if (parms->dim2d==0){
- /* Set up the file dset space id to move the selection to process */
- if (parms->interleaved==0){
- /* Contiguous pattern */
- h5offset[0] = nbytes_xfer;
- } /* end if */
- else {
- /* Interleaved access pattern */
- /* Skip offset over blocks of other processes */
- h5offset[0] = (nbytes_xfer*pio_mpi_nprocs_g);
- } /* end else */
- hrc = H5Soffset_simple(h5dset_space_id, h5offset);
- VRFY((hrc >= 0), "H5Soffset_simple");
-
- /* Write the buffer out */
- hrc = H5Dwrite(h5ds_id, ELMT_H5_TYPE, h5mem_space_id,
- h5dset_space_id, h5dxpl, buffer);
- VRFY((hrc >= 0), "H5Dwrite");
-
- /* Increment number of bytes transferred */
- nbytes_xfer += buf_size;
- } /* end if */
- /* 2D dataspace */
- else {
- /* Set up the file dset space id to move the selection to process */
- if (parms->interleaved==0){
- /* Contiguous pattern */
- h5offset[0] = (nbytes_xfer/(snbytes*blk_size))*blk_size;
- h5offset[1] = (nbytes_xfer%(snbytes*blk_size))/blk_size;
-
- } /* end if */
- else {
- /* Interleaved access pattern */
- /* Skip offset over blocks of other processes */
- h5offset[0] = ((nbytes_xfer*pio_mpi_nprocs_g)/(snbytes*buf_size))*buf_size;
- h5offset[1] = ((nbytes_xfer*pio_mpi_nprocs_g)%(snbytes*buf_size))/buf_size;
-
- } /* end else */
- hrc = H5Soffset_simple(h5dset_space_id, h5offset);
- VRFY((hrc >= 0), "H5Soffset_simple");
-
- /* Write the buffer out */
- hrc = H5Dwrite(h5ds_id, ELMT_H5_TYPE, h5mem_space_id,
- h5dset_space_id, h5dxpl, buffer);
- VRFY((hrc >= 0), "H5Dwrite");
-
- /* Increment number of bytes transferred */
- nbytes_xfer += buf_size*blk_size;
-
- } /* end else */
-
- break;
- } /* switch (parms->io_type) */
- } /* end while */
-
- /* Stop "raw data" write timer */
- set_time(res->timers, HDF5_RAW_WRITE_FIXED_DIMS, STOP);
-
- /* Calculate write time */
-
- /* Close dataset. Only HDF5 needs to do an explicit close. */
- if (parms->io_type == PHDF5) {
- hrc = H5Dclose(h5ds_id);
-
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Dataset Close failed\n");
- GOTOERROR(FAIL);
- }
-
- h5ds_id = -1;
- } /* end if */
+ /* Calculate dataset offset within a file */
+
+ /* create dataset */
+ switch (parms->io_type) {
+ case POSIXIO:
+ case MPIO:
+ /* both posix and mpi io just need dataset offset in file*/
+ dset_offset = (ndset - 1) * nbytes;
+ break;
+
+ case PHDF5:
+ h5dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ if (h5dcpl < 0) {
+ fprintf(stderr, "HDF5 Property List Create failed\n");
+ GOTOERROR(FAIL);
+ }
+ /* 1D dataspace */
+ if (!parms->dim2d){
+ /* Make the dataset chunked if asked */
+ if(parms->h5_use_chunks) {
+ /* Set the chunk size to be the same as the buffer size */
+ h5dims[0] = blk_size;
+ hrc = H5Pset_chunk(h5dcpl, 1, h5dims);
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Property List Set failed\n");
+ GOTOERROR(FAIL);
+ } /* end if */
+ } /* end if */
+ }/* end if */
+ else{
+ /* 2D dataspace */
+ if(parms->h5_use_chunks) {
+ /* Set the chunk size to be the same as the block size */
+ h5dims[0] = blk_size;
+ h5dims[1] = blk_size;
+ hrc = H5Pset_chunk(h5dcpl, 2, h5dims);
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Property List Set failed\n");
+ GOTOERROR(FAIL);
+ } /* end if */
+ } /* end if */
+ }/* end else */
+
+ sprintf(dname, "Dataset_%ld", ndset);
+ h5ds_id = H5Dcreate2(fd->h5fd, dname, ELMT_H5_TYPE,
+ h5dset_space_id, H5P_DEFAULT, h5dcpl, H5P_DEFAULT);
+
+ if (h5ds_id < 0) {
+ fprintf(stderr, "HDF5 Dataset Create failed\n");
+ GOTOERROR(FAIL);
+ }
+
+ hrc = H5Pclose(h5dcpl);
+ /* verifying the close of the dcpl */
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Property List Close failed\n");
+ GOTOERROR(FAIL);
+ }
+
+ break;
+ }
+
+ /* The task is to transfer bytes_count bytes, starting at
+ * bytes_begin position, using transfer buffer of buf_size bytes.
+ * If interleaved, select buf_size at a time, in round robin
+ * fashion, according to number of process. Otherwise, select
+ * all bytes_count in contiguous.
+ */
+ nbytes_xfer = 0 ;
+
+ /* 1D dataspace */
+ if (!parms->dim2d){
+ /* Set base file offset for all I/O patterns and POSIX access */
+ posix_file_offset = dset_offset + bytes_begin[0];
+
+ /* Set base file offset for all I/O patterns and MPI access */
+ mpi_file_offset = (MPI_Offset)(dset_offset + bytes_begin[0]);
+ } /* end if */
+ else {
+ /* Set base file offset for all I/O patterns and POSIX access */
+ posix_file_offset=dset_offset + bytes_begin[0]*snbytes+
+ bytes_begin[1];
+
+ /* Set base file offset for all I/O patterns and MPI access */
+ mpi_file_offset=(MPI_Offset)(dset_offset + bytes_begin[0]*snbytes+
+ bytes_begin[1]);
+ } /* end else */
+
+ /* Start "raw data" write timer */
+ set_time(res->timers, HDF5_RAW_WRITE_FIXED_DIMS, START);
+
+ while (nbytes_xfer < bytes_count){
+ /* Write */
+ /* Calculate offset of write within a dataset/file */
+ switch (parms->io_type) {
+ case POSIXIO:
+ /* 1D dataspace */
+ if (!parms->dim2d){
+ /* Contiguous pattern */
+ if (!parms->interleaved) {
+ /* Compute file offset */
+ file_offset = posix_file_offset + (off_t)nbytes_xfer;
+
+ /* only care if seek returns error */
+ rc = POSIXSEEK(fd->posixfd, file_offset) < 0 ? -1 : 0;
+ VRFY((rc==0), "POSIXSEEK");
+
+ /* check if all bytes are written */
+ rc = ((ssize_t)buf_size ==
+ POSIXWRITE(fd->posixfd, buffer, buf_size));
+ VRFY((rc != 0), "POSIXWRITE");
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=buf_size;
+ } /* end if */
+ /* Interleaved access pattern */
+ else {
+ /* Set the base of user's buffer */
+ buf_p=(unsigned char *)buffer;
+
+ /* Set the number of bytes to transfer this time */
+ nbytes_toxfer = buf_size;
+
+ /* Loop over the buffers to write */
+ while(nbytes_toxfer>0) {
+ /* Skip offset over blocks of other processes */
+ file_offset = posix_file_offset +
+ (off_t)(nbytes_xfer*pio_mpi_nprocs_g);
+
+ /* only care if seek returns error */
+ rc = POSIXSEEK(fd->posixfd, file_offset) < 0 ? -1 : 0;
+ VRFY((rc==0), "POSIXSEEK");
+
+ /* check if all bytes are written */
+ rc = ((ssize_t)blk_size ==
+ POSIXWRITE(fd->posixfd, buf_p, blk_size));
+ VRFY((rc != 0), "POSIXWRITE");
+
+ /* Advance location in buffer */
+ buf_p+=blk_size;
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=blk_size;
+
+ /* Decrement number of bytes left this time */
+ nbytes_toxfer-=blk_size;
+ } /* end while */
+ } /* end else */
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ /* Contiguous storage */
+ if (!parms->h5_use_chunks) {
+ /* Contiguous access pattern */
+ if (!parms->interleaved) {
+ /* Compute file offset */
+ file_offset=posix_file_offset+(off_t)(((nbytes_xfer/blk_size)
+ /snbytes)*(blk_size*snbytes)+((nbytes_xfer/blk_size)%snbytes));
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = buf_size;
+
+ /* Global offset advance after each I/O operation */
+ file_offset_advance = (off_t)snbytes;
+ } /* end if */
+ /* Interleaved access pattern */
+ else {
+ /* Compute file offset */
+ file_offset=posix_file_offset+(off_t)((((nbytes_xfer/buf_size)
+ *pio_mpi_nprocs_g)/snbytes)*(buf_size*snbytes)
+ +((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)%snbytes);
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size;
+
+ /* Global offset advance after each I/O operation */
+ file_offset_advance = (off_t)snbytes;
+ } /* end else */
+ } /* end if */
+ /* Chunked storage */
+ else {
+ /*Contiguous access pattern */
+ if (!parms->interleaved) {
+ /* Compute file offset */
+ file_offset=posix_file_offset+(off_t)nbytes_xfer;
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size * buf_size;
+
+ /* Global offset advance after each I/O operation */
+ file_offset_advance = 0;
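+                    /* In effect, with chunked storage and a contiguous pattern this
+                     * process's chunks sit back to back in the file, so the whole
+                     * blk_size x buf_size buffer is written in one operation at the
+                     * running byte offset (hence no partial offset advance). */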
+ } /* end if */
+ /*Interleaved access pattern */
+ else {
+ /* Compute file offset */
+ /* Before simplification */
+ /* file_offset=posix_file_offset+(off_t)((nbytes_xfer/(buf_size/blk_size)
+ *pio_mpi_nprocs_g)/(snbytes/blk_size*(blk_size*blk_size))*(buf_size/blk_size
+ *snbytes/blk_size*(blk_size*blk_size))+((nbytes_xfer/(buf_size/blk_size))
+ *pio_mpi_nprocs_g)%(snbytes/blk_size*(blk_size*blk_size))); */
+
+ file_offset=posix_file_offset+(off_t)(((nbytes_xfer/(buf_size/blk_size)
+ *pio_mpi_nprocs_g)/(snbytes*blk_size))*(buf_size*snbytes)+((nbytes_xfer/(buf_size/blk_size))
+ *pio_mpi_nprocs_g)%(snbytes*blk_size));
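+                    /* The simplified expression above is algebraically equal to the
+                     * commented-out original: snbytes/blk_size*(blk_size*blk_size)
+                     * reduces to snbytes*blk_size, and buf_size/blk_size*snbytes/blk_size
+                     * *(blk_size*blk_size) reduces to buf_size*snbytes, assuming snbytes
+                     * and buf_size are multiples of blk_size so the integer divisions
+                     * are exact. */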
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size * blk_size;
+
+ /* Global offset advance after each I/O operation */
+ /* file_offset_advance = (off_t)(snbytes/blk_size*(blk_size*blk_size)); */
+ file_offset_advance = (off_t)(snbytes*blk_size);
+ } /* end else */
+ } /* end else */
+
+ /* Common code for file access */
+
+ /* Set the base of user's buffer */
+ buf_p = (unsigned char *)buffer;
+
+ /* Set the number of bytes to transfer this time */
+ nbytes_toxfer = buf_size*blk_size;
+
+ /* Loop over portions of the buffer to write */
+ while(nbytes_toxfer>0){
+ /* only care if seek returns error */
+ rc = POSIXSEEK(fd->posixfd, file_offset) < 0 ? -1 : 0;
+ VRFY((rc==0), "POSIXSEEK");
+
+ /* check if all bytes are written */
+ rc = ((ssize_t)nbytes_xfer_advance ==
+ POSIXWRITE(fd->posixfd, buf_p, nbytes_xfer_advance));
+ VRFY((rc != 0), "POSIXWRITE");
+
+ /* Advance location in buffer */
+ buf_p+=nbytes_xfer_advance;
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=nbytes_xfer_advance;
+
+ /* Decrement number of bytes left this time */
+ nbytes_toxfer-=nbytes_xfer_advance;
+
+ /* Partially advance file offset */
+ file_offset+=file_offset_advance;
+ } /* end while */
+
+ } /* end else */
+
+ break;
+
+ case MPIO:
+ /* 1D dataspace */
+ if (!parms->dim2d){
+ /* Independent file access */
+ if(!parms->collective) {
+ /* Contiguous pattern */
+ if (!parms->interleaved){
+ /* Compute offset in file */
+ mpi_offset = mpi_file_offset +
+ nbytes_xfer;
+
+ /* Perform independent write */
+ mrc = MPI_File_write_at(fd->mpifd, mpi_offset, buffer,
+ (int)(buf_size/blk_size), mpi_blk_type,
+ &mpi_status);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_WRITE");
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=buf_size;
+ } /* end if */
+ /* Interleaved access pattern */
+ else {
+ /* Set the base of user's buffer */
+ buf_p=(unsigned char *)buffer;
+
+ /* Set the number of bytes to transfer this time */
+ nbytes_toxfer = buf_size;
+
+ /* Loop over the buffers to write */
+ while(nbytes_toxfer>0) {
+ /* Skip offset over blocks of other processes */
+ mpi_offset = mpi_file_offset +
+ (nbytes_xfer*pio_mpi_nprocs_g);
+
+ /* Perform independent write */
+ mrc = MPI_File_write_at(fd->mpifd, mpi_offset, buf_p,
+ (int)1, mpi_blk_type, &mpi_status);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_WRITE");
+
+ /* Advance location in buffer */
+ buf_p+=blk_size;
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=blk_size;
+
+ /* Decrement number of bytes left this time */
+ nbytes_toxfer-=blk_size;
+ } /* end while */
+ } /* end else */
+ } /* end if */
+ /* Collective file access */
+ else {
+ /* Contiguous access pattern */
+ if (!parms->interleaved){
+ /* Compute offset in file */
+ mpi_offset = mpi_file_offset +
+ nbytes_xfer;
+
+                    /* Perform collective write */
+ mrc = MPI_File_write_at_all(fd->mpifd, mpi_offset, buffer,
+ (int)(buf_size/blk_size), mpi_blk_type, &mpi_status);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_WRITE");
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=buf_size;
+ } /* end if */
+ /* Interleaved access pattern */
+ else {
+ /* Compute offset in file */
+ mpi_offset = mpi_file_offset +
+ (nbytes_xfer*pio_mpi_nprocs_g);
+
+ /* Set the file view */
+ mrc = MPI_File_set_view(fd->mpifd, mpi_offset, mpi_blk_type,
+ mpi_file_type, (char*)"native", h5_io_info_g);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_VIEW");
+
+ /* Perform write */
+ mrc = MPI_File_write_at_all(fd->mpifd, 0, buffer,
+ (int)(buf_size/blk_size), mpi_blk_type, &mpi_status);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_WRITE");
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=buf_size;
+ } /* end else */
+ } /* end else */
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ /* Contiguous storage */
+ if (!parms->h5_use_chunks) {
+ /* Contiguous access pattern */
+ if (!parms->interleaved) {
+ /* Compute offset in file */
+ mpi_offset=mpi_file_offset+((nbytes_xfer/blk_size)/snbytes)*
+ (blk_size*snbytes)+((nbytes_xfer/blk_size)%snbytes);
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = buf_size;
+
+ /* Global offset advance after each I/O operation */
+ mpi_offset_advance = snbytes;
+
+ /* MPI type to be used for collective access */
+ mpi_collective_type = mpi_cont_type;
+ } /* end if */
+ /* Interleaved access pattern */
+ else {
+ /* Compute offset in file */
+ mpi_offset=mpi_file_offset+(((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)/snbytes)*
+ (buf_size*snbytes)+((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)%snbytes;
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size;
+
+ /* Global offset advance after each I/O operation */
+ mpi_offset_advance = snbytes;
+
+ /* MPI type to be used for collective access */
+ mpi_collective_type = mpi_inter_type;
+ } /* end else */
+ } /* end if */
+ /* Chunked storage */
+ else {
+ /*Contiguous access pattern */
+ if (!parms->interleaved) {
+ /* Compute offset in file */
+ mpi_offset=mpi_file_offset+nbytes_xfer;
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size * buf_size;
+
+ /* Global offset advance after each I/O operation */
+ mpi_offset_advance = 0;
+
+ /* MPI type to be used for collective access */
+ mpi_collective_type = mpi_full_buffer;
+ } /* end if */
+ /*Interleaved access pattern */
+ else {
+ /* Compute offset in file */
+ /* Before simplification */
+ /* mpi_offset=mpi_file_offset+(nbytes_xfer/(buf_size/blk_size)
+ *pio_mpi_nprocs_g)/(snbytes/blk_size*(blk_size*blk_size))*
+ (buf_size/blk_size*snbytes/blk_size*(blk_size*blk_size))+
+ ((nbytes_xfer/(buf_size/blk_size))*pio_mpi_nprocs_g)%(snbytes
+ /blk_size*(blk_size*blk_size)); */
+ mpi_offset=mpi_file_offset+((nbytes_xfer/(buf_size/blk_size)
+ *pio_mpi_nprocs_g)/(snbytes*blk_size))*(buf_size*snbytes)
+ +((nbytes_xfer/(buf_size/blk_size))*pio_mpi_nprocs_g)%(snbytes*blk_size);
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size * blk_size;
+
+ /* Global offset advance after each I/O operation */
+ /* mpi_offset_advance = (MPI_Offset)(snbytes/blk_size*(blk_size*blk_size)); */
+ mpi_offset_advance = (MPI_Offset)(snbytes*blk_size);
+
+ /* MPI type to be used for collective access */
+ mpi_collective_type = mpi_chunk_inter_type;
+ } /* end else */
+ } /* end else */
+
+ /* Common code for independent file access */
+ if (!parms->collective) {
+ /* Set the base of user's buffer */
+ buf_p = (unsigned char *)buffer;
+
+ /* Set the number of bytes to transfer this time */
+ nbytes_toxfer = buf_size * blk_size;
+
+ /* Loop over portions of the buffer to write */
+ while(nbytes_toxfer>0){
+ /* Perform independent write */
+ mrc = MPI_File_write_at(fd->mpifd, mpi_offset, buf_p,
+ (int)nbytes_xfer_advance, MPI_BYTE, &mpi_status);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_WRITE");
+
+ /* Advance location in buffer */
+ buf_p+=nbytes_xfer_advance;
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=nbytes_xfer_advance;
+
+ /* Decrement number of bytes left this time */
+ nbytes_toxfer-=nbytes_xfer_advance;
+
+ /* Partially advance global offset in dataset */
+ mpi_offset+=mpi_offset_advance;
+ } /* end while */
+ } /* end if */
+
+ /* Common code for collective file access */
+ else {
+ /* Set the file view */
+ mrc = MPI_File_set_view(fd->mpifd, mpi_offset, MPI_BYTE,
+ mpi_collective_type, (char *)"native", h5_io_info_g);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_VIEW");
+
+ /* Perform write */
+            mrc = MPI_File_write_at_all(fd->mpifd, 0, buffer, (int)(buf_size*blk_size),
+ MPI_BYTE, &mpi_status);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_WRITE");
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=buf_size*blk_size;
+ } /* end else */
+
+ } /* end else */
+
+ break;
+
+ case PHDF5:
+ /* 1D dataspace */
+ if (!parms->dim2d){
+                /* Set up the file dset space id to move the selection to this process's position */
+ if (!parms->interleaved){
+ /* Contiguous pattern */
+ h5offset[0] = nbytes_xfer;
+ } /* end if */
+ else {
+ /* Interleaved access pattern */
+ /* Skip offset over blocks of other processes */
+ h5offset[0] = (nbytes_xfer*pio_mpi_nprocs_g);
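+                /* The hyperslab selection already interleaves blk_size blocks with a
+                 * stride of blk_size*pio_mpi_nprocs_g, so shifting it by
+                 * nbytes_xfer*pio_mpi_nprocs_g moves it past the rounds of blocks
+                 * that all processes have written so far. */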
+ } /* end else */
+ hrc = H5Soffset_simple(h5dset_space_id, h5offset);
+ VRFY((hrc >= 0), "H5Soffset_simple");
+
+ /* Write the buffer out */
+ hrc = H5Dwrite(h5ds_id, ELMT_H5_TYPE, h5mem_space_id,
+ h5dset_space_id, h5dxpl, buffer);
+ VRFY((hrc >= 0), "H5Dwrite");
+
+ /* Increment number of bytes transferred */
+ nbytes_xfer += buf_size;
+ } /* end if */
+ /* 2D dataspace */
+ else {
+                /* Set up the file dset space id to move the selection to this process's position */
+ if (!parms->interleaved){
+ /* Contiguous pattern */
+ h5offset[0] = (nbytes_xfer/(snbytes*blk_size))*blk_size;
+ h5offset[1] = (nbytes_xfer%(snbytes*blk_size))/blk_size;
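+                /* Each H5Dwrite covers a blk_size-row by buf_size-column patch, and a
+                 * complete strip of blk_size rows holds snbytes*blk_size bytes: the
+                 * quotient gives the row shift of the selection offset and the
+                 * remainder, divided by the blk_size bytes per column, gives the
+                 * column shift. */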
+
+ } /* end if */
+ else {
+ /* Interleaved access pattern */
+ /* Skip offset over blocks of other processes */
+ h5offset[0] = ((nbytes_xfer*pio_mpi_nprocs_g)/(snbytes*buf_size))*buf_size;
+ h5offset[1] = ((nbytes_xfer*pio_mpi_nprocs_g)%(snbytes*buf_size))/buf_size;
+
+ } /* end else */
+ hrc = H5Soffset_simple(h5dset_space_id, h5offset);
+ VRFY((hrc >= 0), "H5Soffset_simple");
+
+ /* Write the buffer out */
+ hrc = H5Dwrite(h5ds_id, ELMT_H5_TYPE, h5mem_space_id,
+ h5dset_space_id, h5dxpl, buffer);
+ VRFY((hrc >= 0), "H5Dwrite");
+
+ /* Increment number of bytes transferred */
+ nbytes_xfer += buf_size*blk_size;
+
+ } /* end else */
+
+ break;
+ } /* switch (parms->io_type) */
+ } /* end while */
+
+ /* Stop "raw data" write timer */
+ set_time(res->timers, HDF5_RAW_WRITE_FIXED_DIMS, STOP);
+
+ /* Calculate write time */
+
+ /* Close dataset. Only HDF5 needs to do an explicit close. */
+ if (parms->io_type == PHDF5) {
+ hrc = H5Dclose(h5ds_id);
+
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Dataset Close failed\n");
+ GOTOERROR(FAIL);
+ }
+
+ h5ds_id = -1;
+ } /* end if */
} /* end for */
done:
/* release MPI-I/O objects */
if (parms->io_type == MPIO) {
- /* 1D dataspace */
- if (parms->dim2d==0){
- /* Free file type */
- mrc = MPI_Type_free( &mpi_file_type );
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
-
- /* Free buffer type */
- mrc = MPI_Type_free( &mpi_blk_type );
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
- } /* end if */
- /* 2D dataspace */
- else {
- /* Free file type */
- mrc = MPI_Type_free( &mpi_cont_type );
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
-
- /* Free partial buffer type */
- mrc = MPI_Type_free( &contig_cont );
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
-
- /* Free file type */
- mrc = MPI_Type_free( &mpi_inter_type );
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
-
- /* Free partial buffer type */
- mrc = MPI_Type_free( &contig_inter );
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
- } /* end else */
+ /* 1D dataspace */
+ if (!parms->dim2d){
+ /* Free file type */
+ mrc = MPI_Type_free( &mpi_file_type );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free buffer type */
+ mrc = MPI_Type_free( &mpi_blk_type );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ /* Free partial buffer type for contiguous access */
+ mrc = MPI_Type_free( &mpi_partial_buffer_cont );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free contiguous file type */
+ mrc = MPI_Type_free( &mpi_cont_type );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free partial buffer type for interleaved access */
+ mrc = MPI_Type_free( &mpi_partial_buffer_inter );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free interleaved file type */
+ mrc = MPI_Type_free( &mpi_inter_type );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free full buffer type */
+ mrc = MPI_Type_free(&mpi_full_buffer);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free full chunk type */
+ mrc = MPI_Type_free(&mpi_full_chunk);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free chunk interleaved file type */
+ mrc = MPI_Type_free(&mpi_chunk_inter_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+ } /* end else */
} /* end if */
/* release HDF5 objects */
if (h5dset_space_id != -1) {
- hrc = H5Sclose(h5dset_space_id);
- if (hrc < 0){
- fprintf(stderr, "HDF5 Dataset Space Close failed\n");
- ret_code = FAIL;
- } else {
- h5dset_space_id = -1;
- }
+ hrc = H5Sclose(h5dset_space_id);
+ if (hrc < 0){
+ fprintf(stderr, "HDF5 Dataset Space Close failed\n");
+ ret_code = FAIL;
+ } else {
+ h5dset_space_id = -1;
+ }
}
if (h5mem_space_id != -1) {
- hrc = H5Sclose(h5mem_space_id);
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Memory Space Close failed\n");
- ret_code = FAIL;
- } else {
- h5mem_space_id = -1;
- }
+ hrc = H5Sclose(h5mem_space_id);
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Memory Space Close failed\n");
+ ret_code = FAIL;
+ } else {
+ h5mem_space_id = -1;
+ }
}
if (h5dxpl != -1) {
- hrc = H5Pclose(h5dxpl);
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Dataset Transfer Property List Close failed\n");
- ret_code = FAIL;
- } else {
- h5dxpl = -1;
- }
+ hrc = H5Pclose(h5dxpl);
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Dataset Transfer Property List Close failed\n");
+ ret_code = FAIL;
+ } else {
+ h5dxpl = -1;
+ }
}
return ret_code;
}
/*
- * Function: do_read
- * Purpose: read the required amount of data from the file.
- * Return: SUCCESS or FAIL
- * Programmer: Albert Cheng 2001/12/13
+ * Function: do_read
+ * Purpose: read the required amount of data from the file.
+ * Return: SUCCESS or FAIL
+ * Programmer: Albert Cheng 2001/12/13
* Modifications:
* Added 2D testing (Christian Chilan, 10. August 2005)
*/
static herr_t
do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
- off_t nbytes, size_t buf_size, void *buffer /*out*/)
+ off_t nbytes, size_t buf_size, void *buffer /*out*/)
{
int ret_code = SUCCESS;
int rc; /*routine return code */
long ndset;
size_t blk_size; /* The block size to subdivide the xfer buffer into */
- size_t bsize;
+ size_t bsize; /* Size of the actual buffer */
off_t nbytes_xfer; /* Total number of bytes transferred so far */
+ size_t nbytes_xfer_advance; /* Number of bytes transferred in a single I/O operation */
size_t nbytes_toxfer; /* Number of bytes to transfer a particular time */
char dname[64];
off_t dset_offset=0; /*dataset offset in a file */
off_t bytes_begin[2]; /*first elmt this process transfer */
off_t bytes_count; /*number of elmts this process transfer */
- off_t snbytes=0; /*size of a side of the dataset square */
+ off_t snbytes=0; /*size of a side of the dataset square */
unsigned char *buf_p; /* Current buffer pointer */
/* POSIX variables */
off_t file_offset; /* File offset of the next transfer */
+ off_t file_offset_advance; /* File offset advance after each I/O operation */
off_t posix_file_offset; /* Base file offset of the next transfer */
/* MPI variables */
MPI_Offset mpi_file_offset;/* Base file offset of the next transfer*/
- MPI_Offset mpi_offset; /* Offset in MPI file */
+ MPI_Offset mpi_offset; /* Offset in MPI file */
+ MPI_Offset mpi_offset_advance; /* Offset advance after each I/O operation */
MPI_Datatype mpi_file_type; /* MPI derived type for 1D file */
MPI_Datatype mpi_blk_type; /* MPI derived type for 1D buffer */
MPI_Datatype mpi_cont_type; /* MPI derived type for 2D contiguous file */
- MPI_Datatype contig_cont; /* MPI derived type for 2D contiguous buffer */
- MPI_Datatype mpi_inter_type;/* MPI derived type for 2D interleaved file */
- MPI_Datatype contig_inter; /* MPI derived type for 2D interleaved buffer*/
- MPI_Status mpi_status;
+ MPI_Datatype mpi_partial_buffer_cont; /* MPI derived type for partial 2D contiguous buffer */
+ MPI_Datatype mpi_inter_type; /* MPI derived type for 2D interleaved file */
+ MPI_Datatype mpi_partial_buffer_inter; /* MPI derived type for partial 2D interleaved buffer */
+ MPI_Datatype mpi_full_buffer; /* MPI derived type for 2D full buffer */
+ MPI_Datatype mpi_full_chunk; /* MPI derived type for 2D full chunk */
+ MPI_Datatype mpi_chunk_inter_type; /* MPI derived type for 2D chunk interleaved file */
+ MPI_Datatype mpi_collective_type; /* Generic MPI derived type for 2D collective access */
+ MPI_Status mpi_status;
int mrc; /* MPI return code */
/* HDF5 variables */
@@ -1413,11 +1534,11 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
hid_t h5dset_space_id = -1; /*dataset space ID */
hid_t h5mem_space_id = -1; /*memory dataspace ID */
hid_t h5ds_id = -1; /*dataset handle */
- hsize_t h5block[2]; /*dataspace selection */
- hsize_t h5stride[2];
- hsize_t h5count[2];
- hsize_t h5start[2];
- hssize_t h5offset[2]; /* Selection offset within dataspace */
+ hsize_t h5block[2]; /*dataspace selection */
+ hsize_t h5stride[2];
+ hsize_t h5count[2];
+ hsize_t h5start[2];
+ hssize_t h5offset[2]; /* Selection offset within dataspace */
hid_t h5dxpl = -1; /* Dataset transfer property list */
/* Get the parameters from the parameter block */
@@ -1434,38 +1555,41 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
*/
/* 1D dataspace */
- if (parms->dim2d==0){
- bsize = buf_size;
- if (parms->interleaved==0) {
- /* Contiguous Pattern: */
- bytes_begin[0] = (off_t)(((double)nbytes*pio_mpi_rank_g)/pio_mpi_nprocs_g);
- } /* end if */
- else {
- /* Interleaved Pattern: */
- bytes_begin[0] = (off_t)(blk_size*pio_mpi_rank_g);
- } /* end else */
+ if (!parms->dim2d){
+ bsize = buf_size;
+ /* Contiguous Pattern: */
+ if (!parms->interleaved) {
+ bytes_begin[0] = (off_t)(((double)nbytes*pio_mpi_rank_g)/pio_mpi_nprocs_g);
+ } /* end if */
+ /* Interleaved Pattern: */
+ else {
+ bytes_begin[0] = (off_t)(blk_size*pio_mpi_rank_g);
+ } /* end else */
}/* end if */
/* 2D dataspace */
else {
- snbytes = (off_t)sqrt(nbytes);
- bsize = buf_size * blk_size;
- /* nbytes is always the number of bytes per dataset (1D or 2D). If the
- dataspace is 2D, snbytes is the size of a side of the 'dataset square'.
- */
- if (parms->interleaved==0) {
- /* Contiguous Pattern: */
- bytes_begin[0] = (off_t)((double)snbytes*pio_mpi_rank_g / pio_mpi_nprocs_g);
- bytes_begin[1] = 0;
- } /* end if */
- else {
- /* Interleaved Pattern: */
- bytes_begin[0] = 0;
- bytes_begin[1] = (off_t)(blk_size*pio_mpi_rank_g);
- } /* end else */
- }
-
- /* end else */
-
+ /* nbytes is always the number of bytes per dataset (1D or 2D). If the
+ dataspace is 2D, snbytes is the size of a side of the 'dataset square'.
+ */
+ snbytes = (off_t)sqrt(nbytes);
+
+ bsize = buf_size * blk_size;
+
+ /* Contiguous Pattern: */
+ if (!parms->interleaved) {
+ bytes_begin[0] = (off_t)((double)snbytes*pio_mpi_rank_g / pio_mpi_nprocs_g);
+ bytes_begin[1] = 0;
+ } /* end if */
+ /* Interleaved Pattern: */
+ else {
+ bytes_begin[0] = 0;
+
+ if (!parms->h5_use_chunks || parms->io_type==PHDF5)
+ bytes_begin[1] = (off_t)(blk_size*pio_mpi_rank_g);
+ else
+ bytes_begin[1] = (off_t)(blk_size*blk_size*pio_mpi_rank_g);
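+            /* With chunked storage and raw POSIX/MPI-I/O access the data is
+             * interleaved chunk by chunk, so each process's first element is offset
+             * by whole blk_size x blk_size chunks rather than by blk_size bytes. */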
+ } /* end else */
+ } /* end else */
/* Calculate the total number of bytes (bytes_count) to be
* transferred by this process. It may be different for different
@@ -1478,737 +1602,826 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
* (This is tricky, don't mess with the formula, rounding errors
* can easily get introduced) */
bytes_count = (off_t)(((double)nbytes*(pio_mpi_rank_g+1)) / pio_mpi_nprocs_g)
- - (off_t)(((double)nbytes*pio_mpi_rank_g) / pio_mpi_nprocs_g);
+ - (off_t)(((double)nbytes*pio_mpi_rank_g) / pio_mpi_nprocs_g);
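+    /* For example, with nbytes=10 and 4 processes this assigns 2, 3, 2 and 3
+     * bytes to ranks 0-3, so every byte is transferred exactly once despite
+     * the uneven split. */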
/* debug */
if (pio_debug_level >= 4) {
- HDprint_rank(output);
- HDfprintf(output, "Debug(do_read): "
- "buf_size=%Hd, bytes_begin=%Hd, bytes_count=%Hd\n",
- (long_long)buf_size, (long_long)bytes_begin,
- (long_long)bytes_count);
+ HDprint_rank(output);
+ HDfprintf(output, "Debug(do_read): "
+ "buf_size=%Hd, bytes_begin=%Hd, bytes_count=%Hd\n",
+            (long_long)buf_size, (long_long)bytes_begin[0],
+ (long_long)bytes_count);
}
/* I/O Access specific setup */
switch (parms->io_type) {
- case POSIXIO:
- /* No extra setup */
- break;
-
- case MPIO: /* MPI-I/O setup */
- /* 1D dataspace */
- if (parms->dim2d==0){
- /* Build block's derived type */
- mrc = MPI_Type_contiguous((int)blk_size,
- MPI_BYTE, &mpi_blk_type);
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
-
- /* Build file's derived type */
- mrc = MPI_Type_vector((int)(buf_size/blk_size), (int)1,
- (int)pio_mpi_nprocs_g, mpi_blk_type, &mpi_file_type);
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
-
- /* Commit file type */
- mrc = MPI_Type_commit( &mpi_file_type );
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
-
- /* Commit buffer type */
- mrc = MPI_Type_commit( &mpi_blk_type );
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
- } /* end if */
- /* 2D dataspace */
- else {
- /* Build partial buffer derived type for contiguous access */
- mrc = MPI_Type_contiguous((int)buf_size, MPI_BYTE,
- &contig_cont);
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
-
- /* Build contiguous file's derived type */
- mrc = MPI_Type_vector((int)blk_size, (int)1, (int)(snbytes/buf_size),
- contig_cont, &mpi_cont_type);
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
-
- /* Commit contiguous file type */
- mrc = MPI_Type_commit(&mpi_cont_type);
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
-
- /* Build partial buffer derived type for interleaved access */
- mrc = MPI_Type_contiguous((int)blk_size, MPI_BYTE,
- &contig_inter);
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
-
- /* Build interleaved file's derived type */
- mrc = MPI_Type_vector((int)buf_size, (int)1, (int)(snbytes/blk_size),
- contig_inter, &mpi_inter_type);
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
-
- /* Commit interleaved buffer type */
- mrc = MPI_Type_commit(&mpi_inter_type);
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
- } /* end else */
- break;
-
- case PHDF5: /* HDF5 setup */
- /* 1D dataspace */
- if (parms->dim2d==0){
- if(nbytes>0) {
- /* define a contiguous dataset of nbytes native bytes */
- h5dims[0] = nbytes;
- h5dset_space_id = H5Screate_simple(1, h5dims, NULL);
- VRFY((h5dset_space_id >= 0), "H5Screate_simple");
-
- /* Set up the file dset space id to select the pattern to access */
- if (parms->interleaved==0){
- /* Contiguous pattern */
- h5start[0] = bytes_begin[0];
- h5stride[0] = h5block[0] = blk_size;
- h5count[0] = buf_size/blk_size;
- } /* end if */
- else {
- /* Interleaved access pattern */
- /* Skip offset over blocks of other processes */
- h5start[0] = bytes_begin[0];
- h5stride[0] = blk_size*pio_mpi_nprocs_g;
- h5block[0] = blk_size;
- h5count[0] = buf_size/blk_size;
- } /* end else */
- hrc = H5Sselect_hyperslab(h5dset_space_id, H5S_SELECT_SET,
- h5start, h5stride, h5count, h5block);
- VRFY((hrc >= 0), "H5Sselect_hyperslab");
- } /* end if */
- else {
- h5dset_space_id = H5Screate(H5S_SCALAR);
- VRFY((h5dset_space_id >= 0), "H5Screate");
- } /* end else */
-
- /* Create the memory dataspace that corresponds to the xfer buffer */
- if(buf_size>0) {
- h5dims[0] = buf_size;
- h5mem_space_id = H5Screate_simple(1, h5dims, NULL);
- VRFY((h5mem_space_id >= 0), "H5Screate_simple");
- } /* end if */
- else {
- h5mem_space_id = H5Screate(H5S_SCALAR);
- VRFY((h5mem_space_id >= 0), "H5Screate");
- } /* end else */
- } /* end if */
- /* 2D dataspace */
- else {
- if(nbytes>0) {
- /* define a contiguous dataset of nbytes native bytes */
- h5dims[0] = snbytes;
- h5dims[1] = snbytes;
- h5dset_space_id = H5Screate_simple(2, h5dims, NULL);
- VRFY((h5dset_space_id >= 0), "H5Screate_simple");
-
- /* Set up the file dset space id to select the pattern to access */
- if (parms->interleaved==0){
- /* Contiguous pattern */
- h5start[0] = bytes_begin[0];
- h5start[1] = bytes_begin[1];
- h5stride[0] = 1;
- h5stride[1] = h5block[0] = h5block[1] = blk_size;
- h5count[0] = 1;
- h5count[1] = buf_size/blk_size;
- } /* end if */
- else {
- /* Interleaved access pattern */
- /* Skip offset over blocks of other processes */
- h5start[0] = bytes_begin[0];
- h5start[1] = bytes_begin[1];
- h5stride[0] = blk_size;
- h5stride[1] = blk_size*pio_mpi_nprocs_g;
- h5block[0] = h5block[1] = blk_size;
- h5count[0] = buf_size/blk_size;
- h5count[1] = 1;
- } /* end else */
- hrc = H5Sselect_hyperslab(h5dset_space_id, H5S_SELECT_SET,
- h5start, h5stride, h5count, h5block);
- VRFY((hrc >= 0), "H5Sselect_hyperslab");
- } /* end if */
- else {
- h5dset_space_id = H5Screate(H5S_SCALAR);
- VRFY((h5dset_space_id >= 0), "H5Screate");
- } /* end else */
-
- /* Create the memory dataspace that corresponds to the xfer buffer */
- if(buf_size>0) {
- if (parms->interleaved==0){
- h5dims[0] = blk_size;
- h5dims[1] = buf_size;
- }else{
- h5dims[0] = buf_size;
- h5dims[1] = blk_size;
- }
- h5mem_space_id = H5Screate_simple(2, h5dims, NULL);
- VRFY((h5mem_space_id >= 0), "H5Screate_simple");
- } /* end if */
- else {
- h5mem_space_id = H5Screate(H5S_SCALAR);
- VRFY((h5mem_space_id >= 0), "H5Screate");
- } /* end else */
- } /* end else */
-
- /* Create the dataset transfer property list */
- h5dxpl = H5Pcreate(H5P_DATASET_XFER);
- if (h5dxpl < 0) {
- fprintf(stderr, "HDF5 Property List Create failed\n");
- GOTOERROR(FAIL);
- }
-
- /* Change to collective I/O, if asked */
- if(parms->collective) {
- hrc = H5Pset_dxpl_mpio(h5dxpl, H5FD_MPIO_COLLECTIVE);
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Property List Set failed\n");
- GOTOERROR(FAIL);
- } /* end if */
- } /* end if */
- break;
+ case POSIXIO:
+ /* No extra setup */
+ break;
+
+ case MPIO: /* MPI-I/O setup */
+ /* 1D dataspace */
+ if (!parms->dim2d){
+ /* Build block's derived type */
+ mrc = MPI_Type_contiguous((int)blk_size,
+ MPI_BYTE, &mpi_blk_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Build file's derived type */
+ mrc = MPI_Type_vector((int)(buf_size/blk_size), (int)1,
+ (int)pio_mpi_nprocs_g, mpi_blk_type, &mpi_file_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit file type */
+ mrc = MPI_Type_commit( &mpi_file_type );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Commit buffer type */
+ mrc = MPI_Type_commit( &mpi_blk_type );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ /* Build partial buffer derived type for contiguous access */
+ mrc = MPI_Type_contiguous((int)buf_size, MPI_BYTE,
+ &mpi_partial_buffer_cont);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit partial buffer derived type */
+ mrc = MPI_Type_commit(&mpi_partial_buffer_cont);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build contiguous file's derived type */
+ mrc = MPI_Type_vector((int)blk_size, (int)1, (int)(snbytes/buf_size),
+ mpi_partial_buffer_cont, &mpi_cont_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
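+            /* This vector type describes blk_size segments of buf_size bytes, each
+             * separated by snbytes bytes in the file (the stride of snbytes/buf_size
+             * is in units of the buf_size-byte element type), i.e. one blk_size-row
+             * by buf_size-column patch of the dataset square. */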
+
+ /* Commit contiguous file type */
+ mrc = MPI_Type_commit(&mpi_cont_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build partial buffer derived type for interleaved access */
+ mrc = MPI_Type_contiguous((int)blk_size, MPI_BYTE,
+ &mpi_partial_buffer_inter);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit partial buffer derived type */
+ mrc = MPI_Type_commit(&mpi_partial_buffer_inter);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build interleaved file's derived type */
+ mrc = MPI_Type_vector((int)buf_size, (int)1, (int)(snbytes/blk_size),
+ mpi_partial_buffer_inter, &mpi_inter_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit interleaved file type */
+ mrc = MPI_Type_commit(&mpi_inter_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build full buffer derived type */
+ mrc = MPI_Type_contiguous((int)(blk_size*buf_size), MPI_BYTE,
+ &mpi_full_buffer);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit full buffer derived type */
+ mrc = MPI_Type_commit(&mpi_full_buffer);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build full chunk derived type */
+ mrc = MPI_Type_contiguous((int)(blk_size*blk_size), MPI_BYTE,
+ &mpi_full_chunk);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit full chunk derived type */
+ mrc = MPI_Type_commit(&mpi_full_chunk);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build chunk interleaved file's derived type */
+ mrc = MPI_Type_vector((int)(buf_size/blk_size), (int)1, (int)(snbytes/blk_size),
+ mpi_full_chunk, &mpi_chunk_inter_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
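+            /* Here the element type is a whole blk_size x blk_size chunk, so this
+             * vector selects buf_size/blk_size chunks spaced one chunk-row of the
+             * file (snbytes*blk_size bytes) apart, matching the interleaved chunked
+             * layout used for collective access. */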
+
+ /* Commit chunk interleaved file type */
+ mrc = MPI_Type_commit(&mpi_chunk_inter_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+ } /* end else */
+ break;
+
+ case PHDF5: /* HDF5 setup */
+ /* 1D dataspace */
+ if (!parms->dim2d){
+ if(nbytes>0) {
+ /* define a contiguous dataset of nbytes native bytes */
+ h5dims[0] = nbytes;
+ h5dset_space_id = H5Screate_simple(1, h5dims, NULL);
+ VRFY((h5dset_space_id >= 0), "H5Screate_simple");
+
+ /* Set up the file dset space id to select the pattern to access */
+ if (!parms->interleaved){
+ /* Contiguous pattern */
+ h5start[0] = bytes_begin[0];
+ h5stride[0] = h5block[0] = blk_size;
+ h5count[0] = buf_size/blk_size;
+ } /* end if */
+ else {
+ /* Interleaved access pattern */
+ /* Skip offset over blocks of other processes */
+ h5start[0] = bytes_begin[0];
+ h5stride[0] = blk_size*pio_mpi_nprocs_g;
+ h5block[0] = blk_size;
+ h5count[0] = buf_size/blk_size;
+ } /* end else */
+ hrc = H5Sselect_hyperslab(h5dset_space_id, H5S_SELECT_SET,
+ h5start, h5stride, h5count, h5block);
+ VRFY((hrc >= 0), "H5Sselect_hyperslab");
+ } /* end if */
+ else {
+ h5dset_space_id = H5Screate(H5S_SCALAR);
+ VRFY((h5dset_space_id >= 0), "H5Screate");
+ } /* end else */
+
+ /* Create the memory dataspace that corresponds to the xfer buffer */
+ if(buf_size>0) {
+ h5dims[0] = buf_size;
+ h5mem_space_id = H5Screate_simple(1, h5dims, NULL);
+ VRFY((h5mem_space_id >= 0), "H5Screate_simple");
+ } /* end if */
+ else {
+ h5mem_space_id = H5Screate(H5S_SCALAR);
+ VRFY((h5mem_space_id >= 0), "H5Screate");
+ } /* end else */
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ if(nbytes>0) {
+ /* define a contiguous dataset of nbytes native bytes */
+ h5dims[0] = snbytes;
+ h5dims[1] = snbytes;
+ h5dset_space_id = H5Screate_simple(2, h5dims, NULL);
+ VRFY((h5dset_space_id >= 0), "H5Screate_simple");
+
+ /* Set up the file dset space id to select the pattern to access */
+ if (!parms->interleaved){
+ /* Contiguous pattern */
+ h5start[0] = bytes_begin[0];
+ h5start[1] = bytes_begin[1];
+ h5stride[0] = 1;
+ h5stride[1] = h5block[0] = h5block[1] = blk_size;
+ h5count[0] = 1;
+ h5count[1] = buf_size/blk_size;
+ } /* end if */
+ else {
+ /* Interleaved access pattern */
+ /* Skip offset over blocks of other processes */
+ h5start[0] = bytes_begin[0];
+ h5start[1] = bytes_begin[1];
+ h5stride[0] = blk_size;
+ h5stride[1] = blk_size*pio_mpi_nprocs_g;
+ h5block[0] = h5block[1] = blk_size;
+ h5count[0] = buf_size/blk_size;
+ h5count[1] = 1;
+ } /* end else */
+ hrc = H5Sselect_hyperslab(h5dset_space_id, H5S_SELECT_SET,
+ h5start, h5stride, h5count, h5block);
+ VRFY((hrc >= 0), "H5Sselect_hyperslab");
+ } /* end if */
+ else {
+ h5dset_space_id = H5Screate(H5S_SCALAR);
+ VRFY((h5dset_space_id >= 0), "H5Screate");
+ } /* end else */
+
+ /* Create the memory dataspace that corresponds to the xfer buffer */
+ if(buf_size>0) {
+ if (!parms->interleaved){
+ h5dims[0] = blk_size;
+ h5dims[1] = buf_size;
+ }else{
+ h5dims[0] = buf_size;
+ h5dims[1] = blk_size;
+ }
+ h5mem_space_id = H5Screate_simple(2, h5dims, NULL);
+ VRFY((h5mem_space_id >= 0), "H5Screate_simple");
+ } /* end if */
+ else {
+ h5mem_space_id = H5Screate(H5S_SCALAR);
+ VRFY((h5mem_space_id >= 0), "H5Screate");
+ } /* end else */
+ } /* end else */
+
+ /* Create the dataset transfer property list */
+ h5dxpl = H5Pcreate(H5P_DATASET_XFER);
+ if (h5dxpl < 0) {
+ fprintf(stderr, "HDF5 Property List Create failed\n");
+ GOTOERROR(FAIL);
+ }
+
+ /* Change to collective I/O, if asked */
+ if(parms->collective) {
+ hrc = H5Pset_dxpl_mpio(h5dxpl, H5FD_MPIO_COLLECTIVE);
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Property List Set failed\n");
+ GOTOERROR(FAIL);
+ } /* end if */
+ } /* end if */
+ break;
} /* end switch */
- for(ndset = 1; ndset <= ndsets; ++ndset) {
-
- /* Calculate dataset offset within a file */
-
- /* create dataset */
- switch(parms->io_type) {
- case POSIXIO:
- case MPIO:
- /* both posix and mpi io just need dataset offset in file*/
- dset_offset = (ndset - 1) * nbytes;
- break;
-
- case PHDF5:
- sprintf(dname, "Dataset_%ld", ndset);
- h5ds_id = H5Dopen2(fd->h5fd, dname, H5P_DEFAULT);
- if(h5ds_id < 0) {
- fprintf(stderr, "HDF5 Dataset open failed\n");
- GOTOERROR(FAIL);
- }
- break;
- }
-
- /* The task is to transfer bytes_count bytes, starting at
- * bytes_begin position, using transfer buffer of buf_size bytes.
- * If interleaved, select buf_size at a time, in round robin
- * fashion, according to number of process. Otherwise, select
- * all bytes_count in contiguous.
- */
- nbytes_xfer = 0 ;
-
- /* 1D dataspace */
- if (parms->dim2d==0){
- /* Set base file offset for all I/O patterns and POSIX access */
- posix_file_offset = dset_offset + bytes_begin[0];
-
- /* Set base file offset for all I/O patterns and MPI access */
- mpi_file_offset = (MPI_Offset)(dset_offset + bytes_begin[0]);
- } /* end if */
- else {
- /* Set base file offset for all I/O patterns and POSIX access */
- posix_file_offset=dset_offset + bytes_begin[0]*snbytes+
- bytes_begin[1];
-
- /* Set base file offset for all I/O patterns and MPI access */
- mpi_file_offset=(MPI_Offset)(dset_offset + bytes_begin[0]*snbytes+
- bytes_begin[1]);
- } /* end else */
-
- /* Start "raw data" read timer */
- set_time(res->timers, HDF5_RAW_READ_FIXED_DIMS, START);
-
- while (nbytes_xfer < bytes_count){
- /* Read */
- /* Calculate offset of read within a dataset/file */
- switch (parms->io_type) {
- case POSIXIO:
- /* 1D dataspace */
- if (parms->dim2d==0){
- /* Contiguous pattern */
- if (parms->interleaved==0) {
- /* Compute file offset */
- file_offset = posix_file_offset + (off_t)nbytes_xfer;
-
- /* only care if seek returns error */
- rc = POSIXSEEK(fd->posixfd, file_offset) < 0 ? -1 : 0;
- VRFY((rc==0), "POSIXSEEK");
-
- /* check if all bytes are read */
- rc = ((ssize_t)buf_size ==
- POSIXREAD(fd->posixfd, buffer, buf_size));
- VRFY((rc != 0), "POSIXREAD");
-
- /* Advance global offset in dataset */
- nbytes_xfer+=buf_size;
- } /* end if */
- /* Interleaved access pattern */
- else {
- /* Set the base of user's buffer */
- buf_p=(unsigned char *)buffer;
-
- /* Set the number of bytes to transfer this time */
- nbytes_toxfer = buf_size;
-
- /* Loop over the buffers to read */
- while(nbytes_toxfer>0) {
- /* Skip offset over blocks of other processes */
- file_offset = posix_file_offset +
- (off_t)(nbytes_xfer*pio_mpi_nprocs_g);
-
- /* only care if seek returns error */
- rc = POSIXSEEK(fd->posixfd, file_offset) < 0 ? -1 : 0;
- VRFY((rc==0), "POSIXSEEK");
-
- /* check if all bytes are read */
- rc = ((ssize_t)blk_size ==
- POSIXREAD(fd->posixfd, buf_p, blk_size));
- VRFY((rc != 0), "POSIXREAD");
-
- /* Advance location in buffer */
- buf_p+=blk_size;
-
- /* Advance global offset in dataset */
- nbytes_xfer+=blk_size;
-
- /* Decrement number of bytes left this time */
- nbytes_toxfer-=blk_size;
- } /* end while */
- } /* end else */
- } /* end if */
- /* 2D dataspace */
- else {
- /* Contiguous pattern */
- if (parms->interleaved==0){
- /* Set the base of user's buffer */
- buf_p = (unsigned char *)buffer;
-
- /* Set the number of bytes to transfer this time */
- nbytes_toxfer = buf_size*blk_size;
-
- /* Compute file offset */
- file_offset=posix_file_offset+(off_t)(((nbytes_xfer/blk_size)/(snbytes))*
- (blk_size*snbytes)+((nbytes_xfer/blk_size)%(snbytes)));
-
- /* Loop over portions of the buffer to read */
- while(nbytes_toxfer>0){
- /* only care if seek returns error */
- rc = POSIXSEEK(fd->posixfd, file_offset) < 0 ? -1 : 0;
- VRFY((rc==0), "POSIXSEEK");
-
- /* check if all bytes are read */
- rc = ((ssize_t)buf_size ==
- POSIXREAD(fd->posixfd, buffer, buf_size));
- VRFY((rc != 0), "POSIXREAD");
-
- /* Advance location in buffer */
- buf_p+=buf_size;
-
- /* Advance global offset in dataset */
- nbytes_xfer+=buf_size;
-
- /* Decrement number of bytes left this time */
- nbytes_toxfer-=buf_size;
-
- /* Partially advance file offset */
- file_offset+=(off_t)(snbytes);
- } /* end while */
- } /* end if */
- /* Interleaved access pattern */
- else{
- /* Set the base of user's buffer */
- buf_p=(unsigned char *)buffer;
-
- /* Set the number of bytes to transfer this time */
- nbytes_toxfer=buf_size*blk_size;
-
- /* Compute file offset */
- file_offset=posix_file_offset+(off_t)(((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)/(snbytes)*
- (buf_size*snbytes)+((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)%(snbytes));
-
- /* Loop over portions of the buffer to read */
- while(nbytes_toxfer>0){
- /* only care if seek returns error */
- rc = POSIXSEEK(fd->posixfd, file_offset) < 0 ? -1 : 0;
- VRFY((rc==0), "POSIXSEEK");
-
- /* check if all bytes are read */
- rc = ((ssize_t)blk_size ==
- POSIXREAD(fd->posixfd, buffer, blk_size));
- VRFY((rc != 0), "POSIXREAD");
-
- /* Advance location in buffer */
- buf_p+=blk_size;
-
- /* Advance global offset in dataset */
- nbytes_xfer+=blk_size;
-
- /* Decrement number of bytes left this time */
- nbytes_toxfer-=blk_size;
-
- /* Partially advance file offset */
- file_offset+=(off_t)(snbytes);
- } /* end while */
- }/* end else */
- } /* end else */
- break;
-
- case MPIO:
- /* 1D dataspace */
- if (parms->dim2d==0){
- /* Independent file access */
- if(parms->collective==0) {
- /* Contiguous pattern */
- if (parms->interleaved==0){
- /* Compute offset in file */
- mpi_offset = mpi_file_offset +
- nbytes_xfer;
-
- /* Perform independent read */
- mrc = MPI_File_read_at(fd->mpifd, mpi_offset, buffer,
- (int)(buf_size/blk_size), mpi_blk_type,
- &mpi_status);
- VRFY((mrc==MPI_SUCCESS), "MPIO_READ");
-
- /* Advance global offset in dataset */
- nbytes_xfer+=buf_size;
- } /* end if */
- /* Interleaved access pattern */
- else {
- /* Set the base of user's buffer */
- buf_p=(unsigned char *)buffer;
-
- /* Set the number of bytes to transfer this time */
- nbytes_toxfer = buf_size;
-
- /* Loop over the buffers to read */
- while(nbytes_toxfer>0) {
- /* Skip offset over blocks of other processes */
- mpi_offset = mpi_file_offset +
- (nbytes_xfer*pio_mpi_nprocs_g);
-
- /* Perform independent read */
- mrc = MPI_File_read_at(fd->mpifd, mpi_offset, buf_p,
- (int)1, mpi_blk_type, &mpi_status);
- VRFY((mrc==MPI_SUCCESS), "MPIO_READ");
-
- /* Advance location in buffer */
- buf_p+=blk_size;
-
- /* Advance global offset in dataset */
- nbytes_xfer+=blk_size;
-
- /* Decrement number of bytes left this time */
- nbytes_toxfer-=blk_size;
- } /* end while */
- } /* end else */
- } /* end if */
- /* Collective file access */
- else {
- /* Contiguous access pattern */
- if (parms->interleaved==0){
- /* Compute offset in file */
- mpi_offset = mpi_file_offset +
- nbytes_xfer;
-
- /* Perform collective read */
- mrc = MPI_File_read_at_all(fd->mpifd, mpi_offset, buffer,
- (int)(buf_size/blk_size), mpi_blk_type, &mpi_status);
- VRFY((mrc==MPI_SUCCESS), "MPIO_READ");
-
- /* Advance global offset in dataset */
- nbytes_xfer+=buf_size;
- } /* end if */
- /* Interleaved access pattern */
- else {
- /* Compute offset in file */
- mpi_offset = mpi_file_offset +
- (nbytes_xfer*pio_mpi_nprocs_g);
-
- /* Set the file view */
- mrc = MPI_File_set_view(fd->mpifd, mpi_offset, mpi_blk_type,
- mpi_file_type, (char*)"native", h5_io_info_g);
- VRFY((mrc==MPI_SUCCESS), "MPIO_VIEW");
-
- /* Perform collective read */
- mrc = MPI_File_read_at_all(fd->mpifd, 0, buffer,
- (int)(buf_size/blk_size), mpi_blk_type, &mpi_status);
- VRFY((mrc==MPI_SUCCESS), "MPIO_READ");
-
- /* Advance global offset in dataset */
- nbytes_xfer+=buf_size;
- } /* end else */
- } /* end else */
- } /* end if */
- /* 2D dataspace */
- else {
- /* Independent file access */
- if (parms->collective==0)
- /* Contiguous pattern */
- if (parms->interleaved==0){
- /* Set the base of user's buffer */
- buf_p = (unsigned char *)buffer;
-
- /* Set the number of bytes to transfer this time */
- nbytes_toxfer = buf_size*blk_size;
-
- /* Compute offset in file */
- mpi_offset=mpi_file_offset+((nbytes_xfer/blk_size)/(snbytes))*
- (blk_size*snbytes)+((nbytes_xfer/blk_size)%(snbytes));
-
- /* Loop over portions of the buffer to read */
- while(nbytes_toxfer>0){
-
- /* Perform independent write */
- mrc = MPI_File_read_at(fd->mpifd, mpi_offset, buf_p, (int)buf_size, MPI_BYTE, &mpi_status);
- VRFY((mrc==MPI_SUCCESS), "MPIO_READ");
-
- /* Advance location in buffer */
- buf_p+=buf_size;
-
- /* Advance global offset in dataset */
- nbytes_xfer+=buf_size;
-
- /* Decrement number of bytes left this time */
- nbytes_toxfer-=buf_size;
-
- /* Partially advance global offset in dataset */
- mpi_offset+=snbytes;
- } /* end while */
- } /* end if */
- /* Interleaved access pattern */
- else{
- /* Set the base of user's buffer */
- buf_p=(unsigned char *)buffer;
-
- /* Set the number of bytes to transfer this time */
- nbytes_toxfer=buf_size*blk_size;
-
- /* Compute offset in file */
- mpi_offset=mpi_file_offset+((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)/(snbytes)*
- (buf_size*snbytes)+((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)%(snbytes);
-
- /* Loop over portions of the buffer to read */
- while(nbytes_toxfer>0){
- /* Perform independent write */
- mrc = MPI_File_read_at(fd->mpifd, mpi_offset, buf_p, (int)blk_size, MPI_BYTE, &mpi_status);
- VRFY((mrc==MPI_SUCCESS), "MPIO_READ");
-
- /* Advance location in buffer */
- buf_p+=blk_size;
-
- /* Advance global offset in dataset */
- nbytes_xfer+=blk_size;
-
- /* Decrement number of bytes left this time */
- nbytes_toxfer-=blk_size;
-
- /* Partially advance global offset in dataset */
- mpi_offset+=snbytes;
- } /* end while */
- } /* end else */
- /* end if */
- /* Collective file access */
- else
- /* Contiguous access pattern */
- if (parms->interleaved==0){
- /* Compute offset in file */
- mpi_offset=mpi_file_offset+((nbytes_xfer/blk_size)/(snbytes))*
- (blk_size*snbytes)+((nbytes_xfer/blk_size)%(snbytes));
-
- /* Set the file view */
- mrc = MPI_File_set_view(fd->mpifd, mpi_offset, MPI_BYTE, mpi_cont_type, (char *)"native", h5_io_info_g);
- VRFY((mrc==MPI_SUCCESS), "MPIO_VIEW");
-
- /* Perform read */
- mrc = MPI_File_read_at_all(fd->mpifd, 0, buffer,(int)(buf_size*blk_size),MPI_BYTE, &mpi_status);
- VRFY((mrc==MPI_SUCCESS), "MPIO_READ");
-
- nbytes_xfer+=buf_size*blk_size;
- } /* end if */
- /* Interleaved access pattern */
- else{
- /* Compute offset in file */
- mpi_offset=mpi_file_offset+((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)/(snbytes)*
- (buf_size*snbytes)+((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)%(snbytes);
-
- /* Set the file view */
- mrc = MPI_File_set_view(fd->mpifd, mpi_offset, MPI_BYTE, mpi_inter_type, (char *)"native", h5_io_info_g);
- VRFY((mrc==MPI_SUCCESS), "MPIO_VIEW");
-
- /* Perform read */
- mrc = MPI_File_read_at_all(fd->mpifd, 0, buffer, (int)(buf_size*blk_size), MPI_BYTE, &mpi_status);
- VRFY((mrc==MPI_SUCCESS), "MPIO_READ");
-
- nbytes_xfer+=buf_size*blk_size;
- } /* end else */
- /* end else */
- } /* end else */
- break;
-
- case PHDF5:
- /* 1D dataspace */
- if (parms->dim2d==0){
- /* Set up the file dset space id to move the selection to process */
- if (parms->interleaved==0){
- /* Contiguous pattern */
- h5offset[0] = nbytes_xfer;
- } /* end if */
- else {
- /* Interleaved access pattern */
- /* Skip offset over blocks of other processes */
- h5offset[0] = (nbytes_xfer*pio_mpi_nprocs_g);
- } /* end else */
- hrc = H5Soffset_simple(h5dset_space_id, h5offset);
- VRFY((hrc >= 0), "H5Soffset_simple");
-
- /* Read the buffer in */
- hrc = H5Dread(h5ds_id, ELMT_H5_TYPE, h5mem_space_id,
- h5dset_space_id, h5dxpl, buffer);
- VRFY((hrc >= 0), "H5Dread");
-
- /* Increment number of bytes transferred */
- nbytes_xfer += buf_size;
- } /* end if */
- /* 2D dataspace */
- else {
- /* Set up the file dset space id to move the selection to process */
- if (parms->interleaved==0){
- /* Contiguous pattern */
- h5offset[0] = (nbytes_xfer/(snbytes*blk_size))*blk_size;
- h5offset[1] = (nbytes_xfer%(snbytes*blk_size))/blk_size;
- } /* end if */
- else {
- /* Interleaved access pattern */
- /* Skip offset over blocks of other processes */
- h5offset[0] = ((nbytes_xfer*pio_mpi_nprocs_g)/(snbytes*buf_size))*buf_size;
- h5offset[1] = ((nbytes_xfer*pio_mpi_nprocs_g)%(snbytes*buf_size))/buf_size;
-
- } /* end else */
- hrc = H5Soffset_simple(h5dset_space_id, h5offset);
- VRFY((hrc >= 0), "H5Soffset_simple");
-
- /* Write the buffer out */
- hrc = H5Dread(h5ds_id, ELMT_H5_TYPE, h5mem_space_id,
- h5dset_space_id, h5dxpl, buffer);
- VRFY((hrc >= 0), "H5Dread");
-
- /* Increment number of bytes transferred */
- nbytes_xfer += buf_size*blk_size;
-
- } /* end else */
- break;
- } /* switch (parms->io_type) */
-
- /* Verify raw data, if asked */
- if (parms->verify) {
- /* Verify data read */
- unsigned char *ucharptr = (unsigned char *)buffer;
- size_t i;
- int nerror=0;
-
- for (i = 0; i < bsize; ++i){
- if (*ucharptr++ != pio_mpi_rank_g+1) {
- if (++nerror < 20){
- /* report at most 20 errors */
- HDprint_rank(output);
- HDfprintf(output, "read data error, expected (%Hd), "
- "got (%Hd)\n",
- (long_long)pio_mpi_rank_g+1,
- (long_long)*(ucharptr-1));
- } /* end if */
- } /* end if */
- } /* end for */
- if (nerror >= 20) {
- HDprint_rank(output);
- HDfprintf(output, "...");
- HDfprintf(output, "total read data errors=%d\n",
- nerror);
- } /* end if */
- } /* if (parms->verify) */
-
- } /* end while */
-
- /* Stop "raw data" read timer */
- set_time(res->timers, HDF5_RAW_READ_FIXED_DIMS, STOP);
-
- /* Calculate read time */
-
- /* Close dataset. Only HDF5 needs to do an explicit close. */
- if (parms->io_type == PHDF5) {
- hrc = H5Dclose(h5ds_id);
-
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Dataset Close failed\n");
- GOTOERROR(FAIL);
- }
-
- h5ds_id = -1;
- } /* end if */
+ for (ndset = 1; ndset <= ndsets; ++ndset) {
+
+ /* Calculate dataset offset within a file */
+
+        /* open dataset */
+ switch (parms->io_type) {
+ case POSIXIO:
+ case MPIO:
+ /* both posix and mpi io just need dataset offset in file*/
+ dset_offset = (ndset - 1) * nbytes;
+ break;
+
+ case PHDF5:
+ sprintf(dname, "Dataset_%ld", ndset);
+ h5ds_id = H5Dopen2(fd->h5fd, dname, H5P_DEFAULT);
+ if (h5ds_id < 0) {
+ fprintf(stderr, "HDF5 Dataset open failed\n");
+ GOTOERROR(FAIL);
+ }
+
+ break;
+ }
+
+        /* The task is to transfer bytes_count bytes, starting at the
+         * bytes_begin position, using a transfer buffer of buf_size bytes.
+         * If interleaved, select buf_size bytes at a time, in round-robin
+         * fashion, according to the number of processes. Otherwise, select
+         * all bytes_count bytes contiguously.
+         */
+ nbytes_xfer = 0 ;
+
+ /* 1D dataspace */
+ if (!parms->dim2d){
+ /* Set base file offset for all I/O patterns and POSIX access */
+ posix_file_offset = dset_offset + bytes_begin[0];
+
+ /* Set base file offset for all I/O patterns and MPI access */
+ mpi_file_offset = (MPI_Offset)(dset_offset + bytes_begin[0]);
+ } /* end if */
+ else {
+ /* Set base file offset for all I/O patterns and POSIX access */
+ posix_file_offset=dset_offset + bytes_begin[0]*snbytes+
+ bytes_begin[1];
+
+ /* Set base file offset for all I/O patterns and MPI access */
+ mpi_file_offset=(MPI_Offset)(dset_offset + bytes_begin[0]*snbytes+
+ bytes_begin[1]);
+ } /* end else */
+
+ /* Start "raw data" read timer */
+ set_time(res->timers, HDF5_RAW_READ_FIXED_DIMS, START);
+
+ while (nbytes_xfer < bytes_count){
+ /* Read */
+ /* Calculate offset of read within a dataset/file */
+ switch (parms->io_type) {
+ case POSIXIO:
+ /* 1D dataspace */
+ if (!parms->dim2d){
+ /* Contiguous pattern */
+ if (!parms->interleaved) {
+ /* Compute file offset */
+ file_offset = posix_file_offset + (off_t)nbytes_xfer;
+
+ /* only care if seek returns error */
+ rc = POSIXSEEK(fd->posixfd, file_offset) < 0 ? -1 : 0;
+ VRFY((rc==0), "POSIXSEEK");
+
+ /* check if all bytes are read */
+ rc = ((ssize_t)buf_size ==
+ POSIXREAD(fd->posixfd, buffer, buf_size));
+ VRFY((rc != 0), "POSIXREAD");
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=buf_size;
+ } /* end if */
+ /* Interleaved access pattern */
+ else {
+ /* Set the base of user's buffer */
+ buf_p=(unsigned char *)buffer;
+
+ /* Set the number of bytes to transfer this time */
+ nbytes_toxfer = buf_size;
+
+ /* Loop over the buffers to read */
+ while(nbytes_toxfer>0) {
+ /* Skip offset over blocks of other processes */
+ file_offset = posix_file_offset +
+ (off_t)(nbytes_xfer*pio_mpi_nprocs_g);
+
+ /* only care if seek returns error */
+ rc = POSIXSEEK(fd->posixfd, file_offset) < 0 ? -1 : 0;
+ VRFY((rc==0), "POSIXSEEK");
+
+ /* check if all bytes are read */
+ rc = ((ssize_t)blk_size ==
+ POSIXREAD(fd->posixfd, buf_p, blk_size));
+ VRFY((rc != 0), "POSIXREAD");
+
+ /* Advance location in buffer */
+ buf_p+=blk_size;
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=blk_size;
+
+ /* Decrement number of bytes left this time */
+ nbytes_toxfer-=blk_size;
+ } /* end while */
+ } /* end else */
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ /* Contiguous storage */
+ if (!parms->h5_use_chunks) {
+ /* Contiguous access pattern */
+ if (!parms->interleaved) {
+ /* Compute file offset */
+ file_offset=posix_file_offset+(off_t)(((nbytes_xfer/blk_size)
+ /snbytes)*(blk_size*snbytes)+((nbytes_xfer/blk_size)%snbytes));
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = buf_size;
+
+ /* Global offset advance after each I/O operation */
+ file_offset_advance = (off_t)snbytes;
+ } /* end if */
+ /* Interleaved access pattern */
+ else {
+ /* Compute file offset */
+ file_offset=posix_file_offset+(off_t)((((nbytes_xfer/buf_size)
+ *pio_mpi_nprocs_g)/snbytes)*(buf_size*snbytes)
+ +((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)%snbytes);
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size;
+
+ /* Global offset advance after each I/O operation */
+ file_offset_advance = (off_t)snbytes;
+ } /* end else */
+ } /* end if */
+ /* Chunked storage */
+ else {
+ /*Contiguous access pattern */
+ if (!parms->interleaved) {
+ /* Compute file offset */
+ file_offset=posix_file_offset+(off_t)nbytes_xfer;
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size * buf_size;
+
+ /* Global offset advance after each I/O operation */
+ file_offset_advance = 0;
+ } /* end if */
+ /*Interleaved access pattern */
+ else {
+ /* Compute file offset */
+ /* Before simplification */
+ /* file_offset=posix_file_offset+(off_t)((nbytes_xfer/(buf_size/blk_size)
+ *pio_mpi_nprocs_g)/(snbytes/blk_size*(blk_size*blk_size))*(buf_size/blk_size
+ *snbytes/blk_size*(blk_size*blk_size))+((nbytes_xfer/(buf_size/blk_size))
+ *pio_mpi_nprocs_g)%(snbytes/blk_size*(blk_size*blk_size))); */
+
+ file_offset=posix_file_offset+(off_t)(((nbytes_xfer/(buf_size/blk_size)
+ *pio_mpi_nprocs_g)/(snbytes*blk_size))*(buf_size*snbytes)+((nbytes_xfer/(buf_size/blk_size))
+ *pio_mpi_nprocs_g)%(snbytes*blk_size));
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size * blk_size;
+
+ /* Global offset advance after each I/O operation */
+ /* file_offset_advance = (off_t)(snbytes/blk_size*(blk_size*blk_size)); */
+ file_offset_advance = (off_t)(snbytes*blk_size);
+ } /* end else */
+ } /* end else */
+
+ /* Common code for file access */
+
+ /* Set the base of user's buffer */
+ buf_p = (unsigned char *)buffer;
+
+ /* Set the number of bytes to transfer this time */
+ nbytes_toxfer = buf_size*blk_size;
+
+ /* Loop over portions of the buffer to read */
+ while(nbytes_toxfer>0){
+ /* only care if seek returns error */
+ rc = POSIXSEEK(fd->posixfd, file_offset) < 0 ? -1 : 0;
+ VRFY((rc==0), "POSIXSEEK");
+
+ /* check if all bytes are read */
+ rc = ((ssize_t)nbytes_xfer_advance ==
+ POSIXREAD(fd->posixfd, buf_p, nbytes_xfer_advance));
+ VRFY((rc != 0), "POSIXREAD");
+
+ /* Advance location in buffer */
+ buf_p+=nbytes_xfer_advance;
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=nbytes_xfer_advance;
+
+ /* Decrement number of bytes left this time */
+ nbytes_toxfer-=nbytes_xfer_advance;
+
+ /* Partially advance file offset */
+ file_offset+=file_offset_advance;
+ } /* end while */
+
+ } /* end else */
+ break;
+
+ case MPIO:
+ /* 1D dataspace */
+ if (!parms->dim2d){
+ /* Independent file access */
+ if(!parms->collective) {
+ /* Contiguous pattern */
+ if (!parms->interleaved){
+ /* Compute offset in file */
+ mpi_offset = mpi_file_offset +
+ nbytes_xfer;
+
+ /* Perform independent read */
+ mrc = MPI_File_read_at(fd->mpifd, mpi_offset, buffer,
+ (int)(buf_size/blk_size), mpi_blk_type,
+ &mpi_status);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_READ");
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=buf_size;
+ } /* end if */
+ /* Interleaved access pattern */
+ else {
+ /* Set the base of user's buffer */
+ buf_p=(unsigned char *)buffer;
+
+ /* Set the number of bytes to transfer this time */
+ nbytes_toxfer = buf_size;
+
+ /* Loop over the buffers to read */
+ while(nbytes_toxfer>0) {
+ /* Skip offset over blocks of other processes */
+ mpi_offset = mpi_file_offset +
+ (nbytes_xfer*pio_mpi_nprocs_g);
+
+ /* Perform independent read */
+ mrc = MPI_File_read_at(fd->mpifd, mpi_offset, buf_p,
+ (int)1, mpi_blk_type, &mpi_status);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_READ");
+
+ /* Advance location in buffer */
+ buf_p+=blk_size;
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=blk_size;
+
+ /* Decrement number of bytes left this time */
+ nbytes_toxfer-=blk_size;
+ } /* end while */
+ } /* end else */
+ } /* end if */
+ /* Collective file access */
+ else {
+ /* Contiguous access pattern */
+ if (!parms->interleaved){
+ /* Compute offset in file */
+ mpi_offset = mpi_file_offset +
+ nbytes_xfer;
+
+ /* Perform collective read */
+ mrc = MPI_File_read_at_all(fd->mpifd, mpi_offset, buffer,
+ (int)(buf_size/blk_size), mpi_blk_type, &mpi_status);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_READ");
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=buf_size;
+ } /* end if */
+ /* Interleaved access pattern */
+ else {
+ /* Compute offset in file */
+ mpi_offset = mpi_file_offset +
+ (nbytes_xfer*pio_mpi_nprocs_g);
+
+ /* Set the file view */
+ mrc = MPI_File_set_view(fd->mpifd, mpi_offset, mpi_blk_type,
+ mpi_file_type, (char*)"native", h5_io_info_g);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_VIEW");
+
+ /* Perform collective read */
+ mrc = MPI_File_read_at_all(fd->mpifd, 0, buffer,
+ (int)(buf_size/blk_size), mpi_blk_type, &mpi_status);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_READ");
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=buf_size;
+ } /* end else */
+ } /* end else */
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ /* Contiguous storage */
+ if (!parms->h5_use_chunks) {
+ /* Contiguous access pattern */
+ if (!parms->interleaved) {
+ /* Compute offset in file */
+ mpi_offset=mpi_file_offset+((nbytes_xfer/blk_size)/snbytes)*
+ (blk_size*snbytes)+((nbytes_xfer/blk_size)%snbytes);
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = buf_size;
+
+ /* Global offset advance after each I/O operation */
+ mpi_offset_advance = snbytes;
+
+ /* MPI type to be used for collective access */
+ mpi_collective_type = mpi_cont_type;
+ } /* end if */
+ /* Interleaved access pattern */
+ else {
+ /* Compute offset in file */
+ mpi_offset=mpi_file_offset+(((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)/snbytes)*
+ (buf_size*snbytes)+((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)%snbytes;
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size;
+
+ /* Global offset advance after each I/O operation */
+ mpi_offset_advance = snbytes;
+
+ /* MPI type to be used for collective access */
+ mpi_collective_type = mpi_inter_type;
+ } /* end else */
+ } /* end if */
+ /* Chunked storage */
+ else {
+ /*Contiguous access pattern */
+ if (!parms->interleaved) {
+ /* Compute offset in file */
+ mpi_offset=mpi_file_offset+nbytes_xfer;
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size * buf_size;
+
+ /* Global offset advance after each I/O operation */
+ mpi_offset_advance = 0;
+
+ /* MPI type to be used for collective access */
+ mpi_collective_type = mpi_full_buffer;
+ } /* end if */
+ /*Interleaved access pattern */
+ else {
+ /* Compute offset in file */
+ /* Before simplification */
+ /* mpi_offset=mpi_file_offset+(nbytes_xfer/(buf_size/blk_size)
+ *pio_mpi_nprocs_g)/(snbytes/blk_size*(blk_size*blk_size))*
+ (buf_size/blk_size*snbytes/blk_size*(blk_size*blk_size))+
+ ((nbytes_xfer/(buf_size/blk_size))*pio_mpi_nprocs_g)%(snbytes
+ /blk_size*(blk_size*blk_size)); */
+ mpi_offset=mpi_file_offset+((nbytes_xfer/(buf_size/blk_size)
+ *pio_mpi_nprocs_g)/(snbytes*blk_size))*(buf_size*snbytes)
+ +((nbytes_xfer/(buf_size/blk_size))*pio_mpi_nprocs_g)%(snbytes*blk_size);
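+ /* (The simplified form assumes snbytes and buf_size are multiples of blk_size, so snbytes/blk_size*(blk_size*blk_size) == snbytes*blk_size.) */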
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size * blk_size;
+
+ /* Global offset advance after each I/O operation */
+ /* mpi_offset_advance = (MPI_Offset)(snbytes/blk_size*(blk_size*blk_size)); */
+ mpi_offset_advance = (MPI_Offset)(snbytes*blk_size);
+
+ /* MPI type to be used for collective access */
+ mpi_collective_type = mpi_chunk_inter_type;
+ } /* end else */
+ } /* end else */
+
+ /* Common code for independent file access */
+ if (!parms->collective) {
+ /* Set the base of user's buffer */
+ buf_p = (unsigned char *)buffer;
+
+ /* Set the number of bytes to transfer this time */
+ nbytes_toxfer = buf_size * blk_size;
+
+ /* Loop over portions of the buffer to read */
+ while(nbytes_toxfer>0){
+ /* Perform independent read */
+ mrc = MPI_File_read_at(fd->mpifd, mpi_offset, buf_p,
+ (int)nbytes_xfer_advance, MPI_BYTE, &mpi_status);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_READ");
+
+ /* Advance location in buffer */
+ buf_p+=nbytes_xfer_advance;
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=nbytes_xfer_advance;
+
+ /* Decrement number of bytes left this time */
+ nbytes_toxfer-=nbytes_xfer_advance;
+
+ /* Partially advance file offset */
+ mpi_offset+=mpi_offset_advance;
+ } /* end while */
+ } /* end if */
+
+ /* Common code for collective file access */
+ else {
+ /* Set the file view */
+ mrc = MPI_File_set_view(fd->mpifd, mpi_offset, MPI_BYTE,
+ mpi_collective_type, (char *)"native", h5_io_info_g);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_VIEW");
+
+ /* Perform collective read */
+ mrc = MPI_File_read_at_all(fd->mpifd, 0, buffer, (int)(buf_size*blk_size),
+ MPI_BYTE, &mpi_status);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_READ");
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=buf_size*blk_size;
+ } /* end else */
+
+ } /* end else */
+ break;
+
+ case PHDF5:
+ /* 1D dataspace */
+ if (!parms->dim2d){
+ /* Offset the selection in the file dataspace to this process's next location */
+ if (!parms->interleaved){
+ /* Contiguous pattern */
+ h5offset[0] = nbytes_xfer;
+ } /* end if */
+ else {
+ /* Interleaved access pattern */
+ /* Skip offset over blocks of other processes */
+ h5offset[0] = (nbytes_xfer*pio_mpi_nprocs_g);
+ } /* end else */
+ hrc = H5Soffset_simple(h5dset_space_id, h5offset);
+ VRFY((hrc >= 0), "H5Soffset_simple");
+
+ /* Read the buffer in */
+ hrc = H5Dread(h5ds_id, ELMT_H5_TYPE, h5mem_space_id,
+ h5dset_space_id, h5dxpl, buffer);
+ VRFY((hrc >= 0), "H5Dread");
+
+ /* Increment number of bytes transferred */
+ nbytes_xfer += buf_size;
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ /* Offset the selection in the file dataspace to this process's next location */
+ if (!parms->interleaved){
+ /* Contiguous pattern */
+ h5offset[0] = (nbytes_xfer/(snbytes*blk_size))*blk_size;
+ h5offset[1] = (nbytes_xfer%(snbytes*blk_size))/blk_size;
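+ /* (Convert this process's linear byte progress into a 2D dataspace offset.) */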
+ } /* end if */
+ else {
+ /* Interleaved access pattern */
+ /* Skip offset over blocks of other processes */
+ h5offset[0] = ((nbytes_xfer*pio_mpi_nprocs_g)/(snbytes*buf_size))*buf_size;
+ h5offset[1] = ((nbytes_xfer*pio_mpi_nprocs_g)%(snbytes*buf_size))/buf_size;
+
+ } /* end else */
+ hrc = H5Soffset_simple(h5dset_space_id, h5offset);
+ VRFY((hrc >= 0), "H5Soffset_simple");
+
+ /* Read the buffer in */
+ hrc = H5Dread(h5ds_id, ELMT_H5_TYPE, h5mem_space_id,
+ h5dset_space_id, h5dxpl, buffer);
+ VRFY((hrc >= 0), "H5Dread");
+
+ /* Increment number of bytes transferred */
+ nbytes_xfer += buf_size*blk_size;
+
+ } /* end else */
+ break;
+ } /* switch (parms->io_type) */
+
+ /* Verify raw data, if asked */
+ if (parms->verify) {
+ /* Verify data read */
+ unsigned char *ucharptr = (unsigned char *)buffer;
+ size_t i;
+ int nerror=0;
+
+ for (i = 0; i < bsize; ++i){
+ if (*ucharptr++ != pio_mpi_rank_g+1) {
+ if (++nerror < 20){
+ /* report at most 20 errors */
+ HDprint_rank(output);
+ HDfprintf(output, "read data error, expected (%Hd), "
+ "got (%Hd)\n",
+ (long_long)pio_mpi_rank_g+1,
+ (long_long)*(ucharptr-1));
+ } /* end if */
+ } /* end if */
+ } /* end for */
+ if (nerror >= 20) {
+ HDprint_rank(output);
+ HDfprintf(output, "...");
+ HDfprintf(output, "total read data errors=%d\n",
+ nerror);
+ } /* end if */
+ } /* if (parms->verify) */
+
+ } /* end while */
+
+ /* Stop "raw data" read timer */
+ set_time(res->timers, HDF5_RAW_READ_FIXED_DIMS, STOP);
+
+ /* Calculate read time */
+
+ /* Close dataset. Only HDF5 needs to do an explicit close. */
+ if (parms->io_type == PHDF5) {
+ hrc = H5Dclose(h5ds_id);
+
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Dataset Close failed\n");
+ GOTOERROR(FAIL);
+ }
+
+ h5ds_id = -1;
+ } /* end if */
} /* end for */
done:
/* release MPI-I/O objects */
if (parms->io_type == MPIO) {
- /* 1D dataspace */
- if (parms->dim2d==0){
- /* Free file type */
- mrc = MPI_Type_free( &mpi_file_type );
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
-
- /* Free buffer type */
- mrc = MPI_Type_free( &mpi_blk_type );
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
- } /* end if */
- /* 2D dataspace */
- else {
- /* Free file type */
- mrc = MPI_Type_free( &mpi_cont_type );
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
-
- /* Free partial buffer type */
- mrc = MPI_Type_free( &contig_cont );
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
-
- /* Free file type */
- mrc = MPI_Type_free( &mpi_inter_type );
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
-
- /* Free partial buffer type */
- mrc = MPI_Type_free( &contig_inter );
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
- } /* end else */
+ /* 1D dataspace */
+ if (!parms->dim2d){
+ /* Free file type */
+ mrc = MPI_Type_free( &mpi_file_type );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free buffer type */
+ mrc = MPI_Type_free( &mpi_blk_type );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ /* Free partial buffer type for contiguous access */
+ mrc = MPI_Type_free( &mpi_partial_buffer_cont );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free contiguous file type */
+ mrc = MPI_Type_free( &mpi_cont_type );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free partial buffer type for interleaved access */
+ mrc = MPI_Type_free( &mpi_partial_buffer_inter );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free interleaved file type */
+ mrc = MPI_Type_free( &mpi_inter_type );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free full buffer type */
+ mrc = MPI_Type_free(&mpi_full_buffer);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free full chunk type */
+ mrc = MPI_Type_free(&mpi_full_chunk);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free chunk interleaved file type */
+ mrc = MPI_Type_free(&mpi_chunk_inter_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+ } /* end else */
} /* end if */
/* release HDF5 objects */
if (h5dset_space_id != -1) {
- hrc = H5Sclose(h5dset_space_id);
- if (hrc < 0){
- fprintf(stderr, "HDF5 Dataset Space Close failed\n");
- ret_code = FAIL;
- } else {
- h5dset_space_id = -1;
- }
+ hrc = H5Sclose(h5dset_space_id);
+ if (hrc < 0){
+ fprintf(stderr, "HDF5 Dataset Space Close failed\n");
+ ret_code = FAIL;
+ } else {
+ h5dset_space_id = -1;
+ }
}
if (h5mem_space_id != -1) {
- hrc = H5Sclose(h5mem_space_id);
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Memory Space Close failed\n");
- ret_code = FAIL;
- } else {
- h5mem_space_id = -1;
- }
+ hrc = H5Sclose(h5mem_space_id);
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Memory Space Close failed\n");
+ ret_code = FAIL;
+ } else {
+ h5mem_space_id = -1;
+ }
}
if (h5dxpl != -1) {
- hrc = H5Pclose(h5dxpl);
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Dataset Transfer Property List Close failed\n");
- ret_code = FAIL;
- } else {
- h5dxpl = -1;
- }
+ hrc = H5Pclose(h5dxpl);
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Dataset Transfer Property List Close failed\n");
+ ret_code = FAIL;
+ } else {
+ h5dxpl = -1;
+ }
}
return ret_code;
@@ -2230,115 +2443,115 @@ do_fopen(parameters *param, char *fname, file_descr *fd /*out*/, int flags)
hbool_t use_gpfs = FALSE; /* use GPFS hints */
switch (param->io_type) {
- case POSIXIO:
- if (flags & (PIO_CREATE | PIO_WRITE))
- fd->posixfd = POSIXCREATE(fname);
- else
- fd->posixfd = POSIXOPEN(fname, O_RDONLY);
-
- if (fd->posixfd < 0 ) {
- fprintf(stderr, "POSIX File Open failed(%s)\n", fname);
- GOTOERROR(FAIL);
- }
-
-
- /* The perils of POSIX I/O in a parallel environment. The problem is:
- *
- * - Process n opens a file with truncation and then starts
- * writing to the file.
- * - Process m also opens the file with truncation, but after
- * process n has already started to write to the file. Thus,
- * all of the stuff process n wrote is now lost.
- */
- MPI_Barrier(pio_comm_g);
-
- break;
-
- case MPIO:
- if (flags & (PIO_CREATE | PIO_WRITE)) {
- MPI_File_delete(fname, h5_io_info_g);
- mrc = MPI_File_open(pio_comm_g, fname, MPI_MODE_CREATE | MPI_MODE_RDWR,
- h5_io_info_g, &fd->mpifd);
-
- if (mrc != MPI_SUCCESS) {
- fprintf(stderr, "MPI File Open failed(%s)\n", fname);
- GOTOERROR(FAIL);
- }
-
- /*since MPI_File_open with MPI_MODE_CREATE does not truncate */
- /*filesize , set size to 0 explicitedly. */
- mrc = MPI_File_set_size(fd->mpifd, (MPI_Offset)0);
-
- if (mrc != MPI_SUCCESS) {
- fprintf(stderr, "MPI_File_set_size failed\n");
- GOTOERROR(FAIL);
- }
- } else {
- mrc = MPI_File_open(pio_comm_g, fname, MPI_MODE_RDONLY,
- h5_io_info_g, &fd->mpifd);
-
- if (mrc != MPI_SUCCESS) {
- fprintf(stderr, "MPI File Open failed(%s)\n", fname);
- GOTOERROR(FAIL);
- }
- }
-
- break;
-
- case PHDF5:
- acc_tpl = H5Pcreate(H5P_FILE_ACCESS);
- if (acc_tpl < 0) {
- fprintf(stderr, "HDF5 Property List Create failed\n");
- GOTOERROR(FAIL);
- }
-
- /* Use the appropriate VFL driver */
- if(param->h5_use_mpi_posix) {
- /* Set the file driver to the MPI-posix driver */
- hrc = H5Pset_fapl_mpiposix(acc_tpl, pio_comm_g, use_gpfs);
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Property List Set failed\n");
- GOTOERROR(FAIL);
- }
- } /* end if */
- else {
- /* Set the file driver to the MPI-I/O driver */
- hrc = H5Pset_fapl_mpio(acc_tpl, pio_comm_g, h5_io_info_g);
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Property List Set failed\n");
- GOTOERROR(FAIL);
- }
- } /* end else */
-
- /* Set the alignment of objects in HDF5 file */
- hrc = H5Pset_alignment(acc_tpl, param->h5_thresh, param->h5_align);
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Property List Set failed\n");
- GOTOERROR(FAIL);
- }
-
- /* create the parallel file */
- if (flags & (PIO_CREATE | PIO_WRITE)) {
- fd->h5fd = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
- } else {
- fd->h5fd = H5Fopen(fname, H5F_ACC_RDONLY, acc_tpl);
- }
-
- hrc = H5Pclose(acc_tpl);
-
- if (fd->h5fd < 0) {
- fprintf(stderr, "HDF5 File Create failed(%s)\n", fname);
- GOTOERROR(FAIL);
- }
-
- /* verifying the close of the acc_tpl */
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Property List Close failed\n");
- GOTOERROR(FAIL);
- }
-
- break;
- }
+ case POSIXIO:
+ if (flags & (PIO_CREATE | PIO_WRITE))
+ fd->posixfd = POSIXCREATE(fname);
+ else
+ fd->posixfd = POSIXOPEN(fname, O_RDONLY);
+
+ if (fd->posixfd < 0 ) {
+ fprintf(stderr, "POSIX File Open failed(%s)\n", fname);
+ GOTOERROR(FAIL);
+ }
+
+
+ /* The perils of POSIX I/O in a parallel environment. The problem is:
+ *
+ * - Process n opens a file with truncation and then starts
+ * writing to the file.
+ * - Process m also opens the file with truncation, but after
+ * process n has already started to write to the file. Thus,
+ * all of the stuff process n wrote is now lost.
+ */
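+ /* Make every process wait here so that all ranks have created/truncated the file before any rank begins I/O. */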
+ MPI_Barrier(pio_comm_g);
+
+ break;
+
+ case MPIO:
+ if (flags & (PIO_CREATE | PIO_WRITE)) {
+ MPI_File_delete(fname, h5_io_info_g);
+ mrc = MPI_File_open(pio_comm_g, fname, MPI_MODE_CREATE | MPI_MODE_RDWR,
+ h5_io_info_g, &fd->mpifd);
+
+ if (mrc != MPI_SUCCESS) {
+ fprintf(stderr, "MPI File Open failed(%s)\n", fname);
+ GOTOERROR(FAIL);
+ }
+
+ /* since MPI_File_open with MPI_MODE_CREATE does not truncate the */
+ /* file size, set the size to 0 explicitly. */
+ mrc = MPI_File_set_size(fd->mpifd, (MPI_Offset)0);
+
+ if (mrc != MPI_SUCCESS) {
+ fprintf(stderr, "MPI_File_set_size failed\n");
+ GOTOERROR(FAIL);
+ }
+ } else {
+ mrc = MPI_File_open(pio_comm_g, fname, MPI_MODE_RDONLY,
+ h5_io_info_g, &fd->mpifd);
+
+ if (mrc != MPI_SUCCESS) {
+ fprintf(stderr, "MPI File Open failed(%s)\n", fname);
+ GOTOERROR(FAIL);
+ }
+ }
+
+ break;
+
+ case PHDF5:
+ acc_tpl = H5Pcreate(H5P_FILE_ACCESS);
+ if (acc_tpl < 0) {
+ fprintf(stderr, "HDF5 Property List Create failed\n");
+ GOTOERROR(FAIL);
+ }
+
+ /* Use the appropriate VFL driver */
+ if(param->h5_use_mpi_posix) {
+ /* Set the file driver to the MPI-posix driver */
+ hrc = H5Pset_fapl_mpiposix(acc_tpl, pio_comm_g, use_gpfs);
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Property List Set failed\n");
+ GOTOERROR(FAIL);
+ }
+ } /* end if */
+ else {
+ /* Set the file driver to the MPI-I/O driver */
+ hrc = H5Pset_fapl_mpio(acc_tpl, pio_comm_g, h5_io_info_g);
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Property List Set failed\n");
+ GOTOERROR(FAIL);
+ }
+ } /* end else */
+
+ /* Set the alignment of objects in HDF5 file */
+ hrc = H5Pset_alignment(acc_tpl, param->h5_thresh, param->h5_align);
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Property List Set failed\n");
+ GOTOERROR(FAIL);
+ }
+
+ /* create the parallel file */
+ if (flags & (PIO_CREATE | PIO_WRITE)) {
+ fd->h5fd = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ } else {
+ fd->h5fd = H5Fopen(fname, H5F_ACC_RDONLY, acc_tpl);
+ }
+
+ hrc = H5Pclose(acc_tpl);
+
+ if (fd->h5fd < 0) {
+ fprintf(stderr, "HDF5 File Create failed(%s)\n", fname);
+ GOTOERROR(FAIL);
+ }
+
+ /* verifying the close of the acc_tpl */
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Property List Close failed\n");
+ GOTOERROR(FAIL);
+ }
+
+ break;
+ }
done:
return ret_code;
@@ -2358,38 +2571,38 @@ do_fclose(iotype iot, file_descr *fd /*out*/)
int mrc = 0, rc = 0;
switch (iot) {
- case POSIXIO:
- rc = POSIXCLOSE(fd->posixfd);
+ case POSIXIO:
+ rc = POSIXCLOSE(fd->posixfd);
- if (rc != 0){
- fprintf(stderr, "POSIX File Close failed\n");
- GOTOERROR(FAIL);
- }
+ if (rc != 0){
+ fprintf(stderr, "POSIX File Close failed\n");
+ GOTOERROR(FAIL);
+ }
- fd->posixfd = -1;
- break;
+ fd->posixfd = -1;
+ break;
- case MPIO:
- mrc = MPI_File_close(&fd->mpifd);
+ case MPIO:
+ mrc = MPI_File_close(&fd->mpifd);
- if (mrc != MPI_SUCCESS){
- fprintf(stderr, "MPI File close failed\n");
- GOTOERROR(FAIL);
- }
+ if (mrc != MPI_SUCCESS){
+ fprintf(stderr, "MPI File close failed\n");
+ GOTOERROR(FAIL);
+ }
- fd->mpifd = MPI_FILE_NULL;
- break;
+ fd->mpifd = MPI_FILE_NULL;
+ break;
- case PHDF5:
- hrc = H5Fclose(fd->h5fd);
+ case PHDF5:
+ hrc = H5Fclose(fd->h5fd);
- if (hrc < 0) {
- fprintf(stderr, "HDF5 File Close failed\n");
- GOTOERROR(FAIL);
- }
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 File Close failed\n");
+ GOTOERROR(FAIL);
+ }
- fd->h5fd = -1;
- break;
+ fd->h5fd = -1;
+ break;
}
done:
@@ -2400,8 +2613,8 @@ done:
/*
* Function: do_cleanupfile
* Purpose: Cleanup temporary file unless HDF5_NOCLEANUP is set.
- * Only Proc 0 of the PIO communicator will do the cleanup.
- * Other processes just return.
+ * Only Proc 0 of the PIO communicator will do the cleanup.
+ * Other processes just return.
* Return: void
* Programmer: Albert Cheng 2001/12/12
* Modifications:
@@ -2410,21 +2623,21 @@ done:
do_cleanupfile(iotype iot, char *fname)
{
if (pio_mpi_rank_g != 0)
- return;
+ return;
if (clean_file_g == -1)
- clean_file_g = (getenv("HDF5_NOCLEANUP")==NULL) ? 1 : 0;
+ clean_file_g = (getenv("HDF5_NOCLEANUP")==NULL) ? 1 : 0;
if (clean_file_g){
- switch (iot){
- case POSIXIO:
- remove(fname);
- break;
- case MPIO:
- case PHDF5:
- MPI_File_delete(fname, h5_io_info_g);
- break;
- }
+ switch (iot){
+ case POSIXIO:
+ remove(fname);
+ break;
+ case MPIO:
+ case PHDF5:
+ MPI_File_delete(fname, h5_io_info_g);
+ break;
+ }
}
}
@@ -2465,8 +2678,8 @@ do_cleanupfile(iotype iot, char *fname)
gpfs_access_range(int handle, off_t start, off_t length, int is_write)
{
struct {
- gpfsFcntlHeader_t hdr;
- gpfsAccessRange_t access;
+ gpfsFcntlHeader_t hdr;
+ gpfsAccessRange_t access;
} access_range;
access_range.hdr.totalLength = sizeof(access_range);
@@ -2479,10 +2692,10 @@ gpfs_access_range(int handle, off_t start, off_t length, int is_write)
access_range.access.isWrite = is_write;
if (gpfs_fcntl(handle, &access_range) != 0) {
- fprintf(stderr,
- "gpfs_fcntl DS start directive failed. errno=%d errorOffset=%d\n",
- errno, access_range.hdr.errorOffset);
- exit(EXIT_FAILURE);
+ fprintf(stderr,
+ "gpfs_fcntl DS start directive failed. errno=%d errorOffset=%d\n",
+ errno, access_range.hdr.errorOffset);
+ exit(EXIT_FAILURE);
}
}
@@ -2514,8 +2727,8 @@ gpfs_access_range(int handle, off_t start, off_t length, int is_write)
gpfs_free_range(int handle, off_t start, off_t length)
{
struct {
- gpfsFcntlHeader_t hdr;
- gpfsFreeRange_t range;
+ gpfsFcntlHeader_t hdr;
+ gpfsFreeRange_t range;
} free_range;
/* Issue the invalidate hint */
@@ -2528,10 +2741,10 @@ gpfs_free_range(int handle, off_t start, off_t length)
free_range.range.length = length;
if (gpfs_fcntl(handle, &free_range) != 0) {
- fprintf(stderr,
- "gpfs_fcntl free range failed for range %d:%d. errno=%d errorOffset=%d\n",
- start, length, errno, free_range.hdr.errorOffset);
- exit(EXIT_FAILURE);
+ fprintf(stderr,
+ "gpfs_fcntl free range failed for range %ld:%ld. errno=%d errorOffset=%d\n",
+ (long)start, (long)length, errno, free_range.hdr.errorOffset);
+ exit(EXIT_FAILURE);
}
}
@@ -2558,8 +2771,8 @@ gpfs_free_range(int handle, off_t start, off_t length)
gpfs_clear_file_cache(int handle)
{
struct {
- gpfsFcntlHeader_t hdr;
- gpfsClearFileCache_t clear;
+ gpfsFcntlHeader_t hdr;
+ gpfsClearFileCache_t clear;
} clear_cache;
clear_cache.hdr.totalLength = sizeof(clear_cache);
@@ -2569,10 +2782,10 @@ gpfs_clear_file_cache(int handle)
clear_cache.clear.structType = GPFS_CLEAR_FILE_CACHE;
if (gpfs_fcntl(handle, &clear_cache) != 0) {
- fprintf(stderr,
- "gpfs_fcntl clear file cache directive failed. errno=%d errorOffset=%d\n",
- errno, clear_cache.hdr.errorOffset);
- exit(EXIT_FAILURE);
+ fprintf(stderr,
+ "gpfs_fcntl clear file cache directive failed. errno=%d errorOffset=%d\n",
+ errno, clear_cache.hdr.errorOffset);
+ exit(EXIT_FAILURE);
}
}
@@ -2604,8 +2817,8 @@ gpfs_clear_file_cache(int handle)
gpfs_cancel_hints(int handle)
{
struct {
- gpfsFcntlHeader_t hdr;
- gpfsCancelHints_t cancel;
+ gpfsFcntlHeader_t hdr;
+ gpfsCancelHints_t cancel;
} cancel_hints;
cancel_hints.hdr.totalLength = sizeof(cancel_hints);
@@ -2615,10 +2828,10 @@ gpfs_cancel_hints(int handle)
cancel_hints.cancel.structType = GPFS_CANCEL_HINTS;
if (gpfs_fcntl(handle, &cancel_hints) != 0) {
- fprintf(stderr,
- "gpfs_fcntl cancel hints directive failed. errno=%d errorOffset=%d\n",
- errno, cancel_hints.hdr.errorOffset);
- exit(EXIT_FAILURE);
+ fprintf(stderr,
+ "gpfs_fcntl cancel hints directive failed. errno=%d errorOffset=%d\n",
+ errno, cancel_hints.hdr.errorOffset);
+ exit(EXIT_FAILURE);
}
}
@@ -2650,8 +2863,8 @@ gpfs_cancel_hints(int handle)
gpfs_start_data_shipping(int handle, int num_insts)
{
struct {
- gpfsFcntlHeader_t hdr;
- gpfsDataShipStart_t start;
+ gpfsFcntlHeader_t hdr;
+ gpfsDataShipStart_t start;
} ds_start;
ds_start.hdr.totalLength = sizeof(ds_start);
@@ -2663,10 +2876,10 @@ gpfs_start_data_shipping(int handle, int num_insts)
ds_start.start.reserved = 0;
if (gpfs_fcntl(handle, &ds_start) != 0) {
- fprintf(stderr,
- "gpfs_fcntl DS start directive failed. errno=%d errorOffset=%d\n",
- errno, ds_start.hdr.errorOffset);
- exit(EXIT_FAILURE);
+ fprintf(stderr,
+ "gpfs_fcntl DS start directive failed. errno=%d errorOffset=%d\n",
+ errno, ds_start.hdr.errorOffset);
+ exit(EXIT_FAILURE);
}
}
@@ -2691,12 +2904,12 @@ gpfs_start_data_shipping(int handle, int num_insts)
*/
static void
gpfs_start_data_ship_map(int handle, int partition_size, int agent_count,
- int *agent_node_num)
+ int *agent_node_num)
{
int i;
struct {
- gpfsFcntlHeader_t hdr;
- gpfsDataShipMap_t map;
+ gpfsFcntlHeader_t hdr;
+ gpfsDataShipMap_t map;
} ds_map;
ds_map.hdr.totalLength = sizeof(ds_map);
@@ -2708,13 +2921,13 @@ gpfs_start_data_ship_map(int handle, int partition_size, int agent_count,
ds_map.map.agentCount = agent_count;
for (i = 0; i < agent_count; ++i)
- ds_map.map.agentNodeNumber[i] = agent_node_num[i];
+ ds_map.map.agentNodeNumber[i] = agent_node_num[i];
if (gpfs_fcntl(handle, &ds_map) != 0) {
- fprintf(stderr,
- "gpfs_fcntl DS map directive failed. errno=%d errorOffset=%d\n",
- errno, ds_map.hdr.errorOffset);
- exit(EXIT_FAILURE);
+ fprintf(stderr,
+ "gpfs_fcntl DS map directive failed. errno=%d errorOffset=%d\n",
+ errno, ds_map.hdr.errorOffset);
+ exit(EXIT_FAILURE);
}
}
@@ -2746,8 +2959,8 @@ gpfs_start_data_ship_map(int handle, int partition_size, int agent_count,
gpfs_stop_data_shipping(int handle)
{
struct {
- gpfsFcntlHeader_t hdr;
- gpfsDataShipStop_t stop;
+ gpfsFcntlHeader_t hdr;
+ gpfsDataShipStop_t stop;
} ds_stop;
ds_stop.hdr.totalLength = sizeof(ds_stop);
@@ -2757,9 +2970,9 @@ gpfs_stop_data_shipping(int handle)
ds_stop.stop.structType = GPFS_DATA_SHIP_STOP;
if (gpfs_fcntl(handle, &ds_stop) != 0)
- fprintf(stderr,
- "gpfs_fcntl DS stop directive failed. errno=%d errorOffset=%d\n",
- errno, ds_stop.hdr.errorOffset);
+ fprintf(stderr,
+ "gpfs_fcntl DS stop directive failed. errno=%d errorOffset=%d\n",
+ errno, ds_stop.hdr.errorOffset);
}
/*
@@ -2775,15 +2988,15 @@ gpfs_invalidate_file_cache(const char *filename)
{
int handle;
struct {
- gpfsFcntlHeader_t hdr;
- gpfsClearFileCache_t inv;
+ gpfsFcntlHeader_t hdr;
+ gpfsClearFileCache_t inv;
} inv_cache_hint;
/* Open the file. If the open fails, the file cannot be cached. */
handle = open(filename, O_RDONLY, 0);
if (handle == -1)
- return;
+ return;
/* Issue the invalidate hint */
inv_cache_hint.hdr.totalLength = sizeof(inv_cache_hint);
@@ -2793,21 +3006,21 @@ gpfs_invalidate_file_cache(const char *filename)
inv_cache_hint.inv.structType = GPFS_CLEAR_FILE_CACHE;
if (gpfs_fcntl(handle, &inv_cache_hint) != 0) {
- fprintf(stderr,
- "gpfs_fcntl clear cache hint failed for file '%s'.",
- filename);
- fprintf(stderr, " errno=%d errorOffset=%d\n",
- errno, inv_cache_hint.hdr.errorOffset);
- exit(1);
+ fprintf(stderr,
+ "gpfs_fcntl clear cache hint failed for file '%s'.",
+ filename);
+ fprintf(stderr, " errno=%d errorOffset=%d\n",
+ errno, inv_cache_hint.hdr.errorOffset);
+ exit(1);
}
/* Close the file */
if (close(handle) == -1) {
- fprintf(stderr,
- "could not close file '%s' after flushing file cache, ",
- filename);
- fprintf(stderr, "errno=%d\n", errno);
- exit(1);
+ fprintf(stderr,
+ "could not close file '%s' after flushing file cache, ",
+ filename);
+ fprintf(stderr, "errno=%d\n", errno);
+ exit(1);
}
}
@@ -2819,7 +3032,7 @@ gpfs_invalidate_file_cache(const char *filename)
static void
gpfs_access_range(int UNUSED handle, off_t UNUSED start, off_t UNUSED length,
- int UNUSED is_write)
+ int UNUSED is_write)
{
return;
}
@@ -2856,7 +3069,7 @@ gpfs_stop_data_shipping(int UNUSED handle)
static void
gpfs_start_data_ship_map(int UNUSED handle, int UNUSED partition_size,
- int UNUSED agent_count, int UNUSED *agent_node_num)
+ int UNUSED agent_count, int UNUSED *agent_node_num)
{
return;
}
@@ -2876,7 +3089,7 @@ gpfs_invalidate_file_cache(const char UNUSED *filename)
* pure time spent in MPI_File code.
*/
int MPI_File_read_at(MPI_File fh, MPI_Offset offset, void *buf,
- int count, MPI_Datatype datatype, MPI_Status *status)
+ int count, MPI_Datatype datatype, MPI_Status *status)
{
int err;
set_time(timer_g, HDF5_MPI_READ, START);
@@ -2887,7 +3100,7 @@ int MPI_File_read_at(MPI_File fh, MPI_Offset offset, void *buf,
int MPI_File_read_at_all(MPI_File fh, MPI_Offset offset, void *buf,
- int count, MPI_Datatype datatype, MPI_Status *status)
+ int count, MPI_Datatype datatype, MPI_Status *status)
{
int err;
set_time(timer_g, HDF5_MPI_READ, START);
@@ -2897,7 +3110,7 @@ int MPI_File_read_at_all(MPI_File fh, MPI_Offset offset, void *buf,
}
int MPI_File_write_at(MPI_File fh, MPI_Offset offset, void *buf,
- int count, MPI_Datatype datatype, MPI_Status *status)
+ int count, MPI_Datatype datatype, MPI_Status *status)
{
int err;
set_time(timer_g, HDF5_MPI_WRITE, START);
@@ -2907,7 +3120,7 @@ int MPI_File_write_at(MPI_File fh, MPI_Offset offset, void *buf,
}
int MPI_File_write_at_all(MPI_File fh, MPI_Offset offset, void *buf,
- int count, MPI_Datatype datatype, MPI_Status *status)
+ int count, MPI_Datatype datatype, MPI_Status *status)
{
int err;
set_time(timer_g, HDF5_MPI_WRITE, START);
@@ -2916,5 +3129,10 @@ int MPI_File_write_at_all(MPI_File fh, MPI_Offset offset, void *buf,
return err;
}
-#endif /* TIME_MPI */
+#endif /* TIME_MPI */
#endif /* H5_HAVE_PARALLEL */
+
+
+
+
+
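For reference, the interleaved 1D pattern exercised by the POSIX and MPIO read
branches above boils down to one piece of offset arithmetic: each rank reads one
block, then skips over the blocks of all other ranks. The sketch below is a
minimal, standalone illustration of that bookkeeping; the rank count, block and
buffer sizes, and the per-rank base offset are assumed values, not taken from the
benchmark.

#include <stdio.h>

int main(void)
{
    const long nprocs   = 4;                 /* assumed number of MPI ranks        */
    const long rank     = 1;                 /* assumed rank being traced          */
    const long blk_size = 256;               /* bytes per block                    */
    const long buf_size = 1024;              /* bytes transferred per pass         */
    const long base     = rank * blk_size;   /* assumed start of this rank's data  */

    long nbytes_xfer   = 0;                  /* global progress of this rank       */
    long nbytes_toxfer = buf_size;           /* bytes still to read this pass      */

    while (nbytes_toxfer > 0) {
        /* Same arithmetic as the interleaved branch: skip over the blocks
         * of the other ranks by scaling this rank's progress by nprocs. */
        long offset = base + nbytes_xfer * nprocs;

        printf("rank %ld reads %ld bytes at offset %ld\n", rank, blk_size, offset);

        nbytes_xfer   += blk_size;           /* advance global offset in dataset   */
        nbytes_toxfer -= blk_size;           /* decrement bytes left this pass     */
    }
    return 0;
}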
diff --git a/perform/pio_perf.c b/perform/pio_perf.c
index b9ce309..bb1880a 100644
--- a/perform/pio_perf.c
+++ b/perform/pio_perf.c
@@ -287,7 +287,7 @@ struct options {
size_t blk_size; /* Block size */
unsigned interleaved; /* Interleaved vs. contiguous blocks */
unsigned collective; /* Collective vs. independent I/O */
- unsigned dim2d; /* 1D vs. 2D */
+ unsigned dim2d; /* 1D vs. 2D geometry */
int print_times; /* print times as well as throughputs */
int print_raw; /* print raw data throughput info */
off_t h5_alignment; /* alignment in HDF5 file */
@@ -453,8 +453,8 @@ run_test_loop(struct options *opts)
/* do something harsh */
}
- /* only processes doing PIO will run the tests */
- if (doing_pio){
+ /* only processes doing PIO will run the tests */
+ if (doing_pio){
output_report("Number of processors = %ld\n", parms.num_procs);
/* multiply the xfer buffer size by 2 for each loop iteration */
@@ -462,33 +462,33 @@ run_test_loop(struct options *opts)
buf_size <= opts->max_xfer_size; buf_size <<= 1) {
parms.buf_size = buf_size;
- if (parms.dim2d){
- parms.num_bytes = (off_t)pow((double)(opts->num_bpp*parms.num_procs),2);
- if (parms.interleaved)
- output_report("Transfer Buffer Size: %ldx%ld bytes, File size: %.2f MBs\n",
- buf_size, opts->blk_size,
- ((double)parms.num_dsets * (double)parms.num_bytes)
- / ONE_MB);
- else
- output_report("Transfer Buffer Size: %ldx%ld bytes, File size: %.2f MBs\n",
- opts->blk_size, buf_size,
- ((double)parms.num_dsets * (double)parms.num_bytes)
- / ONE_MB);
-
- print_indent(1);
- output_report(" # of files: %ld, # of datasets: %ld, dataset size: %.2fx%.2f KBs\n",
- parms.num_files, parms.num_dsets, (double)(opts->num_bpp*parms.num_procs)/ONE_KB,
- (double)(opts->num_bpp*parms.num_procs)/ONE_KB);
- }
- else{
- parms.num_bytes = (off_t)opts->num_bpp*parms.num_procs;
- output_report("Transfer Buffer Size: %ld bytes, File size: %.2f MBs\n",
- buf_size,((double)parms.num_dsets * (double)parms.num_bytes) / ONE_MB);
-
- print_indent(1);
- output_report(" # of files: %ld, # of datasets: %ld, dataset size: %.2f MBs\n",
- parms.num_files, parms.num_dsets, (double)(opts->num_bpp*parms.num_procs)/ONE_MB);
- }
+ if (parms.dim2d){
+ parms.num_bytes = (off_t)pow((double)(opts->num_bpp*parms.num_procs),2);
+ if (parms.interleaved)
+ output_report("Transfer Buffer Size: %ldx%ld bytes, File size: %.2f MBs\n",
+ buf_size, opts->blk_size,
+ ((double)parms.num_dsets * (double)parms.num_bytes)
+ / ONE_MB);
+ else
+ output_report("Transfer Buffer Size: %ldx%ld bytes, File size: %.2f MBs\n",
+ opts->blk_size, buf_size,
+ ((double)parms.num_dsets * (double)parms.num_bytes)
+ / ONE_MB);
+
+ print_indent(1);
+ output_report(" # of files: %ld, # of datasets: %ld, dataset size: %.2fx%.2f KBs\n",
+ parms.num_files, parms.num_dsets, (double)(opts->num_bpp*parms.num_procs)/ONE_KB,
+ (double)(opts->num_bpp*parms.num_procs)/ONE_KB);
+ }
+ else{
+ parms.num_bytes = (off_t)opts->num_bpp*parms.num_procs;
+ output_report("Transfer Buffer Size: %ld bytes, File size: %.2f MBs\n",
+ buf_size,((double)parms.num_dsets * (double)parms.num_bytes) / ONE_MB);
+
+ print_indent(1);
+ output_report(" # of files: %ld, # of datasets: %ld, dataset size: %.2f MBs\n",
+ parms.num_files, parms.num_dsets, (double)(opts->num_bpp*parms.num_procs)/ONE_MB);
+ }
if (opts->io_types & PIO_POSIX)
run_test(POSIXIO, parms, opts);
@@ -507,7 +507,7 @@ run_test_loop(struct options *opts)
if (destroy_comm_world() != SUCCESS) {
/* do something harsh */
}
- }
+ }
}
}
@@ -533,10 +533,10 @@ run_test(iotype iot, parameters parms, struct options *opts)
minmax *read_mm_table=NULL;
minmax *read_gross_mm_table=NULL;
minmax *read_raw_mm_table=NULL;
- minmax *read_open_mm_table=NULL;
- minmax *read_close_mm_table=NULL;
- minmax *write_open_mm_table=NULL;
- minmax *write_close_mm_table=NULL;
+ minmax *read_open_mm_table=NULL;
+ minmax *read_close_mm_table=NULL;
+ minmax *write_open_mm_table=NULL;
+ minmax *write_close_mm_table=NULL;
minmax write_mpi_mm = {0.0, 0.0, 0.0, 0};
minmax write_mm = {0.0, 0.0, 0.0, 0};
minmax write_gross_mm = {0.0, 0.0, 0.0, 0};
@@ -545,10 +545,10 @@ run_test(iotype iot, parameters parms, struct options *opts)
minmax read_mm = {0.0, 0.0, 0.0, 0};
minmax read_gross_mm = {0.0, 0.0, 0.0, 0};
minmax read_raw_mm = {0.0, 0.0, 0.0, 0};
- minmax read_open_mm = {0.0, 0.0, 0.0, 0};
- minmax read_close_mm = {0.0, 0.0, 0.0, 0};
- minmax write_open_mm = {0.0, 0.0, 0.0, 0};
- minmax write_close_mm = {0.0, 0.0, 0.0, 0};
+ minmax read_open_mm = {0.0, 0.0, 0.0, 0};
+ minmax read_close_mm = {0.0, 0.0, 0.0, 0};
+ minmax write_open_mm = {0.0, 0.0, 0.0, 0};
+ minmax write_close_mm = {0.0, 0.0, 0.0, 0};
raw_size = (off_t)parms.num_dsets * (off_t)parms.num_bytes;
parms.io_type = iot;
@@ -582,14 +582,13 @@ run_test(iotype iot, parameters parms, struct options *opts)
write_close_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
if (!parms.h5_write_only) {
- read_mpi_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
- read_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
- read_gross_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
- read_raw_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
- read_open_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
- read_close_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
-
- }
+ read_mpi_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
+ read_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
+ read_gross_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
+ read_raw_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
+ read_open_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
+ read_close_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
+ }
/* Do IO iteration times, collecting statistics each time */
for (i = 0; i < parms.num_iters; ++i) {
@@ -600,37 +599,37 @@ run_test(iotype iot, parameters parms, struct options *opts)
/* gather all of the "mpi write" times */
t = get_time(res.timers, HDF5_MPI_WRITE);
- get_minmax(&write_mpi_mm, t);
+ get_minmax(&write_mpi_mm, t);
write_mpi_mm_table[i] = write_mpi_mm;
/* gather all of the "write" times */
t = get_time(res.timers, HDF5_FINE_WRITE_FIXED_DIMS);
- get_minmax(&write_mm, t);
+ get_minmax(&write_mm, t);
write_mm_table[i] = write_mm;
/* gather all of the "write" times from open to close */
t = get_time(res.timers, HDF5_GROSS_WRITE_FIXED_DIMS);
- get_minmax(&write_gross_mm, t);
+ get_minmax(&write_gross_mm, t);
write_gross_mm_table[i] = write_gross_mm;
/* gather all of the raw "write" times */
- t = get_time(res.timers, HDF5_RAW_WRITE_FIXED_DIMS);
- get_minmax(&write_raw_mm, t);
+ t = get_time(res.timers, HDF5_RAW_WRITE_FIXED_DIMS);
+ get_minmax(&write_raw_mm, t);
- write_raw_mm_table[i] = write_raw_mm;
+ write_raw_mm_table[i] = write_raw_mm;
- /* gather all of the file open times (time from open to first write) */
- t = get_time(res.timers, HDF5_FILE_WRITE_OPEN);
- get_minmax(&write_open_mm, t);
+ /* gather all of the file open times (time from open to first write) */
+ t = get_time(res.timers, HDF5_FILE_WRITE_OPEN);
+ get_minmax(&write_open_mm, t);
- write_open_mm_table[i] = write_open_mm;
+ write_open_mm_table[i] = write_open_mm;
- /* gather all of the file close times (time from last write to close) */
- t = get_time(res.timers, HDF5_FILE_WRITE_CLOSE);
- get_minmax(&write_close_mm, t);
+ /* gather all of the file close times (time from last write to close) */
+ t = get_time(res.timers, HDF5_FILE_WRITE_CLOSE);
+ get_minmax(&write_close_mm, t);
write_close_mm_table[i] = write_close_mm;
@@ -659,21 +658,21 @@ run_test(iotype iot, parameters parms, struct options *opts)
read_raw_mm_table[i] = read_raw_mm;
- /* gather all of the file open times (time from open to first read) */
- t = get_time(res.timers, HDF5_FILE_READ_OPEN);
- get_minmax(&read_open_mm, t);
+ /* gather all of the file open times (time from open to first read) */
+ t = get_time(res.timers, HDF5_FILE_READ_OPEN);
+ get_minmax(&read_open_mm, t);
- read_open_mm_table[i] = read_open_mm;
+ read_open_mm_table[i] = read_open_mm;
- /* gather all of the file close times (time from last read to close) */
- t = get_time(res.timers, HDF5_FILE_READ_CLOSE);
- get_minmax(&read_close_mm, t);
+ /* gather all of the file close times (time from last read to close) */
+ t = get_time(res.timers, HDF5_FILE_READ_CLOSE);
+ get_minmax(&read_close_mm, t);
- read_close_mm_table[i] = read_close_mm;
+ read_close_mm_table[i] = read_close_mm;
- }
+ }
- pio_time_destroy(res.timers);
+ pio_time_destroy(res.timers);
}
/*
@@ -722,25 +721,22 @@ run_test(iotype iot, parameters parms, struct options *opts)
}
output_results(opts,"Write Open-Close",write_gross_mm_table,parms.num_iters,raw_size);
-
/* Print out time from open to first write */
if (pio_debug_level >= 3) {
- /* output all of the times for all iterations */
- print_indent(3);
- output_report("Write file open details:\n");
- output_all_info(write_open_mm_table, parms.num_iters, 4);
+ /* output all of the times for all iterations */
+ print_indent(3);
+ output_report("Write file open details:\n");
+ output_all_info(write_open_mm_table, parms.num_iters, 4);
}
/* Print out time from last write to close */
if (pio_debug_level >= 3) {
- /* output all of the times for all iterations */
- print_indent(3);
- output_report("Write file close details:\n");
- output_all_info(write_close_mm_table, parms.num_iters, 4);
+ /* output all of the times for all iterations */
+ print_indent(3);
+ output_report("Write file close details:\n");
+ output_all_info(write_close_mm_table, parms.num_iters, 4);
}
-
-
if (!parms.h5_write_only) {
/* Read statistics */
/* Print the raw data throughput if desired */
@@ -785,25 +781,24 @@ run_test(iotype iot, parameters parms, struct options *opts)
output_all_info(read_gross_mm_table, parms.num_iters, 4);
}
- output_results(opts, "Read Open-Close", read_gross_mm_table,
- parms.num_iters, raw_size);
-
+ output_results(opts, "Read Open-Close", read_gross_mm_table,
+ parms.num_iters, raw_size);
- /* Print out time from open to first read */
- if (pio_debug_level >= 3) {
- /* output all of the times for all iterations */
- print_indent(3);
- output_report("Read file open details:\n");
- output_all_info(read_open_mm_table, parms.num_iters, 4);
- }
+ /* Print out time from open to first read */
+ if (pio_debug_level >= 3) {
+ /* output all of the times for all iterations */
+ print_indent(3);
+ output_report("Read file open details:\n");
+ output_all_info(read_open_mm_table, parms.num_iters, 4);
+ }
- /* Print out time from last read to close */
- if (pio_debug_level >= 3) {
- /* output all of the times for all iterations */
- print_indent(3);
- output_report("Read file close details:\n");
- output_all_info(read_close_mm_table, parms.num_iters, 4);
- }
+ /* Print out time from last read to close */
+ if (pio_debug_level >= 3) {
+ /* output all of the times for all iterations */
+ print_indent(3);
+ output_report("Read file close details:\n");
+ output_all_info(read_close_mm_table, parms.num_iters, 4);
+ }
}
@@ -820,8 +815,8 @@ run_test(iotype iot, parameters parms, struct options *opts)
free(read_mm_table);
free(read_gross_mm_table);
free(read_raw_mm_table);
- free(read_open_mm_table);
- free(read_close_mm_table);
+ free(read_open_mm_table);
+ free(read_close_mm_table);
}
return ret_value;
@@ -1126,58 +1121,58 @@ report_parameters(struct options *opts)
opts->min_num_procs, opts->max_num_procs);
if (opts->dim2d){
- HDfprintf(output, "rank %d: Number of bytes per process per dataset=", rank);
- recover_size_and_print((long_long)(opts->num_bpp * opts->num_bpp * opts->min_num_procs), ":");
- recover_size_and_print((long_long)(opts->num_bpp * opts->num_bpp * opts->max_num_procs), "\n");
-
- HDfprintf(output, "rank %d: Size of dataset(s)=", rank);
- recover_size_and_print((long_long)(opts->num_bpp * opts->min_num_procs), "x");
- recover_size_and_print((long_long)(opts->num_bpp * opts->min_num_procs), ":");
- recover_size_and_print((long_long)(opts->num_bpp * opts->max_num_procs), "x");
- recover_size_and_print((long_long)(opts->num_bpp * opts->max_num_procs), "\n");
-
- HDfprintf(output, "rank %d: File size=", rank);
- recover_size_and_print((long_long)(pow(opts->num_bpp * opts->min_num_procs,2)
- * opts->num_dsets), ":");
- recover_size_and_print((long_long)(pow(opts->num_bpp * opts->max_num_procs,2)
- * opts->num_dsets), "\n");
-
- HDfprintf(output, "rank %d: Transfer buffer size=", rank);
- if(opts->interleaved){
- recover_size_and_print((long_long)opts->min_xfer_size, "x");
- recover_size_and_print((long_long)opts->blk_size, ":");
- recover_size_and_print((long_long)opts->max_xfer_size, "x");
- recover_size_and_print((long_long)opts->blk_size, "\n");
- }
- else{
- recover_size_and_print((long_long)opts->blk_size, "x");
- recover_size_and_print((long_long)opts->min_xfer_size, ":");
- recover_size_and_print((long_long)opts->blk_size, "x");
- recover_size_and_print((long_long)opts->max_xfer_size, "\n");
- }
- HDfprintf(output, "rank %d: Block size=", rank);
- recover_size_and_print((long_long)opts->blk_size, "x");
- recover_size_and_print((long_long)opts->blk_size, "\n");
+ HDfprintf(output, "rank %d: Number of bytes per process per dataset=", rank);
+ recover_size_and_print((long_long)(opts->num_bpp * opts->num_bpp * opts->min_num_procs), ":");
+ recover_size_and_print((long_long)(opts->num_bpp * opts->num_bpp * opts->max_num_procs), "\n");
+
+ HDfprintf(output, "rank %d: Size of dataset(s)=", rank);
+ recover_size_and_print((long_long)(opts->num_bpp * opts->min_num_procs), "x");
+ recover_size_and_print((long_long)(opts->num_bpp * opts->min_num_procs), ":");
+ recover_size_and_print((long_long)(opts->num_bpp * opts->max_num_procs), "x");
+ recover_size_and_print((long_long)(opts->num_bpp * opts->max_num_procs), "\n");
+
+ HDfprintf(output, "rank %d: File size=", rank);
+ recover_size_and_print((long_long)(pow(opts->num_bpp * opts->min_num_procs,2)
+ * opts->num_dsets), ":");
+ recover_size_and_print((long_long)(pow(opts->num_bpp * opts->max_num_procs,2)
+ * opts->num_dsets), "\n");
+
+ HDfprintf(output, "rank %d: Transfer buffer size=", rank);
+ if(opts->interleaved){
+ recover_size_and_print((long_long)opts->min_xfer_size, "x");
+ recover_size_and_print((long_long)opts->blk_size, ":");
+ recover_size_and_print((long_long)opts->max_xfer_size, "x");
+ recover_size_and_print((long_long)opts->blk_size, "\n");
+ }
+ else{
+ recover_size_and_print((long_long)opts->blk_size, "x");
+ recover_size_and_print((long_long)opts->min_xfer_size, ":");
+ recover_size_and_print((long_long)opts->blk_size, "x");
+ recover_size_and_print((long_long)opts->max_xfer_size, "\n");
+ }
+ HDfprintf(output, "rank %d: Block size=", rank);
+ recover_size_and_print((long_long)opts->blk_size, "x");
+ recover_size_and_print((long_long)opts->blk_size, "\n");
}
else{
- HDfprintf(output, "rank %d: Number of bytes per process per dataset=", rank);
- recover_size_and_print((long_long)opts->num_bpp, "\n");
-
- HDfprintf(output, "rank %d: Size of dataset(s)=", rank);
- recover_size_and_print((long_long)(opts->num_bpp * opts->min_num_procs), ":");
- recover_size_and_print((long_long)(opts->num_bpp * opts->max_num_procs), "\n");
-
- HDfprintf(output, "rank %d: File size=", rank);
- recover_size_and_print((long_long)(opts->num_bpp * opts->min_num_procs
- * opts->num_dsets), ":");
- recover_size_and_print((long_long)(opts->num_bpp * opts->max_num_procs
- * opts->num_dsets), "\n");
-
- HDfprintf(output, "rank %d: Transfer buffer size=", rank);
- recover_size_and_print((long_long)opts->min_xfer_size, ":");
- recover_size_and_print((long_long)opts->max_xfer_size, "\n");
- HDfprintf(output, "rank %d: Block size=", rank);
- recover_size_and_print((long_long)opts->blk_size, "\n");
+ HDfprintf(output, "rank %d: Number of bytes per process per dataset=", rank);
+ recover_size_and_print((long_long)opts->num_bpp, "\n");
+
+ HDfprintf(output, "rank %d: Size of dataset(s)=", rank);
+ recover_size_and_print((long_long)(opts->num_bpp * opts->min_num_procs), ":");
+ recover_size_and_print((long_long)(opts->num_bpp * opts->max_num_procs), "\n");
+
+ HDfprintf(output, "rank %d: File size=", rank);
+ recover_size_and_print((long_long)(opts->num_bpp * opts->min_num_procs
+ * opts->num_dsets), ":");
+ recover_size_and_print((long_long)(opts->num_bpp * opts->max_num_procs
+ * opts->num_dsets), "\n");
+
+ HDfprintf(output, "rank %d: Transfer buffer size=", rank);
+ recover_size_and_print((long_long)opts->min_xfer_size, ":");
+ recover_size_and_print((long_long)opts->max_xfer_size, "\n");
+ HDfprintf(output, "rank %d: Block size=", rank);
+ recover_size_and_print((long_long)opts->blk_size, "\n");
}
HDfprintf(output, "rank %d: Block Pattern in Dataset=", rank);
@@ -1245,13 +1240,13 @@ parse_command_line(int argc, char *argv[])
cl_opts->io_types = 0; /* will set default after parsing options */
cl_opts->num_dsets = 1;
cl_opts->num_files = 1;
- cl_opts->num_bpp = 256 * ONE_KB;
+ cl_opts->num_bpp = 0;
cl_opts->num_iters = 1;
cl_opts->max_num_procs = comm_world_nprocs_g;
cl_opts->min_num_procs = 1;
- cl_opts->max_xfer_size = 1 * ONE_MB;
- cl_opts->min_xfer_size = 128 * ONE_KB;
- cl_opts->blk_size = 128 * ONE_KB; /* Default to writing 128K per block */
+ cl_opts->max_xfer_size = 0;
+ cl_opts->min_xfer_size = 0;
+ cl_opts->blk_size = 0;
cl_opts->interleaved = 0; /* Default to contiguous blocks in dataset */
cl_opts->collective = 0; /* Default to independent I/O access */
cl_opts->dim2d = 0; /* Default to 1D */
@@ -1361,10 +1356,10 @@ parse_command_line(int argc, char *argv[])
/* Turn on time printing */
cl_opts->print_times = TRUE;
break;
- case 'v':
+ case 'v':
/* Turn on verify data correctness*/
- cl_opts->verify = TRUE;
- break;
+ cl_opts->verify = TRUE;
+ break;
default:
fprintf(stderr, "pio_perf: invalid --debug option %s\n", buf);
exit(EXIT_FAILURE);
@@ -1428,28 +1423,50 @@ parse_command_line(int argc, char *argv[])
}
}
+
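+ /* Fill in geometry-dependent defaults for any sizes the user did not specify */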
+ if (cl_opts->num_bpp == 0){
+ if (cl_opts->dim2d == 0)
+ cl_opts->num_bpp = 256 * ONE_KB;
+ else
+ cl_opts->num_bpp = 8 * ONE_KB;
+ }
+
+ if (cl_opts->max_xfer_size == 0)
+ cl_opts->max_xfer_size = cl_opts->num_bpp;
+
+ if (cl_opts->min_xfer_size == 0)
+ cl_opts->min_xfer_size = (cl_opts->num_bpp)/2;
+
+ if (cl_opts->blk_size == 0)
+ cl_opts->blk_size = (cl_opts->num_bpp)/2;
+
+
/* set default if none specified yet */
if (!cl_opts->io_types)
- cl_opts->io_types = PIO_HDF5 | PIO_MPI | PIO_POSIX; /* run all API */
+ cl_opts->io_types = PIO_HDF5 | PIO_MPI | PIO_POSIX; /* run all API */
/* verify parameters sanity. Adjust if needed. */
/* cap xfer_size with bytes per process */
- if (cl_opts->min_xfer_size > cl_opts->num_bpp)
- cl_opts->min_xfer_size = cl_opts->num_bpp;
- if (cl_opts->max_xfer_size > cl_opts->num_bpp)
- cl_opts->max_xfer_size = cl_opts->num_bpp;
+ if (!cl_opts->dim2d) {
+ if (cl_opts->min_xfer_size > cl_opts->num_bpp)
+ cl_opts->min_xfer_size = cl_opts->num_bpp;
+ if (cl_opts->max_xfer_size > cl_opts->num_bpp)
+ cl_opts->max_xfer_size = cl_opts->num_bpp;
+ }
if (cl_opts->min_xfer_size > cl_opts->max_xfer_size)
- cl_opts->min_xfer_size = cl_opts->max_xfer_size;
+ cl_opts->min_xfer_size = cl_opts->max_xfer_size;
+ if (cl_opts->blk_size > cl_opts->num_bpp )
+ cl_opts->blk_size = cl_opts->num_bpp;
/* check range of number of processes */
if (cl_opts->min_num_procs <= 0)
- cl_opts->min_num_procs = 1;
+ cl_opts->min_num_procs = 1;
if (cl_opts->max_num_procs <= 0)
- cl_opts->max_num_procs = 1;
+ cl_opts->max_num_procs = 1;
if (cl_opts->min_num_procs > cl_opts->max_num_procs)
- cl_opts->min_num_procs = cl_opts->max_num_procs;
+ cl_opts->min_num_procs = cl_opts->max_num_procs;
/* check iteration */
if (cl_opts->num_iters <= 0)
- cl_opts->num_iters = 1;
+ cl_opts->num_iters = 1;
return cl_opts;
}
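The defaulting chain above (num_bpp chosen by geometry, then the transfer buffer
and block sizes derived from it) can be restated as a small standalone helper.
The struct and function names here are hypothetical and only mirror pio_perf's
option fields; this is an illustrative sketch, not code from the benchmark.

#include <stddef.h>
#include <stdio.h>

#define ONE_KB 1024

struct size_defaults { size_t num_bpp, min_xfer, max_xfer, blk_size; };

/* Mirror of the defaulting rules: 256K (1D) or 8K (2D) bytes per process per
 * dataset; max transfer = num_bpp; min transfer and block size = num_bpp/2. */
static struct size_defaults derive_defaults(int dim2d)
{
    struct size_defaults d;
    d.num_bpp  = dim2d ? 8 * ONE_KB : 256 * ONE_KB;
    d.max_xfer = d.num_bpp;
    d.min_xfer = d.num_bpp / 2;
    d.blk_size = d.num_bpp / 2;
    return d;
}

int main(void)
{
    struct size_defaults d1 = derive_defaults(0), d2 = derive_defaults(1);
    printf("1D: bpp=%zu min=%zu max=%zu blk=%zu\n",
           d1.num_bpp, d1.min_xfer, d1.max_xfer, d1.blk_size);
    printf("2D: bpp=%zu min=%zu max=%zu blk=%zu\n",
           d2.num_bpp, d2.min_xfer, d2.max_xfer, d2.blk_size);
    return 0;
}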
@@ -1530,7 +1547,7 @@ usage(const char *prog)
#endif /* 0 */
printf(" -B S, --block-size=S Block size within transfer buffer\n");
printf(" (see below for description)\n");
- printf(" [default:128K]\n");
+ printf(" [default: half the number of bytes per processor per dataset]\n");
printf(" -c, --chunk Create HDF5 datasets chunked [default: off]\n");
printf(" -C, --collective Use collective I/O for MPI and HDF5 APIs\n");
printf(" [default: off (i.e. independent I/O)]\n");
@@ -1538,7 +1555,7 @@ usage(const char *prog)
printf(" -D DL, --debug=DL Indicate the debugging level\n");
printf(" [default: no debugging]\n");
printf(" -e S, --num-bytes=S Number of bytes per process per dataset\n");
- printf(" [default: 256K]\n");
+ printf(" [default: 256K for 1D, 8K for 2D]\n");
printf(" -F N, --num-files=N Number of files [default: 1]\n");
printf(" -g, --geometry Use 2D geometry [default: 1D]\n");
printf(" -i N, --num-iterations=N Number of iterations to perform [default: 1]\n");
@@ -1553,8 +1570,10 @@ usage(const char *prog)
printf(" -T S, --threshold=S Threshold for alignment of objects in HDF5 file\n");
printf(" [default: 1]\n");
printf(" -w, --write-only Perform write tests not the read tests\n");
- printf(" -x S, --min-xfer-size=S Minimum transfer buffer size [default: 128K]\n");
- printf(" -X S, --max-xfer-size=S Maximum transfer buffer size [default: 1M]\n");
+ printf(" -x S, --min-xfer-size=S Minimum transfer buffer size\n");
+ printf(" [default: half the number of bytes per processor per dataset]\n");
+ printf(" -X S, --max-xfer-size=S Maximum transfer buffer size\n");
+ printf(" [default: the number of bytes per processor per dataset]\n");
printf("\n");
printf(" F - is a filename.\n");
printf(" N - is an integer >=0.\n");