-rw-r--r--  MANIFEST                        |    3
-rwxr-xr-x  perform/build_h5perf_alone.sh   |   28
-rw-r--r--  perform/pio_engine.c            | 2478
-rw-r--r--  perform/pio_perf.c              |  270
-rw-r--r--  perform/pio_perf.h              |    6
-rw-r--r--  perform/pio_standalone.c        |  282
-rw-r--r--  perform/pio_standalone.h        |  197
-rw-r--r--  perform/pio_timer.c             |   59
-rw-r--r--  perform/pio_timer.h             |    4
9 files changed, 2544 insertions, 783 deletions
diff --git a/MANIFEST b/MANIFEST
index e79276f..3d30501 100644
--- a/MANIFEST
+++ b/MANIFEST
@@ -1103,6 +1103,7 @@
./perform/Dependencies
./perform/Makefile.in
./perform/benchpar.c
+./perform/build_h5perf_alone.sh
./perform/chunk.c
./perform/gen_report.pl
./perform/iopipe.c
@@ -1113,6 +1114,8 @@
./perform/pio_engine.c
./perform/pio_perf.c
./perform/pio_perf.h
+./perform/pio_standalone.c
+./perform/pio_standalone.h
./perform/pio_timer.c
./perform/pio_timer.h
./perform/zip_perf.c
diff --git a/perform/build_h5perf_alone.sh b/perform/build_h5perf_alone.sh
new file mode 100755
index 0000000..b65e863
--- /dev/null
+++ b/perform/build_h5perf_alone.sh
@@ -0,0 +1,28 @@
+#! /bin/sh -x
+#
+# Copyright by The HDF Group.
+# Copyright by the Board of Trustees of the University of Illinois.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the files COPYING and Copyright.html. COPYING can be found at the root
+# of the source code distribution tree; Copyright.html can be found at the
+# root level of an installed copy of the electronic HDF5 document set and
+# is linked from the top-level documents page. It can also be found at
+# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
+# access to either file, you may request a copy from help@hdfgroup.org.
+#
+# Name: build_h5perf_alone.sh
+#
+# Purpose: Build the h5perf tool in standalone mode.
+#
+# Created: Albert Cheng, 2005/09/18
+#
+# Modification:
+#
+
+# Default to use h5pcc to build h5perf unless $H5CC is defined.
+#
+h5pcc=${H5CC:-h5pcc}
+$h5pcc -DSTANDALONE pio_perf.c pio_engine.c pio_timer.c -o h5perf
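
The script drives the whole build through h5pcc with -DSTANDALONE, and the compiler wrapper can be overridden by exporting H5CC first (for example, H5CC=/path/to/h5pcc ./build_h5perf_alone.sh). A minimal sketch of the conditional-include pattern that flag is expected to select, assuming pio_perf.h chooses between the in-tree test headers and the pio_standalone.* files added by this patch (the guard and header names here are illustrative):

    /* Sketch only: with -DSTANDALONE the benchmark sources use the
     * lightweight replacements added by this patch; without it they
     * build against the regular in-tree test support. */
    #ifdef STANDALONE
    #  include "pio_standalone.h"   /* standalone shims (assumed role)   */
    #else
    #  include "h5test.h"           /* in-tree HDF5 test-harness support */
    #endif
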
diff --git a/perform/pio_engine.c b/perform/pio_engine.c
index 72dfae9..6124bce 100644
--- a/perform/pio_engine.c
+++ b/perform/pio_engine.c
@@ -44,31 +44,39 @@
/* Macro definitions */
+#if H5_VERS_MAJOR == 1 && H5_VERS_MINOR == 6
+# define H5DCREATE(fd, name, type, space, dcpl) H5Dcreate(fd, name, type, space, dcpl)
+# define H5DOPEN(fd, name) H5Dopen(fd, name)
+#else
+# define H5DCREATE(fd, name, type, space, dcpl) H5Dcreate2(fd, name, type, space, H5P_DEFAULT, dcpl, H5P_DEFAULT)
+# define H5DOPEN(fd, name) H5Dopen2(fd, name, H5P_DEFAULT)
+#endif
+
/* sizes of various items. these sizes won't change during program execution */
/* The following three must have the same type */
#define ELMT_SIZE (sizeof(unsigned char)) /* we're doing bytes */
#define ELMT_MPI_TYPE MPI_BYTE
#define ELMT_H5_TYPE H5T_NATIVE_UCHAR
-#define GOTOERROR(errcode) { ret_code = errcode; goto done; }
-#define GOTODONE { goto done; }
+#define GOTOERROR(errcode) { ret_code = errcode; goto done; }
+#define GOTODONE { goto done; }
#define ERRMSG(mesg) { \
fprintf(stderr, "Proc %d: ", pio_mpi_rank_g); \
fprintf(stderr, "*** Assertion failed (%s) at line %4d in %s\n", \
- mesg, (int)__LINE__, __FILE__); \
+ mesg, (int)__LINE__, __FILE__); \
}
#define MSG(mesg) { \
fprintf(stderr, "Proc %d: ", pio_mpi_rank_g); \
fprintf(stderr, "(%s) at line %4d in %s\n", \
- mesg, (int)__LINE__, __FILE__); \
+ mesg, (int)__LINE__, __FILE__); \
}
/* verify: if val is false (0), print mesg. */
#define VRFY(val, mesg) do { \
if (!val) { \
- ERRMSG(mesg); \
- GOTOERROR(FAIL); \
+ ERRMSG(mesg); \
+ GOTOERROR(FAIL); \
} \
} while(0)
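
The H5DCREATE/H5DOPEN wrappers added at the top of this hunk absorb the API change after the 1.6 series, where H5Dcreate and H5Dopen gained extra property-list arguments (H5Dcreate2/H5Dopen2). A minimal sketch of how the engine calls them later in this patch, with an illustrative helper name and default creation properties:

    /* Sketch: the same call compiles against 1.6 and 1.8+ headers; the
     * macro supplies the extra H5P_DEFAULT arguments only when the
     * newer API is available. */
    static hid_t
    create_byte_dataset(hid_t file_id, const char *name, hid_t space_id)
    {
        return H5DCREATE(file_id, name, H5T_NATIVE_UCHAR, space_id,
                         H5P_DEFAULT);  /* default dcpl for this sketch */
    }
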
@@ -88,9 +96,9 @@ enum {
};
/* Global variables */
-static int clean_file_g = -1; /*whether to cleanup temporary test */
- /*files. -1 is not defined; */
- /*0 is no cleanup; 1 is do cleanup */
+static int clean_file_g = -1; /*whether to cleanup temporary test */
+/*files. -1 is not defined; */
+/*0 is no cleanup; 1 is do cleanup */
/*
* In a parallel machine, the filesystem suitable for compiling is
@@ -100,7 +108,7 @@ static int clean_file_g = -1; /*whether to cleanup temporary test */
*/
#ifndef HDF5_PARAPREFIX
# ifdef __PUMAGON__
- /* For the PFS of TFLOPS */
+/* For the PFS of TFLOPS */
# define HDF5_PARAPREFIX "pfs:/pfs_grande/multi/tmp_1"
# else
# define HDF5_PARAPREFIX ""
@@ -120,13 +128,13 @@ typedef union _file_descr {
/* local functions */
static char *pio_create_filename(iotype iot, const char *base_name,
- char *fullname, size_t size);
+ char *fullname, size_t size);
static herr_t do_write(results *res, file_descr *fd, parameters *parms,
long ndsets, off_t nelmts, size_t buf_size, void *buffer);
static herr_t do_read(results *res, file_descr *fd, parameters *parms,
long ndsets, off_t nelmts, size_t buf_size, void *buffer /*out*/);
static herr_t do_fopen(parameters *param, char *fname, file_descr *fd /*out*/,
- int flags);
+ int flags);
static herr_t do_fclose(iotype iot, file_descr *fd);
static void do_cleanupfile(iotype iot, char *fname);
@@ -138,38 +146,45 @@ static void gpfs_clear_file_cache(int handle);
static void gpfs_cancel_hints(int handle);
static void gpfs_start_data_shipping(int handle, int num_insts);
static void gpfs_start_data_ship_map(int handle, int partition_size,
- int agent_count, int *agent_node_num);
+ int agent_count, int *agent_node_num);
static void gpfs_stop_data_shipping(int handle);
static void gpfs_invalidate_file_cache(const char *filename);
#endif /* H5_HAVE_GPFS */
/*
- * Function: do_pio
- * Purpose: PIO Engine where Parallel IO are executed.
- * Return: results
- * Programmer: Albert Cheng, Bill Wendling 2001/12/12
+ * Function: do_pio
+ * Purpose: PIO Engine where Parallel IO is executed.
+ * Return: results
+ * Programmer: Albert Cheng, Bill Wendling 2001/12/12
* Modifications:
+ * Added 2D testing (Christian Chilan, 10. August 2005)
*/
-results
+ results
do_pio(parameters param)
{
/* return codes */
herr_t ret_code = 0; /*return code */
results res;
- file_descr fd;
+ file_descr fd;
iotype iot;
char fname[FILENAME_MAX];
- long nf;
+ long nf;
long ndsets;
- off_t nbytes; /* Number of bytes per dataset */
- char *buffer = NULL; /*data buffer pointer */
- size_t buf_size; /*data buffer size in bytes */
- size_t blk_size; /*data block size in bytes */
+ off_t nbytes; /*number of bytes per dataset */
+ off_t snbytes; /*general dataset size */
+ /*for 1D, it is the actual dataset size */
+ /*for 2D, it is the size of a side of the dataset square */
+ char *buffer = NULL; /*data buffer pointer */
+ size_t buf_size; /*general buffer size in bytes */
+ /*for 1D, it is the actual buffer size */
+ /*for 2D, it is the length of the buffer rectangle */
+ size_t blk_size; /*data block size in bytes */
+ size_t bsize; /*actual buffer size */
/* HDF5 variables */
- herr_t hrc; /*HDF5 return code */
+ herr_t hrc; /*HDF5 return code */
/* Sanity check parameters */
@@ -200,142 +215,164 @@ do_pio(parameters param)
buf_size = param.buf_size;
blk_size = param.blk_size;
+ if (!param.dim2d){
+ snbytes = nbytes; /* General dataset size */
+ bsize = buf_size; /* Actual buffer size */
+ }
+ else {
+ snbytes = (off_t)sqrt(nbytes); /* General dataset size */
+ bsize = buf_size * blk_size; /* Actual buffer size */
+ }
+
if (param.num_files < 0 ) {
- fprintf(stderr,
- "number of files must be >= 0 (%ld)\n",
- param.num_files);
- GOTOERROR(FAIL);
+ fprintf(stderr,
+ "number of files must be >= 0 (%ld)\n",
+ param.num_files);
+ GOTOERROR(FAIL);
}
if (ndsets < 0 ) {
- fprintf(stderr,
- "number of datasets per file must be >= 0 (%ld)\n",
- ndsets);
- GOTOERROR(FAIL);
+ fprintf(stderr,
+ "number of datasets per file must be >= 0 (%ld)\n",
+ ndsets);
+ GOTOERROR(FAIL);
}
if (param.num_procs <= 0 ) {
- fprintf(stderr,
- "maximum number of process to use must be > 0 (%d)\n",
- param.num_procs);
- GOTOERROR(FAIL);
+ fprintf(stderr,
+ "maximum number of process to use must be > 0 (%d)\n",
+ param.num_procs);
+ GOTOERROR(FAIL);
}
/* Validate transfer buffer size & block size*/
if(blk_size<=0) {
- HDfprintf(stderr,
- "Transfer block size (%Hd) must be > 0\n", (long_long)blk_size);
- GOTOERROR(FAIL);
+ HDfprintf(stderr,
+ "Transfer block size (%Hd) must be > 0\n", (long_long)blk_size);
+ GOTOERROR(FAIL);
}
if(buf_size<=0) {
- HDfprintf(stderr,
- "Transfer buffer size (%Hd) must be > 0\n", (long_long)buf_size);
- GOTOERROR(FAIL);
+ HDfprintf(stderr,
+ "Transfer buffer size (%Hd) must be > 0\n", (long_long)buf_size);
+ GOTOERROR(FAIL);
}
if ((buf_size % blk_size) != 0){
- HDfprintf(stderr,
- "Transfer buffer size (%Hd) must be a multiple of the "
- "interleaved I/O block size (%Hd)\n",
- (long_long)buf_size, (long_long)blk_size);
- GOTOERROR(FAIL);
+ HDfprintf(stderr,
+ "Transfer buffer size (%Hd) must be a multiple of the "
+ "interleaved I/O block size (%Hd)\n",
+ (long_long)buf_size, (long_long)blk_size);
+ GOTOERROR(FAIL);
}
- if((nbytes%pio_mpi_nprocs_g)!=0) {
- HDfprintf(stderr,
- "Dataset size (%Hd) must be a multiple of the "
- "number of processes (%d)\n",
- (long_long)nbytes, pio_mpi_nprocs_g);
- GOTOERROR(FAIL);
+ if((snbytes%pio_mpi_nprocs_g)!=0) {
+ HDfprintf(stderr,
+ "Dataset size (%Hd) must be a multiple of the "
+ "number of processes (%d)\n",
+ (long_long)snbytes, pio_mpi_nprocs_g);
+ GOTOERROR(FAIL);
}
- if(((nbytes/pio_mpi_nprocs_g)%buf_size)!=0) {
+
+ if (!param.dim2d){
+ if(((snbytes/pio_mpi_nprocs_g)%buf_size)!=0) {
HDfprintf(stderr,
"Dataset size/process (%Hd) must be a multiple of the "
"trasfer buffer size (%Hd)\n",
- (long_long)(nbytes/pio_mpi_nprocs_g), (long_long)buf_size);
- GOTOERROR(FAIL);
+ (long_long)(snbytes/pio_mpi_nprocs_g), (long_long)buf_size);
+ GOTOERROR(FAIL);
+ }
+ }
+ else {
+ if((snbytes%buf_size)!=0) {
+ HDfprintf(stderr,
+ "Dataset side size (%Hd) must be a multiple of the "
+ "trasfer buffer size (%Hd)\n",
+ (long_long)snbytes, (long_long)buf_size);
+ GOTOERROR(FAIL);
+ }
}
/* Allocate transfer buffer */
- if ((buffer = malloc(buf_size)) == NULL){
- HDfprintf(stderr, "malloc for transfer buffer size (%Hd) failed\n",
- (long_long)(buf_size));
- GOTOERROR(FAIL);
+ if ((buffer = malloc(bsize)) == NULL){
+ HDfprintf(stderr, "malloc for transfer buffer size (%Hd) failed\n",
+ (long_long)(bsize));
+ GOTOERROR(FAIL);
}
if (pio_debug_level >= 4) {
- int myrank;
+ int myrank;
- MPI_Comm_rank(pio_comm_g, &myrank);
+ MPI_Comm_rank(pio_comm_g, &myrank);
- /* output all of the times for all iterations */
- if (myrank == 0)
- fprintf(output, "Timer details:\n");
+ /* output all of the times for all iterations */
+ if (myrank == 0)
+ fprintf(output, "Timer details:\n");
}
for (nf = 1; nf <= param.num_files; nf++) {
- /*
- * Write performance measurement
- */
- /* Open file for write */
- char base_name[256];
-
- sprintf(base_name, "#pio_tmp_%lu", nf);
- pio_create_filename(iot, base_name, fname, sizeof(fname));
- if (pio_debug_level > 0)
- HDfprintf(output, "rank %d: data filename=%s\n",
- pio_mpi_rank_g, fname);
-
- /* Need barrier to make sure everyone starts at the same time */
- MPI_Barrier(pio_comm_g);
+ /*
+ * Write performance measurement
+ */
+ /* Open file for write */
+ char base_name[256];
- set_time(res.timers, HDF5_GROSS_WRITE_FIXED_DIMS, START);
- hrc = do_fopen(&param, fname, &fd, PIO_CREATE | PIO_WRITE);
+ sprintf(base_name, "#pio_tmp_%lu", nf);
+ pio_create_filename(iot, base_name, fname, sizeof(fname));
+ if (pio_debug_level > 0)
+ HDfprintf(output, "rank %d: data filename=%s\n",
+ pio_mpi_rank_g, fname);
- VRFY((hrc == SUCCESS), "do_fopen failed");
+ /* Need barrier to make sure everyone starts at the same time */
+ MPI_Barrier(pio_comm_g);
- set_time(res.timers, HDF5_FINE_WRITE_FIXED_DIMS, START);
- hrc = do_write(&res, &fd, &param, ndsets, nbytes, buf_size, buffer);
- set_time(res.timers, HDF5_FINE_WRITE_FIXED_DIMS, STOP);
+ set_time(res.timers, HDF5_GROSS_WRITE_FIXED_DIMS, START);
+ hrc = do_fopen(&param, fname, &fd, PIO_CREATE | PIO_WRITE);
- VRFY((hrc == SUCCESS), "do_write failed");
+ VRFY((hrc == SUCCESS), "do_fopen failed");
- /* Close file for write */
- hrc = do_fclose(iot, &fd);
+ set_time(res.timers, HDF5_FINE_WRITE_FIXED_DIMS, START);
+ hrc = do_write(&res, &fd, &param, ndsets, nbytes, buf_size, buffer);
+ set_time(res.timers, HDF5_FINE_WRITE_FIXED_DIMS, STOP);
- set_time(res.timers, HDF5_GROSS_WRITE_FIXED_DIMS, STOP);
- VRFY((hrc == SUCCESS), "do_fclose failed");
+ VRFY((hrc == SUCCESS), "do_write failed");
+
+ /* Close file for write */
+ hrc = do_fclose(iot, &fd);
+
+ set_time(res.timers, HDF5_GROSS_WRITE_FIXED_DIMS, STOP);
+ VRFY((hrc == SUCCESS), "do_fclose failed");
- if (!param.h5_write_only) {
- /*
- * Read performance measurement
- */
- /* Need barrier to make sure everyone is done writing and has
- * closed the file. Also to make sure everyone starts reading
- * at the same time.
- */
- MPI_Barrier(pio_comm_g);
+ if (!param.h5_write_only) {
+ /*
+ * Read performance measurement
+ */
+ /* Need barrier to make sure everyone is done writing and has
+ * closed the file. Also to make sure everyone starts reading
+ * at the same time.
+ */
+ MPI_Barrier(pio_comm_g);
- /* Open file for read */
- set_time(res.timers, HDF5_GROSS_READ_FIXED_DIMS, START);
- hrc = do_fopen(&param, fname, &fd, PIO_READ);
+ /* Open file for read */
+ set_time(res.timers, HDF5_GROSS_READ_FIXED_DIMS, START);
+ hrc = do_fopen(&param, fname, &fd, PIO_READ);
- VRFY((hrc == SUCCESS), "do_fopen failed");
+ VRFY((hrc == SUCCESS), "do_fopen failed");
- set_time(res.timers, HDF5_FINE_READ_FIXED_DIMS, START);
- hrc = do_read(&res, &fd, &param, ndsets, nbytes, buf_size, buffer);
- set_time(res.timers, HDF5_FINE_READ_FIXED_DIMS, STOP);
- VRFY((hrc == SUCCESS), "do_read failed");
+ set_time(res.timers, HDF5_FINE_READ_FIXED_DIMS, START);
+ hrc = do_read(&res, &fd, &param, ndsets, nbytes, buf_size, buffer);
+ set_time(res.timers, HDF5_FINE_READ_FIXED_DIMS, STOP);
+ VRFY((hrc == SUCCESS), "do_read failed");
- /* Close file for read */
- hrc = do_fclose(iot, &fd);
+ /* Close file for read */
+ hrc = do_fclose(iot, &fd);
- set_time(res.timers, HDF5_GROSS_READ_FIXED_DIMS, STOP);
- VRFY((hrc == SUCCESS), "do_fclose failed");
- }
+ set_time(res.timers, HDF5_GROSS_READ_FIXED_DIMS, STOP);
+ VRFY((hrc == SUCCESS), "do_fclose failed");
+ }
- /* Need barrier to make sure everyone is done with the file */
- /* before it may be removed by do_cleanupfile */
- MPI_Barrier(pio_comm_g);
- do_cleanupfile(iot, fname);
+ /* Need barrier to make sure everyone is done with the file */
+ /* before it may be removed by do_cleanupfile */
+ MPI_Barrier(pio_comm_g);
+ do_cleanupfile(iot, fname);
}
done:
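
A quick worked example of the new 2-D size bookkeeping in this hunk, with illustrative numbers rather than values taken from the patch, assuming <math.h> and the usual system headers:

    /* Worked example of the 2-D sizing above (illustrative values). */
    off_t  nbytes   = 1048576;                      /* bytes per dataset */
    off_t  snbytes  = (off_t)sqrt((double)nbytes);  /* 1024 bytes a side */
    size_t buf_size = 256, blk_size = 16;
    size_t bsize    = buf_size * blk_size;          /* 4096-byte buffer  */
    /* 1024 % 4 == 0 (four ranks) and 1024 % 256 == 0, so this
     * configuration clears the 2-D validation checks in the hunk above. */
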
@@ -347,21 +384,21 @@ done:
switch (iot) {
case POSIXIO:
if (fd.posixfd != -1)
- hrc = do_fclose(iot, &fd);
+ hrc = do_fclose(iot, &fd);
break;
case MPIO:
if (fd.mpifd != MPI_FILE_NULL)
- hrc = do_fclose(iot, &fd);
+ hrc = do_fclose(iot, &fd);
break;
case PHDF5:
if (fd.h5fd != -1)
- hrc = do_fclose(iot, &fd);
+ hrc = do_fclose(iot, &fd);
break;
}
/* release generic resources */
if(buffer)
- free(buffer);
+ free(buffer);
res.ret_code = ret_code;
return res;
}
@@ -376,7 +413,7 @@ done:
* Programmer: Bill Wendling, 21. November 2001
* Modifications:
*/
-static char *
+ static char *
pio_create_filename(iotype iot, const char *base_name, char *fullname, size_t size)
{
const char *prefix, *suffix="";
@@ -384,7 +421,7 @@ pio_create_filename(iotype iot, const char *base_name, char *fullname, size_t si
size_t i, j;
if (!base_name || !fullname || size < 1)
- return NULL;
+ return NULL;
memset(fullname, 0, size);
@@ -405,134 +442,143 @@ pio_create_filename(iotype iot, const char *base_name, char *fullname, size_t si
#ifdef HDF5_PARAPREFIX
if (!prefix)
- prefix = HDF5_PARAPREFIX;
+ prefix = HDF5_PARAPREFIX;
#endif /* HDF5_PARAPREFIX */
/* Prepend the prefix value to the base name */
if (prefix && *prefix) {
- /* If the prefix specifies the HDF5_PARAPREFIX directory, then
- * default to using the "/tmp/$USER" or "/tmp/$LOGIN"
- * directory instead. */
- register char *user, *login, *subdir;
+ /* If the prefix specifies the HDF5_PARAPREFIX directory, then
+ * default to using the "/tmp/$USER" or "/tmp/$LOGIN"
+ * directory instead. */
+ register char *user, *login, *subdir;
- user = getenv("USER");
- login = getenv("LOGIN");
- subdir = (user ? user : login);
+ user = getenv("USER");
+ login = getenv("LOGIN");
+ subdir = (user ? user : login);
- if (subdir) {
- for (i = 0; i < size && prefix[i]; i++)
- fullname[i] = prefix[i];
+ if (subdir) {
+ for (i = 0; i < size && prefix[i]; i++)
+ fullname[i] = prefix[i];
- fullname[i++] = '/';
+ fullname[i++] = '/';
- for (j = 0; i < size && subdir[j]; i++, j++)
- fullname[i] = subdir[j];
- } else {
- /* We didn't append the prefix yet */
- strncpy(fullname, prefix, MIN(strlen(prefix), size));
- }
-
- if ((strlen(fullname) + strlen(base_name) + 1) < size) {
- /* Append the base_name with a slash first. Multiple slashes are
- * handled below. */
- h5_stat_t buf;
-
- if (HDstat(fullname, &buf) < 0)
- /* The directory doesn't exist just yet */
- if (mkdir(fullname, (mode_t)0755) < 0 && errno != EEXIST) {
- /* We couldn't make the "/tmp/${USER,LOGIN}" subdirectory.
- * Default to PREFIX's original prefix value. */
- strcpy(fullname, prefix);
- }
+ for (j = 0; i < size && subdir[j]; i++, j++)
+ fullname[i] = subdir[j];
+ } else {
+ /* We didn't append the prefix yet */
+ strncpy(fullname, prefix, MIN(strlen(prefix), size));
+ }
- strcat(fullname, "/");
- strcat(fullname, base_name);
- } else {
- /* Buffer is too small */
- return NULL;
+ if ((strlen(fullname) + strlen(base_name) + 1) < size) {
+ /* Append the base_name with a slash first. Multiple slashes are
+ * handled below. */
+ h5_stat_t buf;
+
+ if (HDstat(fullname, &buf) < 0)
+ /* The directory doesn't exist just yet */
+ if (mkdir(fullname, (mode_t)0755) < 0 && errno != EEXIST) {
+ /* We couldn't make the "/tmp/${USER,LOGIN}" subdirectory.
+ * Default to PREFIX's original prefix value. */
+ strcpy(fullname, prefix);
}
- } else if (strlen(base_name) >= size) {
+
+ strcat(fullname, "/");
+ strcat(fullname, base_name);
+ } else {
/* Buffer is too small */
return NULL;
+ }
+ } else if (strlen(base_name) >= size) {
+ /* Buffer is too small */
+ return NULL;
} else {
- strcpy(fullname, base_name);
+ strcpy(fullname, base_name);
}
/* Append a suffix */
if (suffix) {
- if (strlen(fullname) + strlen(suffix) >= size)
- return NULL;
+ if (strlen(fullname) + strlen(suffix) >= size)
+ return NULL;
- strcat(fullname, suffix);
+ strcat(fullname, suffix);
}
/* Remove any double slashes in the filename */
for (ptr = fullname, i = j = 0; ptr && i < size; i++, ptr++) {
- if (*ptr != '/' || last != '/')
- fullname[j++] = *ptr;
+ if (*ptr != '/' || last != '/')
+ fullname[j++] = *ptr;
- last = *ptr;
+ last = *ptr;
}
return fullname;
}
/*
- * Function: do_write
- * Purpose: Write the required amount of data to the file.
- * Return: SUCCESS or FAIL
- * Programmer: Albert Cheng, Bill Wendling, 2001/12/13
+ * Function: do_write
+ * Purpose: Write the required amount of data to the file.
+ * Return: SUCCESS or FAIL
+ * Programmer: Albert Cheng, Bill Wendling, 2001/12/13
* Modifications:
+ * Added 2D testing (Christian Chilan, 10. August 2005)
*/
-static herr_t
+ static herr_t
do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
- off_t nbytes, size_t buf_size, void *buffer)
+ off_t nbytes, size_t buf_size, void *buffer)
{
int ret_code = SUCCESS;
int rc; /*routine return code */
long ndset;
size_t blk_size; /* The block size to subdivide the xfer buffer into */
off_t nbytes_xfer; /* Total number of bytes transferred so far */
+ size_t nbytes_xfer_advance; /* Number of bytes transferred in a single I/O operation */
size_t nbytes_toxfer; /* Number of bytes to transfer a particular time */
char dname[64];
off_t dset_offset=0; /*dataset offset in a file */
- off_t bytes_begin; /*first elmt this process transfer */
+ off_t bytes_begin[2]; /*first elmt this process transfer */
off_t bytes_count; /*number of elmts this process transfer */
+ off_t snbytes=0; /*size of a side of the dataset square */
unsigned char *buf_p; /* Current buffer pointer */
/* POSIX variables */
off_t file_offset; /* File offset of the next transfer */
+ off_t file_offset_advance; /* File offset advance after each I/O operation */
off_t posix_file_offset; /* Base file offset of the next transfer */
/* MPI variables */
- MPI_Offset mpi_file_offset;/* Base file offset of the next transfer*/
- MPI_Offset mpi_offset; /* Offset in MPI file */
- MPI_Datatype mpi_file_type; /* MPI derived type for file */
- MPI_Datatype mpi_blk_type; /* MPI derived type for buffer */
- MPI_Status mpi_status;
- int mrc; /* MPI return code */
+ MPI_Offset mpi_file_offset; /* Base file offset of the next transfer*/
+ MPI_Offset mpi_offset; /* Offset in MPI file */
+ MPI_Offset mpi_offset_advance; /* Offset advance after each I/O operation */
+ MPI_Datatype mpi_file_type; /* MPI derived type for 1D file */
+ MPI_Datatype mpi_blk_type; /* MPI derived type for 1D buffer */
+ MPI_Datatype mpi_cont_type; /* MPI derived type for 2D contiguous file */
+ MPI_Datatype mpi_partial_buffer_cont; /* MPI derived type for partial 2D contiguous buffer */
+ MPI_Datatype mpi_inter_type; /* MPI derived type for 2D interleaved file */
+ MPI_Datatype mpi_partial_buffer_inter; /* MPI derived type for partial 2D interleaved buffer */
+ MPI_Datatype mpi_full_buffer; /* MPI derived type for 2D full buffer */
+ MPI_Datatype mpi_full_chunk; /* MPI derived type for 2D full chunk */
+ MPI_Datatype mpi_chunk_inter_type; /* MPI derived type for 2D chunk interleaved file */
+ MPI_Datatype mpi_collective_type; /* Generic MPI derived type for 2D collective access */
+ MPI_Status mpi_status;
+ int mrc; /* MPI return code */
/* HDF5 variables */
herr_t hrc; /*HDF5 return code */
- hsize_t h5dims[1]; /*dataset dim sizes */
+ hsize_t h5dims[2]; /*dataset dim sizes */
hid_t h5dset_space_id = -1; /*dataset space ID */
hid_t h5mem_space_id = -1; /*memory dataspace ID */
hid_t h5ds_id = -1; /*dataset handle */
- hsize_t h5block[1]; /*dataspace selection */
- hsize_t h5stride[1];
- hsize_t h5count[1];
- hsize_t h5start[1];
- hssize_t h5offset[1]; /* Selection offset within dataspace */
+ hsize_t h5block[2]; /*dataspace selection */
+ hsize_t h5stride[2];
+ hsize_t h5count[2];
+ hsize_t h5start[2];
+ hssize_t h5offset[2]; /* Selection offset within dataspace */
hid_t h5dcpl = -1; /* Dataset creation property list */
hid_t h5dxpl = -1; /* Dataset transfer property list */
/* Get the parameters from the parameter block */
blk_size=parms->blk_size;
- /* Prepare buffer for verifying data */
- if (parms->verify)
- memset(buffer,pio_mpi_rank_g,buf_size);
-
/* There are two kinds of transfer patterns, contiguous and interleaved.
* Let 0,1,2,...,n be data accessed by process 0,1,2,...,n
* where n is rank of the last process.
@@ -542,15 +588,50 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
* 012...n012...n...
* These are all in the scope of one dataset.
*/
- if (parms->interleaved==0) {
+
+ /* 1D dataspace */
+ if (!parms->dim2d){
/* Contiguous Pattern: */
- bytes_begin = (off_t)(((double)nbytes*pio_mpi_rank_g)/pio_mpi_nprocs_g);
- } /* end if */
+ if (!parms->interleaved) {
+ bytes_begin[0] = (off_t)(((double)nbytes*pio_mpi_rank_g)/pio_mpi_nprocs_g);
+ } /* end if */
+ /* Interleaved Pattern: */
+ else {
+ bytes_begin[0] = (off_t)(blk_size*pio_mpi_rank_g);
+ } /* end else */
+
+ /* Prepare buffer for verifying data */
+ if (parms->verify)
+ memset(buffer,pio_mpi_rank_g+1,buf_size);
+ }/* end if */
+ /* 2D dataspace */
else {
+ /* nbytes is always the number of bytes per dataset (1D or 2D). If the
+ dataspace is 2D, snbytes is the size of a side of the dataset square.
+ */
+ snbytes = (off_t)sqrt(nbytes);
+
+ /* Contiguous Pattern: */
+ if (!parms->interleaved) {
+ bytes_begin[0] = (off_t)((double)snbytes*pio_mpi_rank_g / pio_mpi_nprocs_g);
+ bytes_begin[1] = 0;
+ } /* end if */
/* Interleaved Pattern: */
- bytes_begin = (off_t)(blk_size*pio_mpi_rank_g);
+ else {
+ bytes_begin[0] = 0;
+
+ if(!parms->h5_use_chunks || parms->io_type==PHDF5)
+ bytes_begin[1] = (off_t)(blk_size*pio_mpi_rank_g);
+ else
+ bytes_begin[1] = (off_t)(blk_size*blk_size*pio_mpi_rank_g);
+ } /* end else */
+
+ /* Prepare buffer for verifying data */
+ if (parms->verify)
+ memset(buffer,pio_mpi_rank_g+1,buf_size*blk_size);
} /* end else */
+
/* Calculate the total number of bytes (bytes_count) to be
* transferred by this process. It may be different for different
* transfer pattern due to rounding to integral values.
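
With the same illustrative numbers (snbytes = 1024, blk_size = 16, four ranks), the 2-D starting coordinates computed above read as follows: in the contiguous pattern rank r starts at row r*1024/4, so ranks 0..3 begin at (0,0), (256,0), (512,0) and (768,0); in the interleaved pattern every rank starts at row 0 and column r*blk_size, i.e. (0,0), (0,16), (0,32), (0,48), except that with chunked storage under POSIX or MPI-IO the column step becomes a whole chunk, blk_size*blk_size = 256 bytes per rank. This is only a reading of the formulas in the hunk, not additional behavior.
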
@@ -562,32 +643,41 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
* (This is tricky, don't mess with the formula, rounding errors
* can easily get introduced) */
bytes_count = (off_t)(((double)nbytes*(pio_mpi_rank_g+1)) / pio_mpi_nprocs_g)
- - (off_t)(((double)nbytes*pio_mpi_rank_g) / pio_mpi_nprocs_g);
+ - (off_t)(((double)nbytes*pio_mpi_rank_g) / pio_mpi_nprocs_g);
/* debug */
if (pio_debug_level >= 4) {
HDprint_rank(output);
+ if (!parms->dim2d) {
HDfprintf(output, "Debug(do_write): "
"buf_size=%Hd, bytes_begin=%Hd, bytes_count=%Hd\n",
- (long_long)buf_size, (long_long)bytes_begin,
+ (long_long)buf_size, (long_long)bytes_begin[0],
(long_long)bytes_count);
+ } else {
+ HDfprintf(output, "Debug(do_write): "
+ "linear buf_size=%Hd, bytes_begin=(%Hd,%Hd), bytes_count=%Hd\n",
+ (long_long)buf_size*blk_size, (long_long)bytes_begin[0],
+ (long_long)bytes_begin[1], (long_long)bytes_count);
+ }
}
/* I/O Access specific setup */
switch (parms->io_type) {
- case POSIXIO:
- /* No extra setup */
- break;
+ case POSIXIO:
+ /* No extra setup */
+ break;
- case MPIO: /* MPI-I/O setup */
+ case MPIO: /* MPI-I/O setup */
+ /* 1D dataspace */
+ if (!parms->dim2d){
/* Build block's derived type */
mrc = MPI_Type_contiguous((int)blk_size,
- MPI_BYTE, &mpi_blk_type);
+ MPI_BYTE, &mpi_blk_type);
VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
/* Build file's derived type */
mrc = MPI_Type_vector((int)(buf_size/blk_size), (int)1,
- (int)pio_mpi_nprocs_g, mpi_blk_type, &mpi_file_type);
+ (int)pio_mpi_nprocs_g, mpi_blk_type, &mpi_file_type);
VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
/* Commit file type */
@@ -597,32 +687,102 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
/* Commit buffer type */
mrc = MPI_Type_commit( &mpi_blk_type );
VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
- break;
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ /* Build partial buffer derived type for contiguous access */
- case PHDF5: /* HDF5 setup */
+ mrc = MPI_Type_contiguous((int)buf_size, MPI_BYTE,
+ &mpi_partial_buffer_cont);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit partial buffer derived type */
+ mrc = MPI_Type_commit(&mpi_partial_buffer_cont);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build contiguous file's derived type */
+ mrc = MPI_Type_vector((int)blk_size, (int)1, (int)(snbytes/buf_size),
+ mpi_partial_buffer_cont, &mpi_cont_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit contiguous file type */
+ mrc = MPI_Type_commit(&mpi_cont_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build partial buffer derived type for interleaved access */
+ mrc = MPI_Type_contiguous((int)blk_size, MPI_BYTE,
+ &mpi_partial_buffer_inter);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit partial buffer derived type */
+ mrc = MPI_Type_commit(&mpi_partial_buffer_inter);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build interleaved file's derived type */
+ mrc = MPI_Type_vector((int)buf_size, (int)1, (int)(snbytes/blk_size),
+ mpi_partial_buffer_inter, &mpi_inter_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit interleaved file type */
+ mrc = MPI_Type_commit(&mpi_inter_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build full buffer derived type */
+ mrc = MPI_Type_contiguous((int)(blk_size*buf_size), MPI_BYTE,
+ &mpi_full_buffer);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit full buffer derived type */
+ mrc = MPI_Type_commit(&mpi_full_buffer);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build full chunk derived type */
+ mrc = MPI_Type_contiguous((int)(blk_size*blk_size), MPI_BYTE,
+ &mpi_full_chunk);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit full chunk derived type */
+ mrc = MPI_Type_commit(&mpi_full_chunk);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build chunk interleaved file's derived type */
+ mrc = MPI_Type_vector((int)(buf_size/blk_size), (int)1, (int)(snbytes/blk_size),
+ mpi_full_chunk, &mpi_chunk_inter_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit chunk interleaved file type */
+ mrc = MPI_Type_commit(&mpi_chunk_inter_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ } /* end else */
+ break;
+
+ case PHDF5: /* HDF5 setup */
+ /* 1D dataspace */
+ if (!parms->dim2d){
if(nbytes>0) {
- /* define a contiquous dataset of nbytes native bytes */
+ /* define a contiguous dataset of nbytes native bytes */
h5dims[0] = nbytes;
h5dset_space_id = H5Screate_simple(1, h5dims, NULL);
VRFY((h5dset_space_id >= 0), "H5Screate_simple");
/* Set up the file dset space id to select the pattern to access */
- if (parms->interleaved==0){
- /* Contiguous pattern */
- h5start[0] = bytes_begin;
- h5stride[0] = h5block[0] = blk_size;
- h5count[0] = buf_size/blk_size;
+ if (!parms->interleaved){
+ /* Contiguous pattern */
+ h5start[0] = bytes_begin[0];
+ h5stride[0] = h5block[0] = blk_size;
+ h5count[0] = buf_size/blk_size;
} /* end if */
else {
- /* Interleaved access pattern */
- /* Skip offset over blocks of other processes */
- h5start[0] = bytes_begin;
- h5stride[0] = blk_size*pio_mpi_nprocs_g;
- h5block[0] = blk_size;
- h5count[0] = buf_size/blk_size;
+ /* Interleaved access pattern */
+ /* Skip offset over blocks of other processes */
+ h5start[0] = bytes_begin[0];
+ h5stride[0] = blk_size*pio_mpi_nprocs_g;
+ h5block[0] = blk_size;
+ h5count[0] = buf_size/blk_size;
} /* end else */
hrc = H5Sselect_hyperslab(h5dset_space_id, H5S_SELECT_SET,
- h5start, h5stride, h5count, h5block);
+ h5start, h5stride, h5count, h5block);
VRFY((hrc >= 0), "H5Sselect_hyperslab");
} /* end if */
else {
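
The 2-D MPI-IO setup above builds one derived type per storage/access combination. With the running example (snbytes = 1024, buf_size = 256, blk_size = 16), the contiguous file type mpi_cont_type describes a blk_size x buf_size tile: blk_size rows, one buf_size-byte segment per row, consecutive rows snbytes bytes apart. A standalone sketch of that construction with the numbers substituted (names are illustrative):

    /* Sketch of mpi_cont_type with the example numbers substituted. */
    #include <mpi.h>

    static void build_cont_tile_type(MPI_Datatype *tile)
    {
        MPI_Datatype seg;
        MPI_Type_contiguous(256, MPI_BYTE, &seg);      /* buf_size bytes     */
        MPI_Type_vector(16, 1, 1024 / 256, seg, tile); /* blk_size rows, one */
        MPI_Type_commit(tile);                         /* row (1024 B) apart */
        MPI_Type_free(&seg);                           /* tile keeps a copy  */
    }
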
@@ -640,98 +800,184 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
h5mem_space_id = H5Screate(H5S_SCALAR);
VRFY((h5mem_space_id >= 0), "H5Screate");
} /* end else */
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ if(nbytes>0) {
+ /* define a contiguous dataset of nbytes native bytes */
+ h5dims[0] = snbytes;
+ h5dims[1] = snbytes;
+ h5dset_space_id = H5Screate_simple(2, h5dims, NULL);
+ VRFY((h5dset_space_id >= 0), "H5Screate_simple");
- /* Create the dataset transfer property list */
- h5dxpl = H5Pcreate(H5P_DATASET_XFER);
- if (h5dxpl < 0) {
- fprintf(stderr, "HDF5 Property List Create failed\n");
- GOTOERROR(FAIL);
- }
-
- /* Change to collective I/O, if asked */
- if(parms->collective) {
- hrc = H5Pset_dxpl_mpio(h5dxpl, H5FD_MPIO_COLLECTIVE);
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Property List Set failed\n");
- GOTOERROR(FAIL);
+ /* Set up the file dset space id to select the pattern to access */
+ if (!parms->interleaved){
+ /* Contiguous pattern */
+ h5start[0] = bytes_begin[0];
+ h5start[1] = bytes_begin[1];
+ h5stride[0] = 1;
+ h5stride[1] = h5block[0] = h5block[1] = blk_size;
+ h5count[0] = 1;
+ h5count[1] = buf_size/blk_size;
} /* end if */
+ else {
+ /* Interleaved access pattern */
+ /* Skip offset over blocks of other processes */
+ h5start[0] = bytes_begin[0];
+ h5start[1] = bytes_begin[1];
+ h5stride[0] = blk_size;
+ h5stride[1] = blk_size*pio_mpi_nprocs_g;
+ h5block[0] = h5block[1] = blk_size;
+ h5count[0] = buf_size/blk_size;
+ h5count[1] = 1;
+ } /* end else */
+ hrc = H5Sselect_hyperslab(h5dset_space_id, H5S_SELECT_SET,
+ h5start, h5stride, h5count, h5block);
+ VRFY((hrc >= 0), "H5Sselect_hyperslab");
} /* end if */
- break;
+ else {
+ h5dset_space_id = H5Screate(H5S_SCALAR);
+ VRFY((h5dset_space_id >= 0), "H5Screate");
+ } /* end else */
+
+ /* Create the memory dataspace that corresponds to the xfer buffer */
+ if(buf_size>0) {
+ if (!parms->interleaved){
+ h5dims[0] = blk_size;
+ h5dims[1] = buf_size;
+ }else{
+ h5dims[0] = buf_size;
+ h5dims[1] = blk_size;
+ }
+ h5mem_space_id = H5Screate_simple(2, h5dims, NULL);
+ VRFY((h5mem_space_id >= 0), "H5Screate_simple");
+ } /* end if */
+ else {
+ h5mem_space_id = H5Screate(H5S_SCALAR);
+ VRFY((h5mem_space_id >= 0), "H5Screate");
+ } /* end else */
+ } /* end else */
+
+ /* Create the dataset transfer property list */
+ h5dxpl = H5Pcreate(H5P_DATASET_XFER);
+ if (h5dxpl < 0) {
+ fprintf(stderr, "HDF5 Property List Create failed\n");
+ GOTOERROR(FAIL);
+ }
+
+ /* Change to collective I/O, if asked */
+ if(parms->collective) {
+ hrc = H5Pset_dxpl_mpio(h5dxpl, H5FD_MPIO_COLLECTIVE);
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Property List Set failed\n");
+ GOTOERROR(FAIL);
+ } /* end if */
+ } /* end if */
+ break;
} /* end switch */
for (ndset = 1; ndset <= ndsets; ++ndset) {
- /* Calculate dataset offset within a file */
+ /* Calculate dataset offset within a file */
- /* create dataset */
- switch (parms->io_type) {
+ /* create dataset */
+ switch (parms->io_type) {
case POSIXIO:
case MPIO:
- /* both posix and mpi io just need dataset offset in file*/
- dset_offset = (ndset - 1) * nbytes;
- break;
+ /* both posix and mpi io just need dataset offset in file*/
+ dset_offset = (ndset - 1) * nbytes;
+ break;
case PHDF5:
- h5dcpl = H5Pcreate(H5P_DATASET_CREATE);
- if (h5dcpl < 0) {
- fprintf(stderr, "HDF5 Property List Create failed\n");
- GOTOERROR(FAIL);
- }
-
+ h5dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ if (h5dcpl < 0) {
+ fprintf(stderr, "HDF5 Property List Create failed\n");
+ GOTOERROR(FAIL);
+ }
+ /* 1D dataspace */
+ if (!parms->dim2d){
/* Make the dataset chunked if asked */
if(parms->h5_use_chunks) {
- /* Set the chunk size to be the same as the buffer size */
- h5dims[0] = buf_size;
- hrc = H5Pset_chunk(h5dcpl, 1, h5dims);
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Property List Set failed\n");
- GOTOERROR(FAIL);
- } /* end if */
- } /* end if */
-
- sprintf(dname, "Dataset_%ld", ndset);
- h5ds_id = H5Dcreate(fd->h5fd, dname, ELMT_H5_TYPE,
- h5dset_space_id, h5dcpl);
-
- if (h5ds_id < 0) {
- fprintf(stderr, "HDF5 Dataset Create failed\n");
+ /* Set the chunk size to be the same as the block size */
+ h5dims[0] = blk_size;
+ hrc = H5Pset_chunk(h5dcpl, 1, h5dims);
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Property List Set failed\n");
GOTOERROR(FAIL);
- }
-
- hrc = H5Pclose(h5dcpl);
- /* verifying the close of the dcpl */
+ } /* end if */
+ } /* end if */
+ }/* end if */
+ else{
+ /* 2D dataspace */
+ if(parms->h5_use_chunks) {
+ /* Set the chunk size to be the same as the block size */
+ h5dims[0] = blk_size;
+ h5dims[1] = blk_size;
+ hrc = H5Pset_chunk(h5dcpl, 2, h5dims);
if (hrc < 0) {
- fprintf(stderr, "HDF5 Property List Close failed\n");
+ fprintf(stderr, "HDF5 Property List Set failed\n");
GOTOERROR(FAIL);
- }
+ } /* end if */
+ } /* end if */
+ }/* end else */
- break;
+ sprintf(dname, "Dataset_%ld", ndset);
+ h5ds_id = H5DCREATE(fd->h5fd, dname, ELMT_H5_TYPE,
+ h5dset_space_id, h5dcpl);
+
+ if (h5ds_id < 0) {
+ fprintf(stderr, "HDF5 Dataset Create failed\n");
+ GOTOERROR(FAIL);
}
- /* The task is to transfer bytes_count bytes, starting at
- * bytes_begin position, using transfer buffer of buf_size bytes.
- * If interleaved, select buf_size at a time, in round robin
- * fashion, according to number of process. Otherwise, select
- * all bytes_count in contiguous.
- */
- nbytes_xfer = 0 ;
+ hrc = H5Pclose(h5dcpl);
+ /* verifying the close of the dcpl */
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Property List Close failed\n");
+ GOTOERROR(FAIL);
+ }
+ break;
+ }
+
+ /* The task is to transfer bytes_count bytes, starting at
+ * bytes_begin position, using transfer buffer of buf_size bytes.
+ * If interleaved, select buf_size at a time, in round robin
+ * fashion, according to number of process. Otherwise, select
+ * all bytes_count in contiguous.
+ */
+ nbytes_xfer = 0 ;
+
+ /* 1D dataspace */
+ if (!parms->dim2d){
/* Set base file offset for all I/O patterns and POSIX access */
- posix_file_offset = dset_offset + bytes_begin;
+ posix_file_offset = dset_offset + bytes_begin[0];
/* Set base file offset for all I/O patterns and MPI access */
- mpi_file_offset = (MPI_Offset)(dset_offset + bytes_begin);
+ mpi_file_offset = (MPI_Offset)(dset_offset + bytes_begin[0]);
+ } /* end if */
+ else {
+ /* Set base file offset for all I/O patterns and POSIX access */
+ posix_file_offset=dset_offset + bytes_begin[0]*snbytes+
+ bytes_begin[1];
- /* Start "raw data" write timer */
- set_time(res->timers, HDF5_RAW_WRITE_FIXED_DIMS, START);
+ /* Set base file offset for all I/O patterns and MPI access */
+ mpi_file_offset=(MPI_Offset)(dset_offset + bytes_begin[0]*snbytes+
+ bytes_begin[1]);
+ } /* end else */
- while (nbytes_xfer < bytes_count){
- /* Write */
- /* Calculate offset of write within a dataset/file */
- switch (parms->io_type) {
- case POSIXIO:
+ /* Start "raw data" write timer */
+ set_time(res->timers, HDF5_RAW_WRITE_FIXED_DIMS, START);
+
+ while (nbytes_xfer < bytes_count){
+ /* Write */
+ /* Calculate offset of write within a dataset/file */
+ switch (parms->io_type) {
+ case POSIXIO:
+ /* 1D dataspace */
+ if (!parms->dim2d){
/* Contiguous pattern */
- if (parms->interleaved==0) {
+ if (!parms->interleaved) {
/* Compute file offset */
file_offset = posix_file_offset + (off_t)nbytes_xfer;
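
To make the new 2-D hyperslab parameters above concrete with the same example numbers (snbytes = 1024, blk_size = 16, buf_size = 256, rank 1 of 4), the contiguous selection is one blk_size x buf_size band starting at the rank's first row. A sketch with the values substituted (space_id stands for the 1024 x 1024 dataspace and is illustrative):

    /* Contiguous 2-D file selection, example numbers substituted. */
    hsize_t start[2]  = {256, 0};   /* rank 1 of 4 starts at row 256    */
    hsize_t stride[2] = {1, 16};
    hsize_t count[2]  = {1, 16};    /* 16 blocks laid side by side      */
    hsize_t block[2]  = {16, 16};
    H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block);
    /* => one 16-row x 256-column band; the interleaved case instead
     *    stacks 16 blocks vertically into a 256-row x 16-column strip. */
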
@@ -780,21 +1026,123 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
nbytes_toxfer-=blk_size;
} /* end while */
} /* end else */
- break;
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ /* Contiguous storage */
+ if (!parms->h5_use_chunks) {
+ /* Contiguous access pattern */
+ if (!parms->interleaved) {
+ /* Compute file offset */
+ file_offset=posix_file_offset+(off_t)(((nbytes_xfer/blk_size)
+ /snbytes)*(blk_size*snbytes)+((nbytes_xfer/blk_size)%snbytes));
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = buf_size;
- case MPIO:
+ /* Global offset advance after each I/O operation */
+ file_offset_advance = (off_t)snbytes;
+ } /* end if */
+ /* Interleaved access pattern */
+ else {
+ /* Compute file offset */
+ file_offset=posix_file_offset+(off_t)((((nbytes_xfer/buf_size)
+ *pio_mpi_nprocs_g)/snbytes)*(buf_size*snbytes)
+ +((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)%snbytes);
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size;
+
+ /* Global offset advance after each I/O operation */
+ file_offset_advance = (off_t)snbytes;
+ } /* end else */
+ } /* end if */
+ /* Chunked storage */
+ else {
+ /*Contiguous access pattern */
+ if (!parms->interleaved) {
+ /* Compute file offset */
+ file_offset=posix_file_offset+(off_t)nbytes_xfer;
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size * buf_size;
+
+ /* Global offset advance after each I/O operation */
+ file_offset_advance = 0;
+ } /* end if */
+ /*Interleaved access pattern */
+ else {
+ /* Compute file offset */
+ /* Before simplification */
+ /* file_offset=posix_file_offset+(off_t)((nbytes_xfer/(buf_size/blk_size)
+ *pio_mpi_nprocs_g)/(snbytes/blk_size*(blk_size*blk_size))*(buf_size/blk_size
+ *snbytes/blk_size*(blk_size*blk_size))+((nbytes_xfer/(buf_size/blk_size))
+ *pio_mpi_nprocs_g)%(snbytes/blk_size*(blk_size*blk_size))); */
+
+ file_offset=posix_file_offset+(off_t)(((nbytes_xfer/(buf_size/blk_size)
+ *pio_mpi_nprocs_g)/(snbytes*blk_size))*(buf_size*snbytes)+((nbytes_xfer/(buf_size/blk_size))
+ *pio_mpi_nprocs_g)%(snbytes*blk_size));
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size * blk_size;
+
+ /* Global offset advance after each I/O operation */
+ /* file_offset_advance = (off_t)(snbytes/blk_size*(blk_size*blk_size)); */
+ file_offset_advance = (off_t)(snbytes*blk_size);
+ } /* end else */
+ } /* end else */
+
+ /* Common code for file access */
+
+ /* Set the base of user's buffer */
+ buf_p = (unsigned char *)buffer;
+
+ /* Set the number of bytes to transfer this time */
+ nbytes_toxfer = buf_size*blk_size;
+
+ /* Loop over portions of the buffer to write */
+ while(nbytes_toxfer>0){
+ /* only care if seek returns error */
+ rc = POSIXSEEK(fd->posixfd, file_offset) < 0 ? -1 : 0;
+ VRFY((rc==0), "POSIXSEEK");
+
+ /* check if all bytes are written */
+ rc = ((ssize_t)nbytes_xfer_advance ==
+ POSIXWRITE(fd->posixfd, buf_p, nbytes_xfer_advance));
+ VRFY((rc != 0), "POSIXWRITE");
+
+ /* Advance location in buffer */
+ buf_p+=nbytes_xfer_advance;
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=nbytes_xfer_advance;
+
+ /* Decrement number of bytes left this time */
+ nbytes_toxfer-=nbytes_xfer_advance;
+
+ /* Partially advance file offset */
+ file_offset+=file_offset_advance;
+ } /* end while */
+
+ } /* end else */
+
+ break;
+
+ case MPIO:
+ /* 1D dataspace */
+ if (!parms->dim2d){
/* Independent file access */
- if(parms->collective==0) {
+ if(!parms->collective) {
/* Contiguous pattern */
- if (parms->interleaved==0){
+ if (!parms->interleaved){
/* Compute offset in file */
mpi_offset = mpi_file_offset +
nbytes_xfer;
/* Perform independent write */
mrc = MPI_File_write_at(fd->mpifd, mpi_offset, buffer,
- (int)(buf_size/blk_size), mpi_blk_type,
- &mpi_status);
+ (int)(buf_size/blk_size), mpi_blk_type,
+ &mpi_status);
VRFY((mrc==MPI_SUCCESS), "MPIO_WRITE");
/* Advance global offset in dataset */
@@ -812,11 +1160,11 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
while(nbytes_toxfer>0) {
/* Skip offset over blocks of other processes */
mpi_offset = mpi_file_offset +
- (nbytes_xfer*pio_mpi_nprocs_g);
+ (nbytes_xfer*pio_mpi_nprocs_g);
/* Perform independent write */
mrc = MPI_File_write_at(fd->mpifd, mpi_offset, buf_p,
- (int)1, mpi_blk_type, &mpi_status);
+ (int)1, mpi_blk_type, &mpi_status);
VRFY((mrc==MPI_SUCCESS), "MPIO_WRITE");
/* Advance location in buffer */
@@ -833,14 +1181,14 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
/* Collective file access */
else {
/* Contiguous access pattern */
- if (parms->interleaved==0){
+ if (!parms->interleaved){
/* Compute offset in file */
mpi_offset = mpi_file_offset +
nbytes_xfer;
/* Perform independent write */
mrc = MPI_File_write_at_all(fd->mpifd, mpi_offset, buffer,
- (int)(buf_size/blk_size), mpi_blk_type, &mpi_status);
+ (int)(buf_size/blk_size), mpi_blk_type, &mpi_status);
VRFY((mrc==MPI_SUCCESS), "MPIO_WRITE");
/* Advance global offset in dataset */
@@ -854,156 +1202,358 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
/* Set the file view */
mrc = MPI_File_set_view(fd->mpifd, mpi_offset, mpi_blk_type,
- mpi_file_type, (char*)"native", h5_io_info_g);
+ mpi_file_type, (char*)"native", h5_io_info_g);
VRFY((mrc==MPI_SUCCESS), "MPIO_VIEW");
/* Perform write */
mrc = MPI_File_write_at_all(fd->mpifd, 0, buffer,
- (int)(buf_size/blk_size), mpi_blk_type, &mpi_status);
+ (int)(buf_size/blk_size), mpi_blk_type, &mpi_status);
VRFY((mrc==MPI_SUCCESS), "MPIO_WRITE");
/* Advance global offset in dataset */
nbytes_xfer+=buf_size;
} /* end else */
} /* end else */
- break;
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ /* Contiguous storage */
+ if (!parms->h5_use_chunks) {
+ /* Contiguous access pattern */
+ if (!parms->interleaved) {
+ /* Compute offset in file */
+ mpi_offset=mpi_file_offset+((nbytes_xfer/blk_size)/snbytes)*
+ (blk_size*snbytes)+((nbytes_xfer/blk_size)%snbytes);
- case PHDF5:
- /* Set up the file dset space id to move the selection to process */
- if (parms->interleaved==0){
- /* Contiguous pattern */
- h5offset[0] = nbytes_xfer;
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = buf_size;
+
+ /* Global offset advance after each I/O operation */
+ mpi_offset_advance = snbytes;
+
+ /* MPI type to be used for collective access */
+ mpi_collective_type = mpi_cont_type;
+ } /* end if */
+ /* Interleaved access pattern */
+ else {
+ /* Compute offset in file */
+ mpi_offset=mpi_file_offset+(((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)/snbytes)*
+ (buf_size*snbytes)+((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)%snbytes;
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size;
+
+ /* Global offset advance after each I/O operation */
+ mpi_offset_advance = snbytes;
+
+ /* MPI type to be used for collective access */
+ mpi_collective_type = mpi_inter_type;
+ } /* end else */
} /* end if */
+ /* Chunked storage */
else {
- /* Interleaved access pattern */
- /* Skip offset over blocks of other processes */
- h5offset[0] = (nbytes_xfer*pio_mpi_nprocs_g);
+ /*Contiguous access pattern */
+ if (!parms->interleaved) {
+ /* Compute offset in file */
+ mpi_offset=mpi_file_offset+nbytes_xfer;
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size * buf_size;
+
+ /* Global offset advance after each I/O operation */
+ mpi_offset_advance = 0;
+
+ /* MPI type to be used for collective access */
+ mpi_collective_type = mpi_full_buffer;
+ } /* end if */
+ /*Interleaved access pattern */
+ else {
+ /* Compute offset in file */
+ /* Before simplification */
+ /* mpi_offset=mpi_file_offset+(nbytes_xfer/(buf_size/blk_size)
+ *pio_mpi_nprocs_g)/(snbytes/blk_size*(blk_size*blk_size))*
+ (buf_size/blk_size*snbytes/blk_size*(blk_size*blk_size))+
+ ((nbytes_xfer/(buf_size/blk_size))*pio_mpi_nprocs_g)%(snbytes
+ /blk_size*(blk_size*blk_size)); */
+ mpi_offset=mpi_file_offset+((nbytes_xfer/(buf_size/blk_size)
+ *pio_mpi_nprocs_g)/(snbytes*blk_size))*(buf_size*snbytes)
+ +((nbytes_xfer/(buf_size/blk_size))*pio_mpi_nprocs_g)%(snbytes*blk_size);
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size * blk_size;
+
+ /* Global offset advance after each I/O operation */
+ /* mpi_offset_advance = (MPI_Offset)(snbytes/blk_size*(blk_size*blk_size)); */
+ mpi_offset_advance = (MPI_Offset)(snbytes*blk_size);
+
+ /* MPI type to be used for collective access */
+ mpi_collective_type = mpi_chunk_inter_type;
+ } /* end else */
} /* end else */
- hrc = H5Soffset_simple(h5dset_space_id, h5offset);
- VRFY((hrc >= 0), "H5Soffset_simple");
- /* Write the buffer out */
- hrc = H5Dwrite(h5ds_id, ELMT_H5_TYPE, h5mem_space_id,
- h5dset_space_id, h5dxpl, buffer);
- VRFY((hrc >= 0), "H5Dwrite");
+ /* Common code for independent file access */
+ if (!parms->collective) {
+ /* Set the base of user's buffer */
+ buf_p = (unsigned char *)buffer;
- /* Increment number of bytes transferred */
- nbytes_xfer += buf_size;
+ /* Set the number of bytes to transfer this time */
+ nbytes_toxfer = buf_size * blk_size;
- break;
- } /* switch (parms->io_type) */
- } /* end while */
+ /* Loop over portions of the buffer to write */
+ while(nbytes_toxfer>0){
+ /* Perform independent write */
+ mrc = MPI_File_write_at(fd->mpifd, mpi_offset, buf_p,
+ (int)nbytes_xfer_advance, MPI_BYTE, &mpi_status);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_WRITE");
- /* Stop "raw data" write timer */
- set_time(res->timers, HDF5_RAW_WRITE_FIXED_DIMS, STOP);
+ /* Advance location in buffer */
+ buf_p+=nbytes_xfer_advance;
- /* Calculate write time */
+ /* Advance global offset in dataset */
+ nbytes_xfer+=nbytes_xfer_advance;
- /* Close dataset. Only HDF5 needs to do an explicit close. */
- if (parms->io_type == PHDF5) {
- hrc = H5Dclose(h5ds_id);
+ /* Decrement number of bytes left this time */
+ nbytes_toxfer-=nbytes_xfer_advance;
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Dataset Close failed\n");
- GOTOERROR(FAIL);
- }
+ /* Partially advance global offset in dataset */
+ mpi_offset+=mpi_offset_advance;
+ } /* end while */
+ } /* end if */
- h5ds_id = -1;
- } /* end if */
+ /* Common code for collective file access */
+ else {
+ /* Set the file view */
+ mrc = MPI_File_set_view(fd->mpifd, mpi_offset, MPI_BYTE,
+ mpi_collective_type, (char *)"native", h5_io_info_g);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_VIEW");
+
+ /* Perform write */
+ mrc = MPI_File_write_at_all(fd->mpifd, 0, buffer,(int)(buf_size*blk_size),
+ MPI_BYTE, &mpi_status);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_WRITE");
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=buf_size*blk_size;
+ } /* end else */
+
+ } /* end else */
+
+ break;
+
+ case PHDF5:
+ /* 1D dataspace */
+ if (!parms->dim2d){
+ /* Set up the file dset space id to move the selection to process */
+ if (!parms->interleaved){
+ /* Contiguous pattern */
+ h5offset[0] = nbytes_xfer;
+ } /* end if */
+ else {
+ /* Interleaved access pattern */
+ /* Skip offset over blocks of other processes */
+ h5offset[0] = (nbytes_xfer*pio_mpi_nprocs_g);
+ } /* end else */
+ hrc = H5Soffset_simple(h5dset_space_id, h5offset);
+ VRFY((hrc >= 0), "H5Soffset_simple");
+
+ /* Write the buffer out */
+ hrc = H5Dwrite(h5ds_id, ELMT_H5_TYPE, h5mem_space_id,
+ h5dset_space_id, h5dxpl, buffer);
+ VRFY((hrc >= 0), "H5Dwrite");
+
+ /* Increment number of bytes transferred */
+ nbytes_xfer += buf_size;
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ /* Set up the file dset space id to move the selection to process */
+ if (!parms->interleaved){
+ /* Contiguous pattern */
+ h5offset[0] = (nbytes_xfer/(snbytes*blk_size))*blk_size;
+ h5offset[1] = (nbytes_xfer%(snbytes*blk_size))/blk_size;
+
+ } /* end if */
+ else {
+ /* Interleaved access pattern */
+ /* Skip offset over blocks of other processes */
+ h5offset[0] = ((nbytes_xfer*pio_mpi_nprocs_g)/(snbytes*buf_size))*buf_size;
+ h5offset[1] = ((nbytes_xfer*pio_mpi_nprocs_g)%(snbytes*buf_size))/buf_size;
+
+ } /* end else */
+ hrc = H5Soffset_simple(h5dset_space_id, h5offset);
+ VRFY((hrc >= 0), "H5Soffset_simple");
+
+ /* Write the buffer out */
+ hrc = H5Dwrite(h5ds_id, ELMT_H5_TYPE, h5mem_space_id,
+ h5dset_space_id, h5dxpl, buffer);
+ VRFY((hrc >= 0), "H5Dwrite");
+
+ /* Increment number of bytes transferred */
+ nbytes_xfer += buf_size*blk_size;
+
+ } /* end else */
+
+ break;
+ } /* switch (parms->io_type) */
+ } /* end while */
+
+ /* Stop "raw data" write timer */
+ set_time(res->timers, HDF5_RAW_WRITE_FIXED_DIMS, STOP);
+
+ /* Calculate write time */
+
+ /* Close dataset. Only HDF5 needs to do an explicit close. */
+ if (parms->io_type == PHDF5) {
+ hrc = H5Dclose(h5ds_id);
+
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Dataset Close failed\n");
+ GOTOERROR(FAIL);
+ }
+
+ h5ds_id = -1;
+ } /* end if */
} /* end for */
done:
/* release MPI-I/O objects */
if (parms->io_type == MPIO) {
- /* Free file type */
- mrc = MPI_Type_free( &mpi_file_type );
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+ /* 1D dataspace */
+ if (!parms->dim2d){
+ /* Free file type */
+ mrc = MPI_Type_free( &mpi_file_type );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free buffer type */
+ mrc = MPI_Type_free( &mpi_blk_type );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ /* Free partial buffer type for contiguous access */
+ mrc = MPI_Type_free( &mpi_partial_buffer_cont );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free contiguous file type */
+ mrc = MPI_Type_free( &mpi_cont_type );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free partial buffer type for interleaved access */
+ mrc = MPI_Type_free( &mpi_partial_buffer_inter );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free interleaved file type */
+ mrc = MPI_Type_free( &mpi_inter_type );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free full buffer type */
+ mrc = MPI_Type_free(&mpi_full_buffer);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
- /* Free buffer type */
- mrc = MPI_Type_free( &mpi_blk_type );
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+ /* Free full chunk type */
+ mrc = MPI_Type_free(&mpi_full_chunk);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free chunk interleaved file type */
+ mrc = MPI_Type_free(&mpi_chunk_inter_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+ } /* end else */
} /* end if */
/* release HDF5 objects */
if (h5dset_space_id != -1) {
- hrc = H5Sclose(h5dset_space_id);
- if (hrc < 0){
- fprintf(stderr, "HDF5 Dataset Space Close failed\n");
- ret_code = FAIL;
- } else {
- h5dset_space_id = -1;
- }
+ hrc = H5Sclose(h5dset_space_id);
+ if (hrc < 0){
+ fprintf(stderr, "HDF5 Dataset Space Close failed\n");
+ ret_code = FAIL;
+ } else {
+ h5dset_space_id = -1;
+ }
}
if (h5mem_space_id != -1) {
- hrc = H5Sclose(h5mem_space_id);
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Memory Space Close failed\n");
- ret_code = FAIL;
- } else {
- h5mem_space_id = -1;
- }
+ hrc = H5Sclose(h5mem_space_id);
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Memory Space Close failed\n");
+ ret_code = FAIL;
+ } else {
+ h5mem_space_id = -1;
+ }
}
if (h5dxpl != -1) {
- hrc = H5Pclose(h5dxpl);
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Dataset Transfer Property List Close failed\n");
- ret_code = FAIL;
- } else {
- h5dxpl = -1;
- }
+ hrc = H5Pclose(h5dxpl);
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Dataset Transfer Property List Close failed\n");
+ ret_code = FAIL;
+ } else {
+ h5dxpl = -1;
+ }
}
return ret_code;
}
/*
- * Function: do_read
- * Purpose: read the required amount of data from the file.
- * Return: SUCCESS or FAIL
- * Programmer: Albert Cheng 2001/12/13
+ * Function: do_read
+ * Purpose: read the required amount of data from the file.
+ * Return: SUCCESS or FAIL
+ * Programmer: Albert Cheng 2001/12/13
* Modifications:
+ * Added 2D testing (Christian Chilan, 10. August 2005)
*/
-static herr_t
+ static herr_t
do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
- off_t nbytes, size_t buf_size, void *buffer /*out*/)
+ off_t nbytes, size_t buf_size, void *buffer /*out*/)
{
int ret_code = SUCCESS;
int rc; /*routine return code */
long ndset;
size_t blk_size; /* The block size to subdivide the xfer buffer into */
+ size_t bsize; /* Size of the actual buffer */
off_t nbytes_xfer; /* Total number of bytes transferred so far */
+ size_t nbytes_xfer_advance; /* Number of bytes transferred in a single I/O operation */
size_t nbytes_toxfer; /* Number of bytes to transfer a particular time */
char dname[64];
off_t dset_offset=0; /*dataset offset in a file */
- off_t bytes_begin; /*first elmt this process transfer */
+ off_t bytes_begin[2]; /*first elmt this process transfer */
off_t bytes_count; /*number of elmts this process transfer */
+ off_t snbytes=0; /*size of a side of the dataset square */
unsigned char *buf_p; /* Current buffer pointer */
/* POSIX variables */
off_t file_offset; /* File offset of the next transfer */
+ off_t file_offset_advance; /* File offset advance after each I/O operation */
off_t posix_file_offset; /* Base file offset of the next transfer */
/* MPI variables */
MPI_Offset mpi_file_offset;/* Base file offset of the next transfer*/
- MPI_Offset mpi_offset; /* Offset in MPI file */
- MPI_Datatype mpi_file_type; /* MPI derived type for file */
- MPI_Datatype mpi_blk_type; /* MPI derived type for buffer */
- MPI_Status mpi_status;
+ MPI_Offset mpi_offset; /* Offset in MPI file */
+ MPI_Offset mpi_offset_advance; /* Offset advance after each I/O operation */
+ MPI_Datatype mpi_file_type; /* MPI derived type for 1D file */
+ MPI_Datatype mpi_blk_type; /* MPI derived type for 1D buffer */
+ MPI_Datatype mpi_cont_type; /* MPI derived type for 2D contiguous file */
+ MPI_Datatype mpi_partial_buffer_cont; /* MPI derived type for partial 2D contiguous buffer */
+ MPI_Datatype mpi_inter_type; /* MPI derived type for 2D interleaved file */
+ MPI_Datatype mpi_partial_buffer_inter; /* MPI derived type for partial 2D interleaved buffer */
+ MPI_Datatype mpi_full_buffer; /* MPI derived type for 2D full buffer */
+ MPI_Datatype mpi_full_chunk; /* MPI derived type for 2D full chunk */
+ MPI_Datatype mpi_chunk_inter_type; /* MPI derived type for 2D chunk interleaved file */
+ MPI_Datatype mpi_collective_type; /* Generic MPI derived type for 2D collective access */
+ MPI_Status mpi_status;
int mrc; /* MPI return code */
/* HDF5 variables */
herr_t hrc; /*HDF5 return code */
- hsize_t h5dims[1]; /*dataset dim sizes */
+ hsize_t h5dims[2]; /*dataset dim sizes */
hid_t h5dset_space_id = -1; /*dataset space ID */
hid_t h5mem_space_id = -1; /*memory dataspace ID */
hid_t h5ds_id = -1; /*dataset handle */
- hsize_t h5block[1]; /*dataspace selection */
- hsize_t h5stride[1];
- hsize_t h5count[1];
- hsize_t h5start[1];
- hssize_t h5offset[1]; /* Selection offset within dataspace */
+ hsize_t h5block[2]; /*dataspace selection */
+ hsize_t h5stride[2];
+ hsize_t h5count[2];
+ hsize_t h5start[2];
+ hssize_t h5offset[2]; /* Selection offset within dataspace */
hid_t h5dxpl = -1; /* Dataset transfer property list */
/* Get the parameters from the parameter block */
@@ -1018,13 +1568,42 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
* 012...n012...n...
* These are all in the scope of one dataset.
*/
- if (parms->interleaved==0) {
+
+ /* 1D dataspace */
+ if (!parms->dim2d){
+ bsize = buf_size;
/* Contiguous Pattern: */
- bytes_begin = (off_t)(((double)nbytes*pio_mpi_rank_g)/pio_mpi_nprocs_g);
- } /* end if */
+ if (!parms->interleaved) {
+ bytes_begin[0] = (off_t)(((double)nbytes*pio_mpi_rank_g)/pio_mpi_nprocs_g);
+ } /* end if */
+ /* Interleaved Pattern: */
+ else {
+ bytes_begin[0] = (off_t)(blk_size*pio_mpi_rank_g);
+ } /* end else */
+ }/* end if */
+ /* 2D dataspace */
else {
+ /* nbytes is always the number of bytes per dataset (1D or 2D). If the
+ dataspace is 2D, snbytes is the size of a side of the 'dataset square'.
+ */
+ snbytes = (off_t)sqrt(nbytes);
+
+ bsize = buf_size * blk_size;
+
+ /* Contiguous Pattern: */
+ if (!parms->interleaved) {
+ bytes_begin[0] = (off_t)((double)snbytes*pio_mpi_rank_g / pio_mpi_nprocs_g);
+ bytes_begin[1] = 0;
+ } /* end if */
/* Interleaved Pattern: */
- bytes_begin = (off_t)(blk_size*pio_mpi_rank_g);
+ else {
+ bytes_begin[0] = 0;
+
+ if (!parms->h5_use_chunks || parms->io_type==PHDF5)
+ bytes_begin[1] = (off_t)(blk_size*pio_mpi_rank_g);
+ else
+ bytes_begin[1] = (off_t)(blk_size*blk_size*pio_mpi_rank_g);
+ } /* end else */
} /* end else */
/* Calculate the total number of bytes (bytes_count) to be
@@ -1038,32 +1617,41 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
* (This is tricky, don't mess with the formula, rounding errors
* can easily get introduced) */
bytes_count = (off_t)(((double)nbytes*(pio_mpi_rank_g+1)) / pio_mpi_nprocs_g)
- - (off_t)(((double)nbytes*pio_mpi_rank_g) / pio_mpi_nprocs_g);
+ - (off_t)(((double)nbytes*pio_mpi_rank_g) / pio_mpi_nprocs_g);
/* debug */
if (pio_debug_level >= 4) {
HDprint_rank(output);
- HDfprintf(output, "Debug(do_read): "
+ if (!parms->dim2d) {
+	HDfprintf(output, "Debug(do_read): "
"buf_size=%Hd, bytes_begin=%Hd, bytes_count=%Hd\n",
- (long_long)buf_size, (long_long)bytes_begin,
+ (long_long)buf_size, (long_long)bytes_begin[0],
(long_long)bytes_count);
+ } else {
+	HDfprintf(output, "Debug(do_read): "
+ "linear buf_size=%Hd, bytes_begin=(%Hd,%Hd), bytes_count=%Hd\n",
+ (long_long)buf_size*blk_size, (long_long)bytes_begin[0],
+ (long_long)bytes_begin[1], (long_long)bytes_count);
+ }
}
/* I/O Access specific setup */
switch (parms->io_type) {
- case POSIXIO:
- /* No extra setup */
- break;
+ case POSIXIO:
+ /* No extra setup */
+ break;
- case MPIO: /* MPI-I/O setup */
+ case MPIO: /* MPI-I/O setup */
+ /* 1D dataspace */
+ if (!parms->dim2d){
/* Build block's derived type */
mrc = MPI_Type_contiguous((int)blk_size,
- MPI_BYTE, &mpi_blk_type);
+ MPI_BYTE, &mpi_blk_type);
VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
/* Build file's derived type */
mrc = MPI_Type_vector((int)(buf_size/blk_size), (int)1,
- (int)pio_mpi_nprocs_g, mpi_blk_type, &mpi_file_type);
+ (int)pio_mpi_nprocs_g, mpi_blk_type, &mpi_file_type);
VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
/* Commit file type */
@@ -1073,115 +1661,254 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
/* Commit buffer type */
mrc = MPI_Type_commit( &mpi_blk_type );
VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
- break;
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ /* Build partial buffer derived type for contiguous access */
+ mrc = MPI_Type_contiguous((int)buf_size, MPI_BYTE,
+ &mpi_partial_buffer_cont);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
- case PHDF5: /* HDF5 setup */
- if(nbytes>0) {
- /* define a contiquous dataset of nbytes native bytes */
- h5dims[0] = nbytes;
- h5dset_space_id = H5Screate_simple(1, h5dims, NULL);
- VRFY((h5dset_space_id >= 0), "H5Screate_simple");
+ /* Commit partial buffer derived type */
+ mrc = MPI_Type_commit(&mpi_partial_buffer_cont);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
- /* Set up the file dset space id to select the pattern to access */
- if (parms->interleaved==0){
- /* Contiguous pattern */
- h5start[0] = bytes_begin;
- h5stride[0] = h5block[0] = blk_size;
- h5count[0] = buf_size/blk_size;
- } /* end if */
- else {
- /* Interleaved access pattern */
- /* Skip offset over blocks of other processes */
- h5start[0] = bytes_begin;
- h5stride[0] = blk_size*pio_mpi_nprocs_g;
- h5block[0] = blk_size;
- h5count[0] = buf_size/blk_size;
- } /* end else */
- hrc = H5Sselect_hyperslab(h5dset_space_id, H5S_SELECT_SET,
- h5start, h5stride, h5count, h5block);
- VRFY((hrc >= 0), "H5Sselect_hyperslab");
+ /* Build contiguous file's derived type */
+ mrc = MPI_Type_vector((int)blk_size, (int)1, (int)(snbytes/buf_size),
+ mpi_partial_buffer_cont, &mpi_cont_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit contiguous file type */
+ mrc = MPI_Type_commit(&mpi_cont_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build partial buffer derived type for interleaved access */
+ mrc = MPI_Type_contiguous((int)blk_size, MPI_BYTE,
+ &mpi_partial_buffer_inter);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit partial buffer derived type */
+ mrc = MPI_Type_commit(&mpi_partial_buffer_inter);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build interleaved file's derived type */
+ mrc = MPI_Type_vector((int)buf_size, (int)1, (int)(snbytes/blk_size),
+ mpi_partial_buffer_inter, &mpi_inter_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit interleaved file type */
+ mrc = MPI_Type_commit(&mpi_inter_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build full buffer derived type */
+ mrc = MPI_Type_contiguous((int)(blk_size*buf_size), MPI_BYTE,
+ &mpi_full_buffer);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit full buffer derived type */
+ mrc = MPI_Type_commit(&mpi_full_buffer);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build full chunk derived type */
+ mrc = MPI_Type_contiguous((int)(blk_size*blk_size), MPI_BYTE,
+ &mpi_full_chunk);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit full chunk derived type */
+ mrc = MPI_Type_commit(&mpi_full_chunk);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+
+ /* Build chunk interleaved file's derived type */
+ mrc = MPI_Type_vector((int)(buf_size/blk_size), (int)1, (int)(snbytes/blk_size),
+ mpi_full_chunk, &mpi_chunk_inter_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
+
+ /* Commit chunk interleaved file type */
+ mrc = MPI_Type_commit(&mpi_chunk_inter_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
+ } /* end else */
+ break;
+
+ case PHDF5: /* HDF5 setup */
+ /* 1D dataspace */
+ if (!parms->dim2d){
+ if(nbytes>0) {
+ /* define a contiguous dataset of nbytes native bytes */
+ h5dims[0] = nbytes;
+ h5dset_space_id = H5Screate_simple(1, h5dims, NULL);
+ VRFY((h5dset_space_id >= 0), "H5Screate_simple");
+
+ /* Set up the file dset space id to select the pattern to access */
+ if (!parms->interleaved){
+ /* Contiguous pattern */
+ h5start[0] = bytes_begin[0];
+ h5stride[0] = h5block[0] = blk_size;
+ h5count[0] = buf_size/blk_size;
} /* end if */
else {
- h5dset_space_id = H5Screate(H5S_SCALAR);
- VRFY((h5dset_space_id >= 0), "H5Screate");
+ /* Interleaved access pattern */
+ /* Skip offset over blocks of other processes */
+ h5start[0] = bytes_begin[0];
+ h5stride[0] = blk_size*pio_mpi_nprocs_g;
+ h5block[0] = blk_size;
+ h5count[0] = buf_size/blk_size;
} /* end else */
+ hrc = H5Sselect_hyperslab(h5dset_space_id, H5S_SELECT_SET,
+ h5start, h5stride, h5count, h5block);
+ VRFY((hrc >= 0), "H5Sselect_hyperslab");
+ } /* end if */
+ else {
+ h5dset_space_id = H5Screate(H5S_SCALAR);
+ VRFY((h5dset_space_id >= 0), "H5Screate");
+ } /* end else */
- /* Create the memory dataspace that corresponds to the xfer buffer */
- if(buf_size>0) {
- h5dims[0] = buf_size;
- h5mem_space_id = H5Screate_simple(1, h5dims, NULL);
- VRFY((h5mem_space_id >= 0), "H5Screate_simple");
+ /* Create the memory dataspace that corresponds to the xfer buffer */
+ if(buf_size>0) {
+ h5dims[0] = buf_size;
+ h5mem_space_id = H5Screate_simple(1, h5dims, NULL);
+ VRFY((h5mem_space_id >= 0), "H5Screate_simple");
+ } /* end if */
+ else {
+ h5mem_space_id = H5Screate(H5S_SCALAR);
+ VRFY((h5mem_space_id >= 0), "H5Screate");
+ } /* end else */
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ if(nbytes>0) {
+ /* define a contiguous dataset of nbytes native bytes */
+ h5dims[0] = snbytes;
+ h5dims[1] = snbytes;
+ h5dset_space_id = H5Screate_simple(2, h5dims, NULL);
+ VRFY((h5dset_space_id >= 0), "H5Screate_simple");
+
+ /* Set up the file dset space id to select the pattern to access */
+ if (!parms->interleaved){
+ /* Contiguous pattern */
+ h5start[0] = bytes_begin[0];
+ h5start[1] = bytes_begin[1];
+ h5stride[0] = 1;
+ h5stride[1] = h5block[0] = h5block[1] = blk_size;
+ h5count[0] = 1;
+ h5count[1] = buf_size/blk_size;
} /* end if */
else {
- h5mem_space_id = H5Screate(H5S_SCALAR);
- VRFY((h5mem_space_id >= 0), "H5Screate");
+ /* Interleaved access pattern */
+ /* Skip offset over blocks of other processes */
+ h5start[0] = bytes_begin[0];
+ h5start[1] = bytes_begin[1];
+ h5stride[0] = blk_size;
+ h5stride[1] = blk_size*pio_mpi_nprocs_g;
+ h5block[0] = h5block[1] = blk_size;
+ h5count[0] = buf_size/blk_size;
+ h5count[1] = 1;
} /* end else */
+ hrc = H5Sselect_hyperslab(h5dset_space_id, H5S_SELECT_SET,
+ h5start, h5stride, h5count, h5block);
+ VRFY((hrc >= 0), "H5Sselect_hyperslab");
+ } /* end if */
+ else {
+ h5dset_space_id = H5Screate(H5S_SCALAR);
+ VRFY((h5dset_space_id >= 0), "H5Screate");
+ } /* end else */
- /* Create the dataset transfer property list */
- h5dxpl = H5Pcreate(H5P_DATASET_XFER);
- if (h5dxpl < 0) {
- fprintf(stderr, "HDF5 Property List Create failed\n");
- GOTOERROR(FAIL);
+ /* Create the memory dataspace that corresponds to the xfer buffer */
+ if(buf_size>0) {
+ if (!parms->interleaved){
+ h5dims[0] = blk_size;
+ h5dims[1] = buf_size;
+ }else{
+ h5dims[0] = buf_size;
+ h5dims[1] = blk_size;
}
+ h5mem_space_id = H5Screate_simple(2, h5dims, NULL);
+ VRFY((h5mem_space_id >= 0), "H5Screate_simple");
+ } /* end if */
+ else {
+ h5mem_space_id = H5Screate(H5S_SCALAR);
+ VRFY((h5mem_space_id >= 0), "H5Screate");
+ } /* end else */
+ } /* end else */
- /* Change to collective I/O, if asked */
- if(parms->collective) {
- hrc = H5Pset_dxpl_mpio(h5dxpl, H5FD_MPIO_COLLECTIVE);
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Property List Set failed\n");
- GOTOERROR(FAIL);
- } /* end if */
- } /* end if */
- break;
+ /* Create the dataset transfer property list */
+ h5dxpl = H5Pcreate(H5P_DATASET_XFER);
+ if (h5dxpl < 0) {
+ fprintf(stderr, "HDF5 Property List Create failed\n");
+ GOTOERROR(FAIL);
+ }
+
+ /* Change to collective I/O, if asked */
+ if(parms->collective) {
+ hrc = H5Pset_dxpl_mpio(h5dxpl, H5FD_MPIO_COLLECTIVE);
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Property List Set failed\n");
+ GOTOERROR(FAIL);
+ } /* end if */
+ } /* end if */
+ break;
} /* end switch */
for (ndset = 1; ndset <= ndsets; ++ndset) {
- /* Calculate dataset offset within a file */
+ /* Calculate dataset offset within a file */
- /* create dataset */
- switch (parms->io_type) {
+ /* create dataset */
+ switch (parms->io_type) {
case POSIXIO:
case MPIO:
- /* both posix and mpi io just need dataset offset in file*/
- dset_offset = (ndset - 1) * nbytes;
- break;
+ /* both posix and mpi io just need dataset offset in file*/
+ dset_offset = (ndset - 1) * nbytes;
+ break;
case PHDF5:
- sprintf(dname, "Dataset_%ld", ndset);
- h5ds_id = H5Dopen(fd->h5fd, dname);
- if (h5ds_id < 0) {
- fprintf(stderr, "HDF5 Dataset open failed\n");
- GOTOERROR(FAIL);
- }
-
- break;
+ sprintf(dname, "Dataset_%ld", ndset);
+ h5ds_id = H5DOPEN(fd->h5fd, dname);
+ if (h5ds_id < 0) {
+ fprintf(stderr, "HDF5 Dataset open failed\n");
+ GOTOERROR(FAIL);
}
- /* The task is to transfer bytes_count bytes, starting at
- * bytes_begin position, using transfer buffer of buf_size bytes.
- * If interleaved, select buf_size at a time, in round robin
- * fashion, according to number of process. Otherwise, select
- * all bytes_count in contiguous.
- */
- nbytes_xfer = 0 ;
+ break;
+ }
+
+ /* The task is to transfer bytes_count bytes, starting at
+ * bytes_begin position, using transfer buffer of buf_size bytes.
+ * If interleaved, select buf_size at a time, in round robin
+ * fashion, according to number of process. Otherwise, select
+ * all bytes_count in contiguous.
+ */
+ nbytes_xfer = 0 ;
+ /* 1D dataspace */
+ if (!parms->dim2d){
+ /* Set base file offset for all I/O patterns and POSIX access */
+ posix_file_offset = dset_offset + bytes_begin[0];
+
+ /* Set base file offset for all I/O patterns and MPI access */
+ mpi_file_offset = (MPI_Offset)(dset_offset + bytes_begin[0]);
+ } /* end if */
+ else {
/* Set base file offset for all I/O patterns and POSIX access */
- posix_file_offset = dset_offset + bytes_begin;
+ posix_file_offset=dset_offset + bytes_begin[0]*snbytes+
+ bytes_begin[1];
/* Set base file offset for all I/O patterns and MPI access */
- mpi_file_offset = (MPI_Offset)(dset_offset + bytes_begin);
+ mpi_file_offset=(MPI_Offset)(dset_offset + bytes_begin[0]*snbytes+
+ bytes_begin[1]);
+ } /* end else */
- /* Start "raw data" read timer */
- set_time(res->timers, HDF5_RAW_READ_FIXED_DIMS, START);
+ /* Start "raw data" read timer */
+ set_time(res->timers, HDF5_RAW_READ_FIXED_DIMS, START);
- while (nbytes_xfer < bytes_count){
- /* Read */
- /* Calculate offset of read within a dataset/file */
- switch (parms->io_type) {
- case POSIXIO:
+ while (nbytes_xfer < bytes_count){
+ /* Read */
+ /* Calculate offset of read within a dataset/file */
+ switch (parms->io_type) {
+ case POSIXIO:
+ /* 1D dataspace */
+ if (!parms->dim2d){
/* Contiguous pattern */
- if (parms->interleaved==0) {
+ if (!parms->interleaved) {
/* Compute file offset */
file_offset = posix_file_offset + (off_t)nbytes_xfer;
@@ -1189,7 +1916,7 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
rc = POSIXSEEK(fd->posixfd, file_offset) < 0 ? -1 : 0;
VRFY((rc==0), "POSIXSEEK");
- /* check if all bytes are written */
+ /* check if all bytes are read */
rc = ((ssize_t)buf_size ==
POSIXREAD(fd->posixfd, buffer, buf_size));
VRFY((rc != 0), "POSIXREAD");
@@ -1207,44 +1934,145 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
/* Loop over the buffers to read */
while(nbytes_toxfer>0) {
- /* Skip offset over blocks of other processes */
- file_offset = posix_file_offset +
- (off_t)(nbytes_xfer*pio_mpi_nprocs_g);
+ /* Skip offset over blocks of other processes */
+ file_offset = posix_file_offset +
+ (off_t)(nbytes_xfer*pio_mpi_nprocs_g);
- /* only care if seek returns error */
- rc = POSIXSEEK(fd->posixfd, file_offset) < 0 ? -1 : 0;
- VRFY((rc==0), "POSIXSEEK");
+ /* only care if seek returns error */
+ rc = POSIXSEEK(fd->posixfd, file_offset) < 0 ? -1 : 0;
+ VRFY((rc==0), "POSIXSEEK");
- /* check if all bytes are written */
- rc = ((ssize_t)blk_size ==
- POSIXREAD(fd->posixfd, buf_p, blk_size));
- VRFY((rc != 0), "POSIXREAD");
+ /* check if all bytes are read */
+ rc = ((ssize_t)blk_size ==
+ POSIXREAD(fd->posixfd, buf_p, blk_size));
+ VRFY((rc != 0), "POSIXREAD");
- /* Advance location in buffer */
- buf_p+=blk_size;
+ /* Advance location in buffer */
+ buf_p+=blk_size;
- /* Advance global offset in dataset */
- nbytes_xfer+=blk_size;
+ /* Advance global offset in dataset */
+ nbytes_xfer+=blk_size;
- /* Decrement number of bytes left this time */
- nbytes_toxfer-=blk_size;
+ /* Decrement number of bytes left this time */
+ nbytes_toxfer-=blk_size;
} /* end while */
} /* end else */
- break;
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ /* Contiguous storage */
+ if (!parms->h5_use_chunks) {
+ /* Contiguous access pattern */
+ if (!parms->interleaved) {
+ /* Compute file offset */
+ file_offset=posix_file_offset+(off_t)(((nbytes_xfer/blk_size)
+ /snbytes)*(blk_size*snbytes)+((nbytes_xfer/blk_size)%snbytes));
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = buf_size;
+
+ /* Global offset advance after each I/O operation */
+ file_offset_advance = (off_t)snbytes;
+ } /* end if */
+ /* Interleaved access pattern */
+ else {
+ /* Compute file offset */
+ file_offset=posix_file_offset+(off_t)((((nbytes_xfer/buf_size)
+ *pio_mpi_nprocs_g)/snbytes)*(buf_size*snbytes)
+ +((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)%snbytes);
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size;
+
+ /* Global offset advance after each I/O operation */
+ file_offset_advance = (off_t)snbytes;
+ } /* end else */
+ } /* end if */
+ /* Chunked storage */
+ else {
+ /*Contiguous access pattern */
+ if (!parms->interleaved) {
+ /* Compute file offset */
+ file_offset=posix_file_offset+(off_t)nbytes_xfer;
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size * buf_size;
+
+ /* Global offset advance after each I/O operation */
+ file_offset_advance = 0;
+ } /* end if */
+ /*Interleaved access pattern */
+ else {
+ /* Compute file offset */
+ /* Before simplification */
+ /* file_offset=posix_file_offset+(off_t)((nbytes_xfer/(buf_size/blk_size)
+ *pio_mpi_nprocs_g)/(snbytes/blk_size*(blk_size*blk_size))*(buf_size/blk_size
+ *snbytes/blk_size*(blk_size*blk_size))+((nbytes_xfer/(buf_size/blk_size))
+ *pio_mpi_nprocs_g)%(snbytes/blk_size*(blk_size*blk_size))); */
+
+ file_offset=posix_file_offset+(off_t)(((nbytes_xfer/(buf_size/blk_size)
+ *pio_mpi_nprocs_g)/(snbytes*blk_size))*(buf_size*snbytes)+((nbytes_xfer/(buf_size/blk_size))
+ *pio_mpi_nprocs_g)%(snbytes*blk_size));
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size * blk_size;
+
+ /* Global offset advance after each I/O operation */
+ /* file_offset_advance = (off_t)(snbytes/blk_size*(blk_size*blk_size)); */
+ file_offset_advance = (off_t)(snbytes*blk_size);
+ } /* end else */
+ } /* end else */
+
+ /* Common code for file access */
+
+ /* Set the base of user's buffer */
+ buf_p = (unsigned char *)buffer;
+
+ /* Set the number of bytes to transfer this time */
+ nbytes_toxfer = buf_size*blk_size;
+
+ /* Loop over portions of the buffer to read */
+ while(nbytes_toxfer>0){
+ /* only care if seek returns error */
+ rc = POSIXSEEK(fd->posixfd, file_offset) < 0 ? -1 : 0;
+ VRFY((rc==0), "POSIXSEEK");
+
+ /* check if all bytes are read */
+ rc = ((ssize_t)nbytes_xfer_advance ==
+ POSIXREAD(fd->posixfd, buf_p, nbytes_xfer_advance));
+ VRFY((rc != 0), "POSIXREAD");
+
+ /* Advance location in buffer */
+ buf_p+=nbytes_xfer_advance;
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=nbytes_xfer_advance;
- case MPIO:
+ /* Decrement number of bytes left this time */
+ nbytes_toxfer-=nbytes_xfer_advance;
+
+ /* Partially advance file offset */
+ file_offset+=file_offset_advance;
+ } /* end while */
+
+ } /* end else */
+ break;
+
+ case MPIO:
+ /* 1D dataspace */
+ if (!parms->dim2d){
/* Independent file access */
- if(parms->collective==0) {
+ if(!parms->collective) {
/* Contiguous pattern */
- if (parms->interleaved==0){
+ if (!parms->interleaved){
/* Compute offset in file */
mpi_offset = mpi_file_offset +
nbytes_xfer;
/* Perform independent read */
mrc = MPI_File_read_at(fd->mpifd, mpi_offset, buffer,
- (int)(buf_size/blk_size), mpi_blk_type,
- &mpi_status);
+ (int)(buf_size/blk_size), mpi_blk_type,
+ &mpi_status);
VRFY((mrc==MPI_SUCCESS), "MPIO_READ");
/* Advance global offset in dataset */
@@ -1262,11 +2090,11 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
while(nbytes_toxfer>0) {
/* Skip offset over blocks of other processes */
mpi_offset = mpi_file_offset +
- (nbytes_xfer*pio_mpi_nprocs_g);
+ (nbytes_xfer*pio_mpi_nprocs_g);
/* Perform independent read */
mrc = MPI_File_read_at(fd->mpifd, mpi_offset, buf_p,
- (int)1, mpi_blk_type, &mpi_status);
+ (int)1, mpi_blk_type, &mpi_status);
VRFY((mrc==MPI_SUCCESS), "MPIO_READ");
/* Advance location in buffer */
@@ -1283,14 +2111,14 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
/* Collective file access */
else {
/* Contiguous access pattern */
- if (parms->interleaved==0){
+ if (!parms->interleaved){
/* Compute offset in file */
mpi_offset = mpi_file_offset +
nbytes_xfer;
/* Perform collective read */
mrc = MPI_File_read_at_all(fd->mpifd, mpi_offset, buffer,
- (int)(buf_size/blk_size), mpi_blk_type, &mpi_status);
+ (int)(buf_size/blk_size), mpi_blk_type, &mpi_status);
VRFY((mrc==MPI_SUCCESS), "MPIO_READ");
/* Advance global offset in dataset */
@@ -1304,133 +2132,318 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
/* Set the file view */
mrc = MPI_File_set_view(fd->mpifd, mpi_offset, mpi_blk_type,
- mpi_file_type, (char*)"native", h5_io_info_g);
+ mpi_file_type, (char*)"native", h5_io_info_g);
VRFY((mrc==MPI_SUCCESS), "MPIO_VIEW");
/* Perform collective read */
mrc = MPI_File_read_at_all(fd->mpifd, 0, buffer,
- (int)(buf_size/blk_size), mpi_blk_type, &mpi_status);
+ (int)(buf_size/blk_size), mpi_blk_type, &mpi_status);
VRFY((mrc==MPI_SUCCESS), "MPIO_READ");
/* Advance global offset in dataset */
nbytes_xfer+=buf_size;
} /* end else */
} /* end else */
- break;
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ /* Contiguous storage */
+ if (!parms->h5_use_chunks) {
+ /* Contiguous access pattern */
+ if (!parms->interleaved) {
+ /* Compute offset in file */
+ mpi_offset=mpi_file_offset+((nbytes_xfer/blk_size)/snbytes)*
+ (blk_size*snbytes)+((nbytes_xfer/blk_size)%snbytes);
- case PHDF5:
- /* Set up the file dset space id to move the selection to process */
- if (parms->interleaved==0){
- /* Contiguous pattern */
- h5offset[0] = nbytes_xfer;
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = buf_size;
+
+ /* Global offset advance after each I/O operation */
+ mpi_offset_advance = snbytes;
+
+ /* MPI type to be used for collective access */
+ mpi_collective_type = mpi_cont_type;
+ } /* end if */
+ /* Interleaved access pattern */
+ else {
+ /* Compute offset in file */
+ mpi_offset=mpi_file_offset+(((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)/snbytes)*
+ (buf_size*snbytes)+((nbytes_xfer/buf_size)*pio_mpi_nprocs_g)%snbytes;
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size;
+
+ /* Global offset advance after each I/O operation */
+ mpi_offset_advance = snbytes;
+
+ /* MPI type to be used for collective access */
+ mpi_collective_type = mpi_inter_type;
+ } /* end else */
} /* end if */
+ /* Chunked storage */
else {
- /* Interleaved access pattern */
- /* Skip offset over blocks of other processes */
- h5offset[0] = (nbytes_xfer*pio_mpi_nprocs_g);
+ /*Contiguous access pattern */
+ if (!parms->interleaved) {
+ /* Compute offset in file */
+ mpi_offset=mpi_file_offset+nbytes_xfer;
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size * buf_size;
+
+ /* Global offset advance after each I/O operation */
+ mpi_offset_advance = 0;
+
+ /* MPI type to be used for collective access */
+ mpi_collective_type = mpi_full_buffer;
+ } /* end if */
+ /*Interleaved access pattern */
+ else {
+ /* Compute offset in file */
+ /* Before simplification */
+ /* mpi_offset=mpi_file_offset+(nbytes_xfer/(buf_size/blk_size)
+ *pio_mpi_nprocs_g)/(snbytes/blk_size*(blk_size*blk_size))*
+ (buf_size/blk_size*snbytes/blk_size*(blk_size*blk_size))+
+ ((nbytes_xfer/(buf_size/blk_size))*pio_mpi_nprocs_g)%(snbytes
+ /blk_size*(blk_size*blk_size)); */
+ mpi_offset=mpi_file_offset+((nbytes_xfer/(buf_size/blk_size)
+ *pio_mpi_nprocs_g)/(snbytes*blk_size))*(buf_size*snbytes)
+ +((nbytes_xfer/(buf_size/blk_size))*pio_mpi_nprocs_g)%(snbytes*blk_size);
+
+ /* Number of bytes to be transferred per I/O operation */
+ nbytes_xfer_advance = blk_size * blk_size;
+
+ /* Global offset advance after each I/O operation */
+ /* mpi_offset_advance = (MPI_Offset)(snbytes/blk_size*(blk_size*blk_size)); */
+ mpi_offset_advance = (MPI_Offset)(snbytes*blk_size);
+
+ /* MPI type to be used for collective access */
+ mpi_collective_type = mpi_chunk_inter_type;
+ } /* end else */
} /* end else */
- hrc = H5Soffset_simple(h5dset_space_id, h5offset);
- VRFY((hrc >= 0), "H5Soffset_simple");
-
- /* Read the buffer in */
- hrc = H5Dread(h5ds_id, ELMT_H5_TYPE, h5mem_space_id,
- h5dset_space_id, h5dxpl, buffer);
- VRFY((hrc >= 0), "H5Dread");
-
- /* Increment number of bytes transferred */
- nbytes_xfer += buf_size;
-
- break;
- } /* switch (parms->io_type) */
-
- /* Verify raw data, if asked */
- if (parms->verify) {
- /* Verify data read */
- unsigned char *ucharptr = (unsigned char *)buffer;
- size_t i;
- int nerror=0;
-
- for (i = 0; i < buf_size; ++i){
- if (*ucharptr++ != pio_mpi_rank_g) {
- if (++nerror < 20){
- /* report at most 20 errors */
- HDprint_rank(output);
- HDfprintf(output, "read data error, expected (%Hd), "
- "got (%Hd)\n",
- (long_long)pio_mpi_rank_g,
- (long_long)*(ucharptr-1));
- } /* end if */
- } /* end if */
- } /* end for */
- if (nerror >= 20) {
- HDprint_rank(output);
- HDfprintf(output, "...");
- HDfprintf(output, "total read data errors=%d\n",
- nerror);
- } /* end if */
- } /* if (parms->verify) */
-
- } /* end while */
-
- /* Stop "raw data" read timer */
- set_time(res->timers, HDF5_RAW_READ_FIXED_DIMS, STOP);
-
- /* Calculate read time */
-
- /* Close dataset. Only HDF5 needs to do an explicit close. */
- if (parms->io_type == PHDF5) {
- hrc = H5Dclose(h5ds_id);
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Dataset Close failed\n");
- GOTOERROR(FAIL);
- }
+ /* Common code for independent file access */
+ if (!parms->collective) {
+ /* Set the base of user's buffer */
+ buf_p = (unsigned char *)buffer;
+
+ /* Set the number of bytes to transfer this time */
+ nbytes_toxfer = buf_size * blk_size;
+
+ /* Loop over portions of the buffer to read */
+ while(nbytes_toxfer>0){
+ /* Perform independent read */
+ mrc = MPI_File_read_at(fd->mpifd, mpi_offset, buf_p,
+ (int)nbytes_xfer_advance, MPI_BYTE, &mpi_status);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_READ");
+
+ /* Advance location in buffer */
+ buf_p+=nbytes_xfer_advance;
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=nbytes_xfer_advance;
- h5ds_id = -1;
+ /* Decrement number of bytes left this time */
+ nbytes_toxfer-=nbytes_xfer_advance;
+
+ /* Partially advance global offset in dataset */
+ mpi_offset+=mpi_offset_advance;
+ } /* end while */
+ } /* end if */
+
+ /* Common code for collective file access */
+ else {
+ /* Set the file view */
+ mrc = MPI_File_set_view(fd->mpifd, mpi_offset, MPI_BYTE,
+ mpi_collective_type, (char *)"native", h5_io_info_g);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_VIEW");
+
+ /* Perform read */
+		    mrc = MPI_File_read_at_all(fd->mpifd, 0, buffer, (int)(buf_size*blk_size),
+			MPI_BYTE, &mpi_status);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_READ");
+
+ /* Advance global offset in dataset */
+ nbytes_xfer+=buf_size*blk_size;
+ } /* end else */
+
+ } /* end else */
+ break;
+
+ case PHDF5:
+ /* 1D dataspace */
+ if (!parms->dim2d){
+ /* Set up the file dset space id to move the selection to process */
+ if (!parms->interleaved){
+ /* Contiguous pattern */
+ h5offset[0] = nbytes_xfer;
+ } /* end if */
+ else {
+ /* Interleaved access pattern */
+ /* Skip offset over blocks of other processes */
+ h5offset[0] = (nbytes_xfer*pio_mpi_nprocs_g);
+ } /* end else */
+ hrc = H5Soffset_simple(h5dset_space_id, h5offset);
+ VRFY((hrc >= 0), "H5Soffset_simple");
+
+ /* Read the buffer in */
+ hrc = H5Dread(h5ds_id, ELMT_H5_TYPE, h5mem_space_id,
+ h5dset_space_id, h5dxpl, buffer);
+ VRFY((hrc >= 0), "H5Dread");
+
+ /* Increment number of bytes transferred */
+ nbytes_xfer += buf_size;
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ /* Set up the file dset space id to move the selection to process */
+ if (!parms->interleaved){
+ /* Contiguous pattern */
+ h5offset[0] = (nbytes_xfer/(snbytes*blk_size))*blk_size;
+ h5offset[1] = (nbytes_xfer%(snbytes*blk_size))/blk_size;
+ } /* end if */
+ else {
+ /* Interleaved access pattern */
+ /* Skip offset over blocks of other processes */
+ h5offset[0] = ((nbytes_xfer*pio_mpi_nprocs_g)/(snbytes*buf_size))*buf_size;
+ h5offset[1] = ((nbytes_xfer*pio_mpi_nprocs_g)%(snbytes*buf_size))/buf_size;
+
+ } /* end else */
+ hrc = H5Soffset_simple(h5dset_space_id, h5offset);
+ VRFY((hrc >= 0), "H5Soffset_simple");
+
+		/* Read the buffer in */
+ hrc = H5Dread(h5ds_id, ELMT_H5_TYPE, h5mem_space_id,
+ h5dset_space_id, h5dxpl, buffer);
+ VRFY((hrc >= 0), "H5Dread");
+
+ /* Increment number of bytes transferred */
+ nbytes_xfer += buf_size*blk_size;
+
+ } /* end else */
+ break;
+ } /* switch (parms->io_type) */
+
+ /* Verify raw data, if asked */
+ if (parms->verify) {
+ /* Verify data read */
+ unsigned char *ucharptr = (unsigned char *)buffer;
+ size_t i;
+ int nerror=0;
+
+ for (i = 0; i < bsize; ++i){
+ if (*ucharptr++ != pio_mpi_rank_g+1) {
+ if (++nerror < 20){
+ /* report at most 20 errors */
+ HDprint_rank(output);
+ HDfprintf(output, "read data error, expected (%Hd), "
+ "got (%Hd)\n",
+ (long_long)pio_mpi_rank_g+1,
+ (long_long)*(ucharptr-1));
+ } /* end if */
+ } /* end if */
+ } /* end for */
+ if (nerror >= 20) {
+ HDprint_rank(output);
+ HDfprintf(output, "...");
+ HDfprintf(output, "total read data errors=%d\n",
+ nerror);
} /* end if */
+ } /* if (parms->verify) */
+
+ } /* end while */
+
+ /* Stop "raw data" read timer */
+ set_time(res->timers, HDF5_RAW_READ_FIXED_DIMS, STOP);
+
+ /* Calculate read time */
+
+ /* Close dataset. Only HDF5 needs to do an explicit close. */
+ if (parms->io_type == PHDF5) {
+ hrc = H5Dclose(h5ds_id);
+
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Dataset Close failed\n");
+ GOTOERROR(FAIL);
+ }
+
+ h5ds_id = -1;
+ } /* end if */
} /* end for */
done:
/* release MPI-I/O objects */
if (parms->io_type == MPIO) {
- /* Free file type */
- mrc = MPI_Type_free( &mpi_file_type );
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+ /* 1D dataspace */
+ if (!parms->dim2d){
+ /* Free file type */
+ mrc = MPI_Type_free( &mpi_file_type );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free buffer type */
+ mrc = MPI_Type_free( &mpi_blk_type );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+ } /* end if */
+ /* 2D dataspace */
+ else {
+ /* Free partial buffer type for contiguous access */
+ mrc = MPI_Type_free( &mpi_partial_buffer_cont );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free contiguous file type */
+ mrc = MPI_Type_free( &mpi_cont_type );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
- /* Free buffer type */
- mrc = MPI_Type_free( &mpi_blk_type );
- VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+ /* Free partial buffer type for interleaved access */
+ mrc = MPI_Type_free( &mpi_partial_buffer_inter );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free interleaved file type */
+ mrc = MPI_Type_free( &mpi_inter_type );
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free full buffer type */
+ mrc = MPI_Type_free(&mpi_full_buffer);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free full chunk type */
+ mrc = MPI_Type_free(&mpi_full_chunk);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+
+ /* Free chunk interleaved file type */
+ mrc = MPI_Type_free(&mpi_chunk_inter_type);
+ VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_FREE");
+ } /* end else */
} /* end if */
/* release HDF5 objects */
if (h5dset_space_id != -1) {
- hrc = H5Sclose(h5dset_space_id);
- if (hrc < 0){
- fprintf(stderr, "HDF5 Dataset Space Close failed\n");
- ret_code = FAIL;
- } else {
- h5dset_space_id = -1;
- }
+ hrc = H5Sclose(h5dset_space_id);
+ if (hrc < 0){
+ fprintf(stderr, "HDF5 Dataset Space Close failed\n");
+ ret_code = FAIL;
+ } else {
+ h5dset_space_id = -1;
+ }
}
if (h5mem_space_id != -1) {
- hrc = H5Sclose(h5mem_space_id);
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Memory Space Close failed\n");
- ret_code = FAIL;
- } else {
- h5mem_space_id = -1;
- }
+ hrc = H5Sclose(h5mem_space_id);
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Memory Space Close failed\n");
+ ret_code = FAIL;
+ } else {
+ h5mem_space_id = -1;
+ }
}
if (h5dxpl != -1) {
- hrc = H5Pclose(h5dxpl);
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Dataset Transfer Property List Close failed\n");
- ret_code = FAIL;
- } else {
- h5dxpl = -1;
- }
+ hrc = H5Pclose(h5dxpl);
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Dataset Transfer Property List Close failed\n");
+ ret_code = FAIL;
+ } else {
+ h5dxpl = -1;
+ }
}
return ret_code;
@@ -1443,7 +2456,7 @@ done:
* Programmer: Albert Cheng, Bill Wendling, 2001/12/13
* Modifications:
*/
-static herr_t
+ static herr_t
do_fopen(parameters *param, char *fname, file_descr *fd /*out*/, int flags)
{
int ret_code = SUCCESS, mrc;
@@ -1454,13 +2467,13 @@ do_fopen(parameters *param, char *fname, file_descr *fd /*out*/, int flags)
switch (param->io_type) {
case POSIXIO:
if (flags & (PIO_CREATE | PIO_WRITE))
- fd->posixfd = POSIXCREATE(fname);
+ fd->posixfd = POSIXCREATE(fname);
else
- fd->posixfd = POSIXOPEN(fname, O_RDONLY);
+ fd->posixfd = POSIXOPEN(fname, O_RDONLY);
if (fd->posixfd < 0 ) {
- fprintf(stderr, "POSIX File Open failed(%s)\n", fname);
- GOTOERROR(FAIL);
+ fprintf(stderr, "POSIX File Open failed(%s)\n", fname);
+ GOTOERROR(FAIL);
}
@@ -1478,31 +2491,31 @@ do_fopen(parameters *param, char *fname, file_descr *fd /*out*/, int flags)
case MPIO:
if (flags & (PIO_CREATE | PIO_WRITE)) {
- MPI_File_delete(fname, h5_io_info_g);
- mrc = MPI_File_open(pio_comm_g, fname, MPI_MODE_CREATE | MPI_MODE_RDWR,
- h5_io_info_g, &fd->mpifd);
+ MPI_File_delete(fname, h5_io_info_g);
+ mrc = MPI_File_open(pio_comm_g, fname, MPI_MODE_CREATE | MPI_MODE_RDWR,
+ h5_io_info_g, &fd->mpifd);
- if (mrc != MPI_SUCCESS) {
- fprintf(stderr, "MPI File Open failed(%s)\n", fname);
- GOTOERROR(FAIL);
- }
+ if (mrc != MPI_SUCCESS) {
+ fprintf(stderr, "MPI File Open failed(%s)\n", fname);
+ GOTOERROR(FAIL);
+ }
- /*since MPI_File_open with MPI_MODE_CREATE does not truncate */
- /*filesize , set size to 0 explicitedly. */
- mrc = MPI_File_set_size(fd->mpifd, (MPI_Offset)0);
+	    /* since MPI_File_open with MPI_MODE_CREATE does not truncate the */
+	    /* file size, set the size to 0 explicitly. */
+ mrc = MPI_File_set_size(fd->mpifd, (MPI_Offset)0);
- if (mrc != MPI_SUCCESS) {
- fprintf(stderr, "MPI_File_set_size failed\n");
- GOTOERROR(FAIL);
- }
+ if (mrc != MPI_SUCCESS) {
+ fprintf(stderr, "MPI_File_set_size failed\n");
+ GOTOERROR(FAIL);
+ }
} else {
- mrc = MPI_File_open(pio_comm_g, fname, MPI_MODE_RDONLY,
- h5_io_info_g, &fd->mpifd);
+ mrc = MPI_File_open(pio_comm_g, fname, MPI_MODE_RDONLY,
+ h5_io_info_g, &fd->mpifd);
- if (mrc != MPI_SUCCESS) {
- fprintf(stderr, "MPI File Open failed(%s)\n", fname);
- GOTOERROR(FAIL);
- }
+ if (mrc != MPI_SUCCESS) {
+ fprintf(stderr, "MPI File Open failed(%s)\n", fname);
+ GOTOERROR(FAIL);
+ }
}
break;
@@ -1510,57 +2523,53 @@ do_fopen(parameters *param, char *fname, file_descr *fd /*out*/, int flags)
case PHDF5:
acc_tpl = H5Pcreate(H5P_FILE_ACCESS);
if (acc_tpl < 0) {
- fprintf(stderr, "HDF5 Property List Create failed\n");
- GOTOERROR(FAIL);
+ fprintf(stderr, "HDF5 Property List Create failed\n");
+ GOTOERROR(FAIL);
}
/* Use the appropriate VFL driver */
if(param->h5_use_mpi_posix) {
- /* Set the file driver to the MPI-posix driver */
-#ifdef H5_WANT_H5_V1_4_COMPAT
- hrc = H5Pset_fapl_mpiposix(acc_tpl, pio_comm_g);
-#else /* H5_WANT_H5_V1_4_COMPAT */
- hrc = H5Pset_fapl_mpiposix(acc_tpl, pio_comm_g, use_gpfs);
-#endif /* H5_WANT_H5_V1_4_COMPAT */
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Property List Set failed\n");
- GOTOERROR(FAIL);
- }
+ /* Set the file driver to the MPI-posix driver */
+ hrc = H5Pset_fapl_mpiposix(acc_tpl, pio_comm_g, use_gpfs);
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Property List Set failed\n");
+ GOTOERROR(FAIL);
+ }
} /* end if */
else {
- /* Set the file driver to the MPI-I/O driver */
- hrc = H5Pset_fapl_mpio(acc_tpl, pio_comm_g, h5_io_info_g);
- if (hrc < 0) {
- fprintf(stderr, "HDF5 Property List Set failed\n");
- GOTOERROR(FAIL);
- }
+ /* Set the file driver to the MPI-I/O driver */
+ hrc = H5Pset_fapl_mpio(acc_tpl, pio_comm_g, h5_io_info_g);
+ if (hrc < 0) {
+ fprintf(stderr, "HDF5 Property List Set failed\n");
+ GOTOERROR(FAIL);
+ }
} /* end else */
/* Set the alignment of objects in HDF5 file */
hrc = H5Pset_alignment(acc_tpl, param->h5_thresh, param->h5_align);
if (hrc < 0) {
- fprintf(stderr, "HDF5 Property List Set failed\n");
- GOTOERROR(FAIL);
+ fprintf(stderr, "HDF5 Property List Set failed\n");
+ GOTOERROR(FAIL);
}
/* create the parallel file */
if (flags & (PIO_CREATE | PIO_WRITE)) {
- fd->h5fd = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ fd->h5fd = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
} else {
- fd->h5fd = H5Fopen(fname, H5F_ACC_RDONLY, acc_tpl);
+ fd->h5fd = H5Fopen(fname, H5F_ACC_RDONLY, acc_tpl);
}
hrc = H5Pclose(acc_tpl);
if (fd->h5fd < 0) {
- fprintf(stderr, "HDF5 File Create failed(%s)\n", fname);
- GOTOERROR(FAIL);
+ fprintf(stderr, "HDF5 File Create failed(%s)\n", fname);
+ GOTOERROR(FAIL);
}
/* verifying the close of the acc_tpl */
if (hrc < 0) {
- fprintf(stderr, "HDF5 Property List Close failed\n");
- GOTOERROR(FAIL);
+ fprintf(stderr, "HDF5 Property List Close failed\n");
+ GOTOERROR(FAIL);
}
break;
@@ -1577,7 +2586,7 @@ done:
* Programmer: Albert Cheng, Bill Wendling, 2001/12/13
* Modifications:
*/
-static herr_t
+ static herr_t
do_fclose(iotype iot, file_descr *fd /*out*/)
{
herr_t ret_code = SUCCESS, hrc;
@@ -1588,8 +2597,8 @@ do_fclose(iotype iot, file_descr *fd /*out*/)
rc = POSIXCLOSE(fd->posixfd);
if (rc != 0){
- fprintf(stderr, "POSIX File Close failed\n");
- GOTOERROR(FAIL);
+ fprintf(stderr, "POSIX File Close failed\n");
+ GOTOERROR(FAIL);
}
fd->posixfd = -1;
@@ -1599,8 +2608,8 @@ do_fclose(iotype iot, file_descr *fd /*out*/)
mrc = MPI_File_close(&fd->mpifd);
if (mrc != MPI_SUCCESS){
- fprintf(stderr, "MPI File close failed\n");
- GOTOERROR(FAIL);
+ fprintf(stderr, "MPI File close failed\n");
+ GOTOERROR(FAIL);
}
fd->mpifd = MPI_FILE_NULL;
@@ -1610,8 +2619,8 @@ do_fclose(iotype iot, file_descr *fd /*out*/)
hrc = H5Fclose(fd->h5fd);
if (hrc < 0) {
- fprintf(stderr, "HDF5 File Close failed\n");
- GOTOERROR(FAIL);
+ fprintf(stderr, "HDF5 File Close failed\n");
+ GOTOERROR(FAIL);
}
fd->h5fd = -1;
@@ -1626,37 +2635,37 @@ done:
/*
 * Function:    do_cleanupfile
* Purpose: Cleanup temporary file unless HDF5_NOCLEANUP is set.
- * Only Proc 0 of the PIO communicator will do the cleanup.
- * Other processes just return.
+ * Only Proc 0 of the PIO communicator will do the cleanup.
+ * Other processes just return.
* Return: void
* Programmer: Albert Cheng 2001/12/12
* Modifications:
*/
-static void
+ static void
do_cleanupfile(iotype iot, char *fname)
{
if (pio_mpi_rank_g != 0)
- return;
+ return;
if (clean_file_g == -1)
- clean_file_g = (getenv("HDF5_NOCLEANUP")==NULL) ? 1 : 0;
+ clean_file_g = (getenv("HDF5_NOCLEANUP")==NULL) ? 1 : 0;
if (clean_file_g){
- switch (iot){
+ switch (iot){
case POSIXIO:
- remove(fname);
- break;
+ remove(fname);
+ break;
case MPIO:
case PHDF5:
- MPI_File_delete(fname, h5_io_info_g);
- break;
- }
+ MPI_File_delete(fname, h5_io_info_g);
+ break;
+ }
}
}
#ifdef H5_HAVE_GPFS
- /* Descriptions here come from the IBM GPFS Manual */
+/* Descriptions here come from the IBM GPFS Manual */
/*
* Function: gpfs_access_range
@@ -1687,12 +2696,12 @@ do_cleanupfile(iotype iot, char *fname)
* Programmer: Bill Wendling, 03. June 2002
* Modifications:
*/
-static void
+ static void
gpfs_access_range(int handle, off_t start, off_t length, int is_write)
{
struct {
- gpfsFcntlHeader_t hdr;
- gpfsAccessRange_t access;
+ gpfsFcntlHeader_t hdr;
+ gpfsAccessRange_t access;
} access_range;
access_range.hdr.totalLength = sizeof(access_range);
@@ -1705,10 +2714,10 @@ gpfs_access_range(int handle, off_t start, off_t length, int is_write)
access_range.access.isWrite = is_write;
if (gpfs_fcntl(handle, &access_range) != 0) {
- fprintf(stderr,
- "gpfs_fcntl DS start directive failed. errno=%d errorOffset=%d\n",
- errno, access_range.hdr.errorOffset);
- exit(EXIT_FAILURE);
+ fprintf(stderr,
+ "gpfs_fcntl DS start directive failed. errno=%d errorOffset=%d\n",
+ errno, access_range.hdr.errorOffset);
+ exit(EXIT_FAILURE);
}
}
@@ -1736,12 +2745,12 @@ gpfs_access_range(int handle, off_t start, off_t length, int is_write)
* Programmer: Bill Wendling, 03. June 2002
* Modifications:
*/
-static void
+ static void
gpfs_free_range(int handle, off_t start, off_t length)
{
struct {
- gpfsFcntlHeader_t hdr;
- gpfsFreeRange_t range;
+ gpfsFcntlHeader_t hdr;
+ gpfsFreeRange_t range;
} free_range;
/* Issue the invalidate hint */
@@ -1754,10 +2763,10 @@ gpfs_free_range(int handle, off_t start, off_t length)
free_range.range.length = length;
if (gpfs_fcntl(handle, &free_range) != 0) {
- fprintf(stderr,
- "gpfs_fcntl free range failed for range %d:%d. errno=%d errorOffset=%d\n",
- start, length, errno, free_range.hdr.errorOffset);
- exit(EXIT_FAILURE);
+ fprintf(stderr,
+ "gpfs_fcntl free range failed for range %d:%d. errno=%d errorOffset=%d\n",
+ start, length, errno, free_range.hdr.errorOffset);
+ exit(EXIT_FAILURE);
}
}
@@ -1780,12 +2789,12 @@ gpfs_free_range(int handle, off_t start, off_t length)
* Programmer: Bill Wendling, 03. June 2002
* Modifications:
*/
-static void
+ static void
gpfs_clear_file_cache(int handle)
{
struct {
- gpfsFcntlHeader_t hdr;
- gpfsClearFileCache_t clear;
+ gpfsFcntlHeader_t hdr;
+ gpfsClearFileCache_t clear;
} clear_cache;
clear_cache.hdr.totalLength = sizeof(clear_cache);
@@ -1795,10 +2804,10 @@ gpfs_clear_file_cache(int handle)
clear_cache.clear.structType = GPFS_CLEAR_FILE_CACHE;
if (gpfs_fcntl(handle, &clear_cache) != 0) {
- fprintf(stderr,
- "gpfs_fcntl clear file cache directive failed. errno=%d errorOffset=%d\n",
- errno, clear_cache.hdr.errorOffset);
- exit(EXIT_FAILURE);
+ fprintf(stderr,
+ "gpfs_fcntl clear file cache directive failed. errno=%d errorOffset=%d\n",
+ errno, clear_cache.hdr.errorOffset);
+ exit(EXIT_FAILURE);
}
}
@@ -1826,12 +2835,12 @@ gpfs_clear_file_cache(int handle)
* Programmer: Bill Wendling, 03. June 2002
* Modifications:
*/
-static void
+ static void
gpfs_cancel_hints(int handle)
{
struct {
- gpfsFcntlHeader_t hdr;
- gpfsCancelHints_t cancel;
+ gpfsFcntlHeader_t hdr;
+ gpfsCancelHints_t cancel;
} cancel_hints;
cancel_hints.hdr.totalLength = sizeof(cancel_hints);
@@ -1841,10 +2850,10 @@ gpfs_cancel_hints(int handle)
cancel_hints.cancel.structType = GPFS_CANCEL_HINTS;
if (gpfs_fcntl(handle, &cancel_hints) != 0) {
- fprintf(stderr,
- "gpfs_fcntl cancel hints directive failed. errno=%d errorOffset=%d\n",
- errno, cancel_hints.hdr.errorOffset);
- exit(EXIT_FAILURE);
+ fprintf(stderr,
+ "gpfs_fcntl cancel hints directive failed. errno=%d errorOffset=%d\n",
+ errno, cancel_hints.hdr.errorOffset);
+ exit(EXIT_FAILURE);
}
}
@@ -1872,12 +2881,12 @@ gpfs_cancel_hints(int handle)
* Programmer: Bill Wendling, 28. May 2002
* Modifications:
*/
-static void
+ static void
gpfs_start_data_shipping(int handle, int num_insts)
{
struct {
- gpfsFcntlHeader_t hdr;
- gpfsDataShipStart_t start;
+ gpfsFcntlHeader_t hdr;
+ gpfsDataShipStart_t start;
} ds_start;
ds_start.hdr.totalLength = sizeof(ds_start);
@@ -1889,10 +2898,10 @@ gpfs_start_data_shipping(int handle, int num_insts)
ds_start.start.reserved = 0;
if (gpfs_fcntl(handle, &ds_start) != 0) {
- fprintf(stderr,
- "gpfs_fcntl DS start directive failed. errno=%d errorOffset=%d\n",
- errno, ds_start.hdr.errorOffset);
- exit(EXIT_FAILURE);
+ fprintf(stderr,
+ "gpfs_fcntl DS start directive failed. errno=%d errorOffset=%d\n",
+ errno, ds_start.hdr.errorOffset);
+ exit(EXIT_FAILURE);
}
}
@@ -1915,14 +2924,14 @@ gpfs_start_data_shipping(int handle, int num_insts)
* Programmer: Bill Wendling, 10. Jul 2002
* Modifications:
*/
-static void
+ static void
gpfs_start_data_ship_map(int handle, int partition_size, int agent_count,
- int *agent_node_num)
+ int *agent_node_num)
{
int i;
struct {
- gpfsFcntlHeader_t hdr;
- gpfsDataShipMap_t map;
+ gpfsFcntlHeader_t hdr;
+ gpfsDataShipMap_t map;
} ds_map;
ds_map.hdr.totalLength = sizeof(ds_map);
@@ -1934,13 +2943,13 @@ gpfs_start_data_ship_map(int handle, int partition_size, int agent_count,
ds_map.map.agentCount = agent_count;
for (i = 0; i < agent_count; ++i)
- ds_map.map.agentNodeNumber[i] = agent_node_num[i];
+ ds_map.map.agentNodeNumber[i] = agent_node_num[i];
if (gpfs_fcntl(handle, &ds_map) != 0) {
- fprintf(stderr,
- "gpfs_fcntl DS map directive failed. errno=%d errorOffset=%d\n",
- errno, ds_map.hdr.errorOffset);
- exit(EXIT_FAILURE);
+ fprintf(stderr,
+ "gpfs_fcntl DS map directive failed. errno=%d errorOffset=%d\n",
+ errno, ds_map.hdr.errorOffset);
+ exit(EXIT_FAILURE);
}
}
@@ -1968,12 +2977,12 @@ gpfs_start_data_ship_map(int handle, int partition_size, int agent_count,
* Programmer: Bill Wendling, 28. May 2002
* Modifications:
*/
-static void
+ static void
gpfs_stop_data_shipping(int handle)
{
struct {
- gpfsFcntlHeader_t hdr;
- gpfsDataShipStop_t stop;
+ gpfsFcntlHeader_t hdr;
+ gpfsDataShipStop_t stop;
} ds_stop;
ds_stop.hdr.totalLength = sizeof(ds_stop);
@@ -1983,9 +2992,9 @@ gpfs_stop_data_shipping(int handle)
ds_stop.stop.structType = GPFS_DATA_SHIP_STOP;
if (gpfs_fcntl(handle, &ds_stop) != 0)
- fprintf(stderr,
- "gpfs_fcntl DS stop directive failed. errno=%d errorOffset=%d\n",
- errno, ds_stop.hdr.errorOffset);
+ fprintf(stderr,
+ "gpfs_fcntl DS stop directive failed. errno=%d errorOffset=%d\n",
+ errno, ds_stop.hdr.errorOffset);
}
/*
@@ -1996,20 +3005,20 @@ gpfs_stop_data_shipping(int handle)
* Programmer: Bill Wendling, 03. June 2002
* Modifications:
*/
-static void
+ static void
gpfs_invalidate_file_cache(const char *filename)
{
int handle;
struct {
- gpfsFcntlHeader_t hdr;
- gpfsClearFileCache_t inv;
+ gpfsFcntlHeader_t hdr;
+ gpfsClearFileCache_t inv;
} inv_cache_hint;
/* Open the file. If the open fails, the file cannot be cached. */
handle = open(filename, O_RDONLY, 0);
if (handle == -1)
- return;
+ return;
/* Issue the invalidate hint */
inv_cache_hint.hdr.totalLength = sizeof(inv_cache_hint);
@@ -2019,21 +3028,21 @@ gpfs_invalidate_file_cache(const char *filename)
inv_cache_hint.inv.structType = GPFS_CLEAR_FILE_CACHE;
if (gpfs_fcntl(handle, &inv_cache_hint) != 0) {
- fprintf(stderr,
- "gpfs_fcntl clear cache hint failed for file '%s'.",
- filename);
- fprintf(stderr, " errno=%d errorOffset=%d\n",
- errno, inv_cache_hint.hdr.errorOffset);
- exit(1);
+ fprintf(stderr,
+ "gpfs_fcntl clear cache hint failed for file '%s'.",
+ filename);
+ fprintf(stderr, " errno=%d errorOffset=%d\n",
+ errno, inv_cache_hint.hdr.errorOffset);
+ exit(1);
}
/* Close the file */
if (close(handle) == -1) {
- fprintf(stderr,
- "could not close file '%s' after flushing file cache, ",
- filename);
- fprintf(stderr, "errno=%d\n", errno);
- exit(1);
+ fprintf(stderr,
+ "could not close file '%s' after flushing file cache, ",
+ filename);
+ fprintf(stderr, "errno=%d\n", errno);
+ exit(1);
}
}
@@ -2043,51 +3052,51 @@ gpfs_invalidate_file_cache(const char *filename)
#if 0
/* H5_HAVE_GPFS isn't defined...stub functions */
-static void
+ static void
gpfs_access_range(int UNUSED handle, off_t UNUSED start, off_t UNUSED length,
- int UNUSED is_write)
+ int UNUSED is_write)
{
return;
}
-static void
+ static void
gpfs_free_range(int UNUSED handle, off_t UNUSED start, off_t UNUSED length)
{
return;
}
-static void
+ static void
gpfs_clear_file_cache(int UNUSED handle)
{
return;
}
-static void
+ static void
gpfs_cancel_hints(int UNUSED handle)
{
return;
}
-static void
+ static void
gpfs_start_data_shipping(int UNUSED handle, int UNUSED num_insts)
{
return;
}
-static void
+ static void
gpfs_stop_data_shipping(int UNUSED handle)
{
return;
}
-static void
+ static void
gpfs_start_data_ship_map(int UNUSED handle, int UNUSED partition_size,
- int UNUSED agent_count, int UNUSED *agent_node_num)
+ int UNUSED agent_count, int UNUSED *agent_node_num)
{
return;
}
-static void
+ static void
gpfs_invalidate_file_cache(const char UNUSED *filename)
{
return;
@@ -2102,7 +3111,7 @@ gpfs_invalidate_file_cache(const char UNUSED *filename)
* pure time spent in MPI_File code.
*/
int MPI_File_read_at(MPI_File fh, MPI_Offset offset, void *buf,
- int count, MPI_Datatype datatype, MPI_Status *status)
+ int count, MPI_Datatype datatype, MPI_Status *status)
{
int err;
set_time(timer_g, HDF5_MPI_READ, START);
@@ -2113,7 +3122,7 @@ int MPI_File_read_at(MPI_File fh, MPI_Offset offset, void *buf,
int MPI_File_read_at_all(MPI_File fh, MPI_Offset offset, void *buf,
- int count, MPI_Datatype datatype, MPI_Status *status)
+ int count, MPI_Datatype datatype, MPI_Status *status)
{
int err;
set_time(timer_g, HDF5_MPI_READ, START);
@@ -2123,7 +3132,7 @@ int MPI_File_read_at_all(MPI_File fh, MPI_Offset offset, void *buf,
}
int MPI_File_write_at(MPI_File fh, MPI_Offset offset, void *buf,
- int count, MPI_Datatype datatype, MPI_Status *status)
+ int count, MPI_Datatype datatype, MPI_Status *status)
{
int err;
set_time(timer_g, HDF5_MPI_WRITE, START);
@@ -2142,5 +3151,10 @@ int MPI_File_write_at_all(MPI_File fh, MPI_Offset offset, void *buf,
return err;
}
-#endif /* TIME_MPI */
+#endif /* TIME_MPI */
#endif /* H5_HAVE_PARALLEL */
+
+
+
+
+
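Note on the MPI derived types added above: the 2D contiguous-pattern file type is built in two steps, a contiguous piece of one dataset row and a vector of such pieces strided one row apart. The sketch below shows that composition in isolation; it is illustrative only and not part of the patch, the helper name make_cont_file_type is made up, and buf_size, blk_size and snbytes stand in for the corresponding fields used in pio_engine.c.

#include <mpi.h>

/* Sketch only: build the 2D contiguous-pattern file type the same way the
 * patch does -- MPI_Type_contiguous for a buf_size-byte row piece, then
 * MPI_Type_vector for blk_size such pieces, one dataset row apart. */
static MPI_Datatype
make_cont_file_type(size_t buf_size, size_t blk_size, MPI_Offset snbytes)
{
    MPI_Datatype row_piece, file_type;

    /* one buf_size-byte piece of a dataset row */
    MPI_Type_contiguous((int)buf_size, MPI_BYTE, &row_piece);
    MPI_Type_commit(&row_piece);

    /* blk_size pieces, one per row; the stride is counted in row_piece
     * units, so snbytes/buf_size units equals snbytes bytes between pieces */
    MPI_Type_vector((int)blk_size, 1, (int)(snbytes / buf_size),
                    row_piece, &file_type);
    MPI_Type_commit(&file_type);

    /* freeing the building block does not invalidate the committed type */
    MPI_Type_free(&row_piece);

    return file_type;
}

The stride argument of MPI_Type_vector is expressed in units of the old type, which is why the patch passes snbytes/buf_size rather than the byte stride itself.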
diff --git a/perform/pio_perf.c b/perform/pio_perf.c
index db28715..bb1880a 100644
--- a/perform/pio_perf.c
+++ b/perform/pio_perf.c
@@ -70,7 +70,6 @@
#include <mpi.h>
/* our header files */
-#include "h5tools_utils.h"
#include "pio_perf.h"
/* useful macros */
@@ -118,9 +117,9 @@ static const char *progname = "h5perf";
* adding more, make sure that they don't clash with each other.
*/
#if 1
-static const char *s_opts = "a:A:B:cCd:D:e:F:hi:Imno:p:P:stT:wx:X:";
+static const char *s_opts = "a:A:B:cCd:D:e:F:ghi:Imno:p:P:stT:wx:X:";
#else
-static const char *s_opts = "a:A:bB:cCd:D:e:F:hi:Imno:p:P:stT:wx:X:";
+static const char *s_opts = "a:A:bB:cCd:D:e:F:ghi:Imno:p:P:stT:wx:X:";
#endif /* 1 */
static struct long_options l_opts[] = {
{ "align", require_arg, 'a' },
@@ -163,6 +162,13 @@ static struct long_options l_opts[] = {
{ "debu", require_arg, 'D' },
{ "deb", require_arg, 'D' },
{ "de", require_arg, 'D' },
+ { "geometry", no_arg, 'g' },
+ { "geometr", no_arg, 'g' },
+ { "geomet", no_arg, 'g' },
+ { "geome", no_arg, 'g' },
+ { "geom", no_arg, 'g' },
+ { "geo", no_arg, 'g' },
+ { "ge", no_arg, 'g' },
{ "help", no_arg, 'h' },
{ "hel", no_arg, 'h' },
{ "he", no_arg, 'h' },
@@ -272,7 +278,7 @@ struct options {
const char *output_file; /* file to print report to */
long num_dsets; /* number of datasets */
long num_files; /* number of files */
- size_t num_bpp; /* number of bytes per proc per dset */
+ off_t num_bpp; /* number of bytes per proc per dset */
int num_iters; /* number of iterations */
int max_num_procs; /* maximum number of processes to use */
int min_num_procs; /* minimum number of processes to use */
@@ -281,6 +287,7 @@ struct options {
size_t blk_size; /* Block size */
unsigned interleaved; /* Interleaved vs. contiguous blocks */
unsigned collective; /* Collective vs. independent I/O */
+ unsigned dim2d; /* 1D vs. 2D geometry */
int print_times; /* print times as well as throughputs */
int print_raw; /* print raw data throughput info */
off_t h5_alignment; /* alignment in HDF5 file */
@@ -410,6 +417,7 @@ finish:
* Return: Nothing
* Programmer: Bill Wendling, 30. October 2001
* Modifications:
+ * Added 2D testing (Christian Chilan, 10. August 2005)
*/
static void
run_test_loop(struct options *opts)
@@ -417,6 +425,7 @@ run_test_loop(struct options *opts)
parameters parms;
int num_procs;
int doing_pio; /* if this process is doing PIO */
+ off_t snbytes;
parms.num_files = opts->num_files;
parms.num_dsets = opts->num_dsets;
@@ -424,6 +433,7 @@ run_test_loop(struct options *opts)
parms.blk_size = opts->blk_size;
parms.interleaved = opts->interleaved;
parms.collective = opts->collective;
+ parms.dim2d = opts->dim2d;
parms.h5_align = opts->h5_alignment;
parms.h5_thresh = opts->h5_threshold;
parms.h5_use_chunks = opts->h5_use_chunks;
@@ -443,24 +453,42 @@ run_test_loop(struct options *opts)
/* do something harsh */
}
- /* only processes doing PIO will run the tests */
- if (doing_pio){
+ /* only processes doing PIO will run the tests */
+ if (doing_pio){
output_report("Number of processors = %ld\n", parms.num_procs);
/* multiply the xfer buffer size by 2 for each loop iteration */
for (buf_size = opts->min_xfer_size;
buf_size <= opts->max_xfer_size; buf_size <<= 1) {
parms.buf_size = buf_size;
- parms.num_bytes = (off_t)opts->num_bpp*parms.num_procs;
- print_indent(1);
- output_report("Transfer Buffer Size: %ld bytes, File size: %.2f MBs\n",
- buf_size,
- ((double)parms.num_dsets * (double)parms.num_bytes)
- / ONE_MB);
- print_indent(1);
- output_report(" # of files: %ld, # of datasets: %ld, dataset size: %.2f MBs\n",
- parms.num_files, parms.num_dsets, (double)parms.num_bytes/ONE_MB);
+ if (parms.dim2d){
+ parms.num_bytes = (off_t)pow((double)(opts->num_bpp*parms.num_procs),2);
+ if (parms.interleaved)
+ output_report("Transfer Buffer Size: %ldx%ld bytes, File size: %.2f MBs\n",
+ buf_size, opts->blk_size,
+ ((double)parms.num_dsets * (double)parms.num_bytes)
+ / ONE_MB);
+ else
+ output_report("Transfer Buffer Size: %ldx%ld bytes, File size: %.2f MBs\n",
+ opts->blk_size, buf_size,
+ ((double)parms.num_dsets * (double)parms.num_bytes)
+ / ONE_MB);
+
+ print_indent(1);
+ output_report(" # of files: %ld, # of datasets: %ld, dataset size: %.2fx%.2f KBs\n",
+ parms.num_files, parms.num_dsets, (double)(opts->num_bpp*parms.num_procs)/ONE_KB,
+ (double)(opts->num_bpp*parms.num_procs)/ONE_KB);
+ }
+ else{
+ parms.num_bytes = (off_t)opts->num_bpp*parms.num_procs;
+ output_report("Transfer Buffer Size: %ld bytes, File size: %.2f MBs\n",
+ buf_size,((double)parms.num_dsets * (double)parms.num_bytes) / ONE_MB);
+
+ print_indent(1);
+ output_report(" # of files: %ld, # of datasets: %ld, dataset size: %.2f MBs\n",
+ parms.num_files, parms.num_dsets, (double)(opts->num_bpp*parms.num_procs)/ONE_MB);
+ }
if (opts->io_types & PIO_POSIX)
run_test(POSIXIO, parms, opts);
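A quick worked example of the 2D sizing arithmetic in run_test_loop above, using hypothetical numbers (the real values come from the command-line options): in 2D mode num_bpp is treated as the per-process length, in bytes, of one side of the square, so the dataset side is num_bpp*num_procs and the dataset occupies the square of that.

#include <stdio.h>

int main(void)
{
    /* hypothetical inputs, not taken from the patch */
    long long num_bpp   = 8 * 1024;            /* bytes per process per side     */
    long long num_procs = 4;

    long long side      = num_bpp * num_procs; /* 32768 bytes per side           */
    long long num_bytes = side * side;         /* 1073741824 bytes (1 GB) dataset */

    printf("side = %lld bytes, dataset = %lld bytes\n", side, num_bytes);
    return 0;
}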
@@ -479,7 +507,7 @@ run_test_loop(struct options *opts)
if (destroy_comm_world() != SUCCESS) {
/* do something harsh */
}
- }
+ }
}
}
@@ -505,6 +533,10 @@ run_test(iotype iot, parameters parms, struct options *opts)
minmax *read_mm_table=NULL;
minmax *read_gross_mm_table=NULL;
minmax *read_raw_mm_table=NULL;
+ minmax *read_open_mm_table=NULL;
+ minmax *read_close_mm_table=NULL;
+ minmax *write_open_mm_table=NULL;
+ minmax *write_close_mm_table=NULL;
minmax write_mpi_mm = {0.0, 0.0, 0.0, 0};
minmax write_mm = {0.0, 0.0, 0.0, 0};
minmax write_gross_mm = {0.0, 0.0, 0.0, 0};
@@ -513,6 +545,10 @@ run_test(iotype iot, parameters parms, struct options *opts)
minmax read_mm = {0.0, 0.0, 0.0, 0};
minmax read_gross_mm = {0.0, 0.0, 0.0, 0};
minmax read_raw_mm = {0.0, 0.0, 0.0, 0};
+ minmax read_open_mm = {0.0, 0.0, 0.0, 0};
+ minmax read_close_mm = {0.0, 0.0, 0.0, 0};
+ minmax write_open_mm = {0.0, 0.0, 0.0, 0};
+ minmax write_close_mm = {0.0, 0.0, 0.0, 0};
raw_size = (off_t)parms.num_dsets * (off_t)parms.num_bytes;
parms.io_type = iot;
@@ -542,12 +578,16 @@ run_test(iotype iot, parameters parms, struct options *opts)
write_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
write_gross_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
write_raw_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
+ write_open_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
+ write_close_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
if (!parms.h5_write_only) {
read_mpi_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
read_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
read_gross_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
read_raw_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
+ read_open_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
+ read_close_mm_table = calloc((size_t)parms.num_iters , sizeof(minmax));
}
/* Do IO iteration times, collecting statistics each time */
@@ -559,28 +599,40 @@ run_test(iotype iot, parameters parms, struct options *opts)
/* gather all of the "mpi write" times */
t = get_time(res.timers, HDF5_MPI_WRITE);
- get_minmax(&write_mpi_mm, t);
+ get_minmax(&write_mpi_mm, t);
write_mpi_mm_table[i] = write_mpi_mm;
/* gather all of the "write" times */
t = get_time(res.timers, HDF5_FINE_WRITE_FIXED_DIMS);
- get_minmax(&write_mm, t);
+ get_minmax(&write_mm, t);
write_mm_table[i] = write_mm;
/* gather all of the "write" times from open to close */
t = get_time(res.timers, HDF5_GROSS_WRITE_FIXED_DIMS);
- get_minmax(&write_gross_mm, t);
+ get_minmax(&write_gross_mm, t);
write_gross_mm_table[i] = write_gross_mm;
/* gather all of the raw "write" times */
t = get_time(res.timers, HDF5_RAW_WRITE_FIXED_DIMS);
- get_minmax(&write_raw_mm, t);
+ get_minmax(&write_raw_mm, t);
write_raw_mm_table[i] = write_raw_mm;
+ /* gather all of the file open times (time from open to first write) */
+ t = get_time(res.timers, HDF5_FILE_WRITE_OPEN);
+ get_minmax(&write_open_mm, t);
+
+ write_open_mm_table[i] = write_open_mm;
+
+ /* gather all of the file close times (time from last write to close) */
+ t = get_time(res.timers, HDF5_FILE_WRITE_CLOSE);
+ get_minmax(&write_close_mm, t);
+
+ write_close_mm_table[i] = write_close_mm;
+
if (!parms.h5_write_only) {
/* gather all of the "mpi read" times */
t = get_time(res.timers, HDF5_MPI_READ);
@@ -605,9 +657,22 @@ run_test(iotype iot, parameters parms, struct options *opts)
get_minmax(&read_raw_mm, t);
read_raw_mm_table[i] = read_raw_mm;
- }
- pio_time_destroy(res.timers);
+ /* gather all of the file open times (time from open to first read) */
+ t = get_time(res.timers, HDF5_FILE_READ_OPEN);
+ get_minmax(&read_open_mm, t);
+
+ read_open_mm_table[i] = read_open_mm;
+
+ /* gather all of the file close times (time from last read to close) */
+ t = get_time(res.timers, HDF5_FILE_READ_CLOSE);
+ get_minmax(&read_close_mm, t);
+
+ read_close_mm_table[i] = read_close_mm;
+
+ }
+
+ pio_time_destroy(res.timers);
}
/*
@@ -656,6 +721,21 @@ run_test(iotype iot, parameters parms, struct options *opts)
}
output_results(opts,"Write Open-Close",write_gross_mm_table,parms.num_iters,raw_size);
+ /* Print out time from open to first write */
+ if (pio_debug_level >= 3) {
+ /* output all of the times for all iterations */
+ print_indent(3);
+ output_report("Write file open details:\n");
+ output_all_info(write_open_mm_table, parms.num_iters, 4);
+ }
+
+ /* Print out time from last write to close */
+ if (pio_debug_level >= 3) {
+ /* output all of the times for all iterations */
+ print_indent(3);
+ output_report("Write file close details:\n");
+ output_all_info(write_close_mm_table, parms.num_iters, 4);
+ }
if (!parms.h5_write_only) {
/* Read statistics */
@@ -703,6 +783,23 @@ run_test(iotype iot, parameters parms, struct options *opts)
output_results(opts, "Read Open-Close", read_gross_mm_table,
parms.num_iters, raw_size);
+
+ /* Print out time from open to first read */
+ if (pio_debug_level >= 3) {
+ /* output all of the times for all iterations */
+ print_indent(3);
+ output_report("Read file open details:\n");
+ output_all_info(read_open_mm_table, parms.num_iters, 4);
+ }
+
+ /* Print out time from last read to close */
+ if (pio_debug_level >= 3) {
+ /* output all of the times for all iterations */
+ print_indent(3);
+ output_report("Read file close details:\n");
+ output_all_info(read_close_mm_table, parms.num_iters, 4);
+ }
+
}
/* clean up our mess */
@@ -710,12 +807,16 @@ run_test(iotype iot, parameters parms, struct options *opts)
free(write_mm_table);
free(write_gross_mm_table);
free(write_raw_mm_table);
+ free(write_open_mm_table);
+ free(write_close_mm_table);
if (!parms.h5_write_only) {
free(read_mpi_mm_table);
free(read_mm_table);
free(read_gross_mm_table);
free(read_raw_mm_table);
+ free(read_open_mm_table);
+ free(read_close_mm_table);
}
return ret_value;
@@ -1010,9 +1111,6 @@ report_parameters(struct options *opts)
HDfprintf(output, "rank %d: IO API=", rank);
print_io_api(opts->io_types);
- HDfprintf(output, "rank %d: Number of bytes per process per dataset=", rank);
- recover_size_and_print((long_long)opts->num_bpp, "\n");
-
HDfprintf(output, "rank %d: Number of files=%Hd\n", rank,
(long_long)opts->num_files);
HDfprintf(output, "rank %d: Number of datasets=%Hd\n", rank,
@@ -1022,21 +1120,60 @@ report_parameters(struct options *opts)
HDfprintf(output, "rank %d: Number of processes=%d:%d\n", rank,
opts->min_num_procs, opts->max_num_procs);
+ if (opts->dim2d){
+ HDfprintf(output, "rank %d: Number of bytes per process per dataset=", rank);
+ recover_size_and_print((long_long)(opts->num_bpp * opts->num_bpp * opts->min_num_procs), ":");
+ recover_size_and_print((long_long)(opts->num_bpp * opts->num_bpp * opts->max_num_procs), "\n");
+
+ HDfprintf(output, "rank %d: Size of dataset(s)=", rank);
+ recover_size_and_print((long_long)(opts->num_bpp * opts->min_num_procs), "x");
+ recover_size_and_print((long_long)(opts->num_bpp * opts->min_num_procs), ":");
+ recover_size_and_print((long_long)(opts->num_bpp * opts->max_num_procs), "x");
+ recover_size_and_print((long_long)(opts->num_bpp * opts->max_num_procs), "\n");
+
+ HDfprintf(output, "rank %d: File size=", rank);
+ recover_size_and_print((long_long)(pow(opts->num_bpp * opts->min_num_procs,2)
+ * opts->num_dsets), ":");
+ recover_size_and_print((long_long)(pow(opts->num_bpp * opts->max_num_procs,2)
+ * opts->num_dsets), "\n");
+
+ HDfprintf(output, "rank %d: Transfer buffer size=", rank);
+ if(opts->interleaved){
+ recover_size_and_print((long_long)opts->min_xfer_size, "x");
+ recover_size_and_print((long_long)opts->blk_size, ":");
+ recover_size_and_print((long_long)opts->max_xfer_size, "x");
+ recover_size_and_print((long_long)opts->blk_size, "\n");
+ }
+ else{
+ recover_size_and_print((long_long)opts->blk_size, "x");
+ recover_size_and_print((long_long)opts->min_xfer_size, ":");
+ recover_size_and_print((long_long)opts->blk_size, "x");
+ recover_size_and_print((long_long)opts->max_xfer_size, "\n");
+ }
+ HDfprintf(output, "rank %d: Block size=", rank);
+ recover_size_and_print((long_long)opts->blk_size, "x");
+ recover_size_and_print((long_long)opts->blk_size, "\n");
+ }
+ else{
+ HDfprintf(output, "rank %d: Number of bytes per process per dataset=", rank);
+ recover_size_and_print((long_long)opts->num_bpp, "\n");
+
HDfprintf(output, "rank %d: Size of dataset(s)=", rank);
recover_size_and_print((long_long)(opts->num_bpp * opts->min_num_procs), ":");
recover_size_and_print((long_long)(opts->num_bpp * opts->max_num_procs), "\n");
HDfprintf(output, "rank %d: File size=", rank);
recover_size_and_print((long_long)(opts->num_bpp * opts->min_num_procs
- * opts->num_dsets), ":");
+ * opts->num_dsets), ":");
recover_size_and_print((long_long)(opts->num_bpp * opts->max_num_procs
- * opts->num_dsets), "\n");
+ * opts->num_dsets), "\n");
HDfprintf(output, "rank %d: Transfer buffer size=", rank);
recover_size_and_print((long_long)opts->min_xfer_size, ":");
recover_size_and_print((long_long)opts->max_xfer_size, "\n");
HDfprintf(output, "rank %d: Block size=", rank);
recover_size_and_print((long_long)opts->blk_size, "\n");
+ }
HDfprintf(output, "rank %d: Block Pattern in Dataset=", rank);
if(opts->interleaved)
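
As a worked example of the 2D branch above (illustrative values only; the formulas are exactly the ones printed by report_parameters()): with num_bpp = 8 KB and 4 processes, each dataset is a 32768 x 32768 square, each process contributes 8K*8K*4 bytes, and the file holds (8K*4)^2 bytes per dataset. A minimal self-contained sketch:

#include <stdio.h>

int main(void)
{
    /* Hypothetical inputs: -e 8K (the 2D default) on 4 processes, 1 dataset. */
    long long num_bpp   = 8 * 1024;
    long long num_procs = 4;
    long long num_dsets = 1;

    long long edge           = num_bpp * num_procs;            /* dataset is edge x edge     */
    long long bytes_per_proc = num_bpp * num_bpp * num_procs;  /* reported per-process bytes */
    long long file_size      = edge * edge * num_dsets;        /* == pow(num_bpp*procs, 2)   */

    printf("dataset: %lldx%lld, per-process bytes: %lld, file size: %lld\n",
           edge, edge, bytes_per_proc, file_size);
    return 0;
}
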
@@ -1050,6 +1187,12 @@ report_parameters(struct options *opts)
else
HDfprintf(output, "Independent\n");
+ HDfprintf(output, "rank %d: Geometry=", rank);
+ if(opts->dim2d)
+ HDfprintf(output, "2D\n");
+ else
+ HDfprintf(output, "1D\n");
+
HDfprintf(output, "rank %d: VFL used for HDF5 I/O=", rank);
if(opts->h5_use_mpi_posix)
HDfprintf(output, "MPI-posix driver\n");
@@ -1083,6 +1226,7 @@ report_parameters(struct options *opts)
* Return: Pointer to an OPTIONS structure
* Programmer: Bill Wendling, 31. October 2001
* Modifications:
+ * Added 2D testing (Christian Chilan, 10. August 2005)
*/
static struct options *
parse_command_line(int argc, char *argv[])
@@ -1096,15 +1240,16 @@ parse_command_line(int argc, char *argv[])
cl_opts->io_types = 0; /* will set default after parsing options */
cl_opts->num_dsets = 1;
cl_opts->num_files = 1;
- cl_opts->num_bpp = 256 * ONE_KB;
+ cl_opts->num_bpp = 0;
cl_opts->num_iters = 1;
cl_opts->max_num_procs = comm_world_nprocs_g;
cl_opts->min_num_procs = 1;
- cl_opts->max_xfer_size = 1 * ONE_MB;
- cl_opts->min_xfer_size = 128 * ONE_KB;
- cl_opts->blk_size = 128 * ONE_KB; /* Default to writing 128K per block */
+ cl_opts->max_xfer_size = 0;
+ cl_opts->min_xfer_size = 0;
+ cl_opts->blk_size = 0;
cl_opts->interleaved = 0; /* Default to contiguous blocks in dataset */
cl_opts->collective = 0; /* Default to independent I/O access */
+ cl_opts->dim2d = 0; /* Default to 1D */
cl_opts->print_times = FALSE; /* Printing times is off by default */
cl_opts->print_raw = FALSE; /* Printing raw data throughput is off by default */
cl_opts->h5_alignment = 1; /* No alignment for HDF5 objects by default */
@@ -1211,10 +1356,10 @@ parse_command_line(int argc, char *argv[])
/* Turn on time printing */
cl_opts->print_times = TRUE;
break;
- case 'v':
+ case 'v':
/* Turn on verify data correctness*/
- cl_opts->verify = TRUE;
- break;
+ cl_opts->verify = TRUE;
+ break;
default:
fprintf(stderr, "pio_perf: invalid --debug option %s\n", buf);
exit(EXIT_FAILURE);
@@ -1235,6 +1380,9 @@ parse_command_line(int argc, char *argv[])
case 'F':
cl_opts->num_files = atoi(opt_arg);
break;
+ case 'g':
+ cl_opts->dim2d = 1;
+ break;
case 'i':
cl_opts->num_iters = atoi(opt_arg);
break;
@@ -1275,28 +1423,50 @@ parse_command_line(int argc, char *argv[])
}
}
+
+ if (cl_opts->num_bpp == 0){
+ if (cl_opts->dim2d == 0)
+ cl_opts->num_bpp = 256 * ONE_KB;
+ else
+ cl_opts->num_bpp = 8 * ONE_KB;
+ }
+
+ if (cl_opts->max_xfer_size == 0)
+ cl_opts->max_xfer_size = cl_opts->num_bpp;
+
+ if (cl_opts->min_xfer_size == 0)
+ cl_opts->min_xfer_size = (cl_opts->num_bpp)/2;
+
+ if (cl_opts->blk_size == 0)
+ cl_opts->blk_size = (cl_opts->num_bpp)/2;
+
+
/* set default if none specified yet */
if (!cl_opts->io_types)
- cl_opts->io_types = PIO_HDF5 | PIO_MPI | PIO_POSIX; /* run all API */
+ cl_opts->io_types = PIO_HDF5 | PIO_MPI | PIO_POSIX; /* run all API */
/* verify parameters sanity. Adjust if needed. */
/* cap xfer_size with bytes per process */
- if (cl_opts->min_xfer_size > cl_opts->num_bpp)
- cl_opts->min_xfer_size = cl_opts->num_bpp;
- if (cl_opts->max_xfer_size > cl_opts->num_bpp)
- cl_opts->max_xfer_size = cl_opts->num_bpp;
+ if (!cl_opts->dim2d) {
+ if (cl_opts->min_xfer_size > cl_opts->num_bpp)
+ cl_opts->min_xfer_size = cl_opts->num_bpp;
+ if (cl_opts->max_xfer_size > cl_opts->num_bpp)
+ cl_opts->max_xfer_size = cl_opts->num_bpp;
+ }
if (cl_opts->min_xfer_size > cl_opts->max_xfer_size)
- cl_opts->min_xfer_size = cl_opts->max_xfer_size;
+ cl_opts->min_xfer_size = cl_opts->max_xfer_size;
+ if (cl_opts->blk_size > cl_opts->num_bpp )
+ cl_opts->blk_size = cl_opts->num_bpp;
/* check range of number of processes */
if (cl_opts->min_num_procs <= 0)
- cl_opts->min_num_procs = 1;
+ cl_opts->min_num_procs = 1;
if (cl_opts->max_num_procs <= 0)
- cl_opts->max_num_procs = 1;
+ cl_opts->max_num_procs = 1;
if (cl_opts->min_num_procs > cl_opts->max_num_procs)
- cl_opts->min_num_procs = cl_opts->max_num_procs;
+ cl_opts->min_num_procs = cl_opts->max_num_procs;
/* check iteration */
if (cl_opts->num_iters <= 0)
- cl_opts->num_iters = 1;
+ cl_opts->num_iters = 1;
return cl_opts;
}
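
For reference, the defaults above now cascade from --num-bytes instead of the old fixed 128K/1M constants. A minimal sketch of that derivation, using hypothetical inputs (a 2D run with no size options supplied); only the rules come from the code above:

#include <stdio.h>

int main(void)
{
    /* Hypothetical: "-g" given, no -e/-x/-X/-B options on the command line. */
    int       dim2d   = 1;
    long long num_bpp = 0, min_xfer = 0, max_xfer = 0, blk_size = 0;

    if (num_bpp == 0)
        num_bpp = dim2d ? 8 * 1024 : 256 * 1024;  /* 8K for 2D, 256K for 1D */
    if (max_xfer == 0) max_xfer = num_bpp;        /* whole per-process size */
    if (min_xfer == 0) min_xfer = num_bpp / 2;    /* half of it             */
    if (blk_size == 0) blk_size = num_bpp / 2;

    printf("num_bpp=%lld min_xfer=%lld max_xfer=%lld blk_size=%lld\n",
           num_bpp, min_xfer, max_xfer, blk_size);
    return 0;
}
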
@@ -1356,6 +1526,7 @@ parse_size_directive(const char *size)
* Return: Nothing
* Programmer: Bill Wendling, 31. October 2001
* Modifications:
+ * Added 2D testing (Christian Chilan, 10. August 2005)
*/
static void
usage(const char *prog)
@@ -1376,7 +1547,7 @@ usage(const char *prog)
#endif /* 0 */
printf(" -B S, --block-size=S Block size within transfer buffer\n");
printf(" (see below for description)\n");
- printf(" [default:128K]\n");
+ printf(" [default: half the number of bytes per process per dataset]\n");
printf(" -c, --chunk Create HDF5 datasets chunked [default: off]\n");
printf(" -C, --collective Use collective I/O for MPI and HDF5 APIs\n");
printf(" [default: off (i.e. independent I/O)]\n");
@@ -1384,8 +1555,9 @@ usage(const char *prog)
printf(" -D DL, --debug=DL Indicate the debugging level\n");
printf(" [default: no debugging]\n");
printf(" -e S, --num-bytes=S Number of bytes per process per dataset\n");
- printf(" [default: 256K]\n");
+ printf(" [default: 256K for 1D, 8K for 2D]\n");
printf(" -F N, --num-files=N Number of files [default: 1]\n");
+ printf(" -g, --geometry Use 2D geometry [default: 1D]\n");
printf(" -i N, --num-iterations=N Number of iterations to perform [default: 1]\n");
printf(" -I, --interleaved Interleaved block I/O (see below for example)\n");
printf(" [default: Contiguous block I/O]\n");
@@ -1398,8 +1570,10 @@ usage(const char *prog)
printf(" -T S, --threshold=S Threshold for alignment of objects in HDF5 file\n");
printf(" [default: 1]\n");
printf(" -w, --write-only Perform write tests not the read tests\n");
- printf(" -x S, --min-xfer-size=S Minimum transfer buffer size [default: 128K]\n");
- printf(" -X S, --max-xfer-size=S Maximum transfer buffer size [default: 1M]\n");
+ printf(" -x S, --min-xfer-size=S Minimum transfer buffer size\n");
+ printf(" [default: half the number of bytes per process per dataset]\n");
+ printf(" -X S, --max-xfer-size=S Maximum transfer buffer size\n");
+ printf(" [default: the number of bytes per process per dataset]\n");
printf("\n");
printf(" F - is a filename.\n");
printf(" N - is an integer >=0.\n");
diff --git a/perform/pio_perf.h b/perform/pio_perf.h
index 944b8fd..f9a8e03 100644
--- a/perform/pio_perf.h
+++ b/perform/pio_perf.h
@@ -17,8 +17,13 @@
#define PIO_PERF_H__
#include "pio_timer.h"
+#ifndef STANDALONE
#include "H5private.h"
#include "h5test.h"
+#include "h5tools_utils.h"
+#else
+#include "pio_standalone.h"
+#endif
/* setup the dataset no fill option if this is v1.5 or more */
#if H5_VERS_MAJOR > 1 || H5_VERS_MINOR > 4
@@ -43,6 +48,7 @@ typedef struct parameters_ {
size_t blk_size; /* Block size */
unsigned interleaved; /* Interleaved vs. contiguous blocks */
unsigned collective; /* Collective vs. independent I/O */
+ unsigned dim2d; /* 1D vs. 2D */
hsize_t h5_align; /* HDF5 object alignment */
hsize_t h5_thresh; /* HDF5 object alignment threshold */
int h5_use_chunks; /* Make HDF5 dataset chunked */
diff --git a/perform/pio_standalone.c b/perform/pio_standalone.c
new file mode 100644
index 0000000..e404274
--- /dev/null
+++ b/perform/pio_standalone.c
@@ -0,0 +1,282 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+
+/* This file contains the definition of functions required to build h5perf in
+ * STANDALONE mode.
+ * Created: Christian Chilan, 2005/5/18.
+ */
+
+#include "pio_perf.h"
+
+
+/** From h5tools_utils.c **/
+
+/* global variables */
+int nCols = 80;
+
+/* ``get_option'' variables */
+int opt_err = 1; /*get_option prints errors if this is on */
+int opt_ind = 1; /*token pointer */
+const char *opt_arg; /*flag argument (or value) */
+
+
+int
+get_option(int argc, const char **argv, const char *opts, const struct long_options *l_opts)
+{
+ static int sp = 1; /* character index in current token */
+ int opt_opt = '?'; /* option character passed back to user */
+
+ if (sp == 1) {
+ /* check for more flag-like tokens */
+ if (opt_ind >= argc || argv[opt_ind][0] != '-' || argv[opt_ind][1] == '\0') {
+ return EOF;
+ } else if (HDstrcmp(argv[opt_ind], "--") == 0) {
+ opt_ind++;
+ return EOF;
+ }
+ }
+
+ if (sp == 1 && argv[opt_ind][0] == '-' && argv[opt_ind][1] == '-') {
+ /* long command line option */
+ const char *arg = &argv[opt_ind][2];
+ int i;
+
+ for (i = 0; l_opts && l_opts[i].name; i++) {
+ size_t len = HDstrlen(l_opts[i].name);
+
+ if (HDstrncmp(arg, l_opts[i].name, len) == 0) {
+ /* we've found a matching long command line flag */
+ opt_opt = l_opts[i].shortval;
+
+ if (l_opts[i].has_arg != no_arg) {
+ if (arg[len] == '=') {
+ opt_arg = &arg[len + 1];
+ } else if (opt_ind < (argc - 1) && argv[opt_ind + 1][0] != '-') {
+ opt_arg = argv[++opt_ind];
+ } else if (l_opts[i].has_arg == require_arg) {
+ if (opt_err)
+ HDfprintf(stderr,
+ "%s: option required for \"--%s\" flag\n",
+ argv[0], arg);
+
+ opt_opt = '?';
+ }
+ } else {
+ if (arg[len] == '=') {
+ if (opt_err)
+ HDfprintf(stderr,
+ "%s: no option required for \"%s\" flag\n",
+ argv[0], arg);
+
+ opt_opt = '?';
+ }
+
+ opt_arg = NULL;
+ }
+
+ break;
+ }
+ }
+
+ if (l_opts[i].name == NULL) {
+ /* exhausted all of the l_opts we have and still didn't match */
+ if (opt_err)
+ HDfprintf(stderr, "%s: unknown option \"%s\"\n", argv[0], arg);
+
+ opt_opt = '?';
+ }
+
+ opt_ind++;
+ sp = 1;
+ } else {
+ register char *cp; /* pointer into current token */
+
+ /* short command line option */
+ opt_opt = argv[opt_ind][sp];
+
+ if (opt_opt == ':' || (cp = strchr(opts, opt_opt)) == 0) {
+
+ if (opt_err)
+ HDfprintf(stderr, "%s: unknown option \"%c\"\n",
+ argv[0], opt_opt);
+
+ /* if no chars left in this token, move to next token */
+ if (argv[opt_ind][++sp] == '\0') {
+ opt_ind++;
+ sp = 1;
+ }
+
+ return '?';
+ }
+
+ if (*++cp == ':') {
+ /* if a value is expected, get it */
+ if (argv[opt_ind][sp + 1] != '\0') {
+ /* flag value is rest of current token */
+ opt_arg = &argv[opt_ind++][sp + 1];
+ } else if (++opt_ind >= argc) {
+ if (opt_err)
+ HDfprintf(stderr,
+ "%s: value expected for option \"%c\"\n",
+ argv[0], opt_opt);
+
+ opt_opt = '?';
+ } else {
+ /* flag value is next token */
+ opt_arg = argv[opt_ind++];
+ }
+
+ sp = 1;
+ } else {
+ /* set up to look at next char in token, next time */
+ if (argv[opt_ind][++sp] == '\0') {
+ /* no more in current token, so setup next token */
+ opt_ind++;
+ sp = 1;
+ }
+
+ opt_arg = NULL;
+ }
+ }
+
+ /* return the current flag character found */
+ return opt_opt;
+}
+
+
+void
+print_version(const char *progname)
+{
+ printf("%s: Version %u.%u.%u%s%s\n",
+ progname, H5_VERS_MAJOR, H5_VERS_MINOR, H5_VERS_RELEASE,
+ H5_VERS_SUBRELEASE[0] ? "-" : "", H5_VERS_SUBRELEASE);
+}
+
+
+
+/** From h5test.c **/
+
+#ifdef H5_HAVE_PARALLEL
+MPI_Info h5_io_info_g=MPI_INFO_NULL;/* MPI INFO object for IO */
+#endif
+
+int
+h5_set_info_object(void)
+{
+ char *envp; /* environment pointer */
+ int ret_value=0;
+
+ /* handle any MPI INFO hints via $HDF5_MPI_INFO */
+ if ((envp = getenv("HDF5_MPI_INFO")) != NULL){
+ char *next, *valp;
+
+
+ valp = envp = next = HDstrdup(envp);
+
+ /* create an INFO object if not created yet */
+ if (h5_io_info_g == MPI_INFO_NULL)
+ MPI_Info_create(&h5_io_info_g);
+
+ do {
+ size_t len;
+ char *key_val, *endp, *namep;
+
+ if (*valp == ';')
+ valp++;
+
+ /* copy key/value pair into temporary buffer */
+ len = strcspn(valp, ";");
+ next = &valp[len];
+ key_val = calloc(1, len + 1);
+
+ /* increment the next pointer past the terminating semicolon */
+ if (*next == ';')
+ ++next;
+
+ namep = HDstrncpy(key_val, valp, len);
+
+ /* skip any leading whitespace */
+ while (*namep && (*namep == ' ' || *namep == '\t'))
+ namep++;
+
+ /* trim any trailing whitespace */
+ endp = &namep[strlen(namep) - 1];
+
+ while (endp && (*endp == ' ' || *endp == '\t'))
+ *endp-- = '\0';
+
+ /* find the '=' */
+
+ valp = HDstrchr(namep, '=');
+
+ if (valp != NULL) { /* it's a valid key/value pairing */
+ char *tmp_val = valp + 1;
+
+ /* change '=' to \0, move valp down one */
+ *valp-- = '\0';
+
+ /* eat up ending whitespace on the "key" part */
+ while (*valp == ' ' || *valp == '\t')
+ *valp-- = '\0';
+
+ valp = tmp_val;
+
+ /* eat up beginning whitespace on the "value" part */
+ while (*valp == ' ' || *valp == '\t')
+ *valp++ = '\0';
+
+ /* actually set the darned thing */
+ if (MPI_SUCCESS != MPI_Info_set(h5_io_info_g, namep, valp)) {
+ printf("MPI_Info_set failed\n");
+ ret_value = -1;
+ }
+ }
+
+ valp = next;
+ HDfree(key_val);
+ } while (next && *next);
+
+ HDfree(envp);
+ }
+
+ return ret_value;
+}
+
+
+void
+h5_dump_info_object(MPI_Info info)
+{
+ char key[MPI_MAX_INFO_KEY+1];
+ char value[MPI_MAX_INFO_VAL+1];
+ int flag;
+ int i, nkeys;
+
+ printf("Dumping MPI Info Object(%d) (up to %d bytes per item):\n", (int)info,
+ MPI_MAX_INFO_VAL);
+ if (info==MPI_INFO_NULL){
+ printf("object is MPI_INFO_NULL\n");
+ }
+ else {
+ MPI_Info_get_nkeys(info, &nkeys);
+ printf("object has %d items\n", nkeys);
+ for (i=0; i<nkeys; i++){
+ MPI_Info_get_nthkey(info, i, key);
+ MPI_Info_get(info, key, MPI_MAX_INFO_VAL, value, &flag);
+ printf("%s=%s\n", key, value);
+ }
+
+ }
+}
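
h5_set_info_object() above reads $HDF5_MPI_INFO as semicolon-separated key=value pairs, trims surrounding whitespace, and loads each pair into h5_io_info_g. A minimal usage sketch, assuming an MPI build of the standalone sources; the hint names are hypothetical examples, not values the code requires:

#include <mpi.h>
#include <stdlib.h>

/* Provided by pio_standalone.c / pio_standalone.h */
extern MPI_Info h5_io_info_g;
extern int  h5_set_info_object(void);
extern void h5_dump_info_object(MPI_Info info);

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);

    /* Hypothetical hints; the parser accepts any "key=value" pairs. */
    setenv("HDF5_MPI_INFO", "ibm_largeblock_io=true; cb_nodes=4", 1);

    if (h5_set_info_object() == 0)
        h5_dump_info_object(h5_io_info_g);

    MPI_Finalize();
    return 0;
}
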
diff --git a/perform/pio_standalone.h b/perform/pio_standalone.h
new file mode 100644
index 0000000..af67a35
--- /dev/null
+++ b/perform/pio_standalone.h
@@ -0,0 +1,197 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef PIO_STANDALONE_H__
+#define PIO_STANDALONE_H__
+
+/* Header file for building h5perf by standalone mode.
+ * Created: Christian Chilan, 2005/5/18.
+ */
+
+/** From H5private.h **/
+
+#include "H5public.h" /* Include Public Definitions */
+
+
+/*
+ * Include ANSI-C header files.
+ */
+#ifdef H5_STDC_HEADERS
+# include <assert.h>
+# include <ctype.h>
+# include <errno.h>
+# include <fcntl.h>
+# include <float.h>
+# include <limits.h>
+# include <math.h>
+# include <signal.h>
+# include <stdarg.h>
+# include <stdio.h>
+# include <stdlib.h>
+# include <string.h>
+#endif
+
+/*
+ * And now for a couple non-Posix functions... Watch out for systems that
+ * define these in terms of macros.
+ */
+#ifdef _WIN32
+#define HDstrdup(S) _strdup(S)
+#else /* _WIN32 */
+
+#if !defined strdup && !defined H5_HAVE_STRDUP
+extern char *strdup(const char *s);
+#endif
+
+#define HDstrdup(S) strdup(S)
+
+#endif /* _WIN32 */
+
+H5_DLL int HDfprintf (FILE *stream, const char *fmt, ...);
+#define HDstrcmp(S,T) strcmp(S,T)
+#define HDstrlen(S) strlen(S)
+#define HDstrncmp(S,T,L) strncmp(S,T,L)
+#define HDstrncpy(X,Y,Z) strncpy(X,Y,Z)
+#define HDstrchr(S,C) strchr(S,C)
+#define HDfree(M) free(M)
+
+
+#ifdef _O_BINARY
+#define HDopen(S,F,M) open(S,F|_O_BINARY,M)
+#else
+#define HDopen(S,F,M) open(S,F,M)
+#endif
+#define HDclose(F) close(F)
+
+#ifdef _WIN32
+ #ifdef __MWERKS__
+ #define HDlseek(F,O,W) lseek(F,O,W)
+ #else /*MSVS */
+ #define HDlseek(F,O,W) _lseeki64(F,O,W)
+ #endif
+#else
+#define HDlseek(F,O,W) lseek(F,O,W)
+#endif
+
+#if defined (__MWERKS__)
+/* workaround for a bug in the Metrowerks version 6.0 header file for write
+ which is not defined as const void*
+ */
+#define HDwrite(F,M,Z) write(F,(void*)M,Z)
+#else
+#define HDwrite(F,M,Z) write(F,M,Z)
+#endif
+
+#define HDread(F,M,Z) read(F,M,Z)
+
+#ifdef _WIN32
+ #ifdef __MWERKS__
+ #define HDstat(S,B) stat(S,B)
+ #else /*MSVC*/
+ #define HDstat(S,B) _stati64(S,B)
+ #endif
+#else
+#define HDstat(S,B) stat(S,B)
+#endif
+
+#ifdef _WIN32
+ #ifdef __MWERKS__
+ #define HDfstat(F,B) fstat(F,B)
+ typedef struct stat h5_stat_t;
+ typedef off_t h5_stat_size_t;
+ #else /*MSVC*/
+ #define HDfstat(F,B) _fstati64(F,B)
+ typedef struct _stati64 h5_stat_t;
+ typedef __int64 h5_stat_size_t;
+ #endif
+#else
+#define HDfstat(F,B) fstat(F,B)
+typedef struct stat h5_stat_t;
+typedef off_t h5_stat_size_t;
+#endif
+
+/*
+ * HDF Boolean type.
+ */
+#ifndef FALSE
+# define FALSE 0
+#endif
+#ifndef TRUE
+# define TRUE 1
+#endif
+
+/*
+ * Although `long long' is part of the revised ANSI-C some compilers don't
+ * support it yet. We define `long_long' as the longest integral integer type
+ * supported by the compiler, usually 64 bits. It must be legal to qualify
+ * `long_long' with `unsigned'.
+ */
+#if H5_SIZEOF_LONG_LONG>0
+# define long_long long long
+#elif H5_SIZEOF___INT64>0
+# define long_long __int64 /*Win32*/
+# undef H5_SIZEOF_LONG_LONG
+# define H5_SIZEOF_LONG_LONG H5_SIZEOF___INT64
+#else
+# define long_long long int
+# undef H5_SIZEOF_LONG_LONG
+# define H5_SIZEOF_LONG_LONG H5_SIZEOF_LONG
+#endif
+
+
+
+/** From h5test.h **/
+
+#ifdef H5_HAVE_PARALLEL
+extern MPI_Info h5_io_info_g; /* MPI INFO object for IO */
+#endif
+
+#ifdef H5_HAVE_PARALLEL
+H5TEST_DLL int h5_set_info_object(void);
+H5TEST_DLL void h5_dump_info_object(MPI_Info info);
+#endif
+
+
+
+/** From h5tools_utils.h **/
+
+extern int opt_err; /* getoption prints errors if this is on */
+extern int opt_ind; /* token pointer */
+extern const char *opt_arg; /* flag argument (or value) */
+
+
+enum {
+ no_arg = 0, /* doesn't take an argument */
+ require_arg, /* requires an argument */
+ optional_arg /* argument is optional */
+};
+
+
+typedef struct long_options {
+ const char *name; /* name of the long option */
+ int has_arg; /* whether we should look for an arg */
+ char shortval; /* the shortname equivalent of long arg
+ * this gets returned from get_option */
+} long_options;
+
+extern int get_option(int argc, const char **argv, const char *opt,
+ const struct long_options *l_opt);
+
+extern int nCols; /*max number of columns for outputting */
+
+/* Definitions of useful routines */
+extern void print_version(const char *progname);
+
+#endif
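
The standalone header reproduces the h5tools option-parsing interface (get_option(), opt_arg, struct long_options). A minimal sketch of how a caller drives it, assuming the standalone build environment (H5public.h on the include path); the option table below is a hypothetical example, not the table h5perf itself uses:

#include <stdio.h>
#include "pio_standalone.h"   /* get_option(), opt_arg, struct long_options */

int main(int argc, char *argv[])
{
    /* Hypothetical options: -o FILE / --output=FILE and -h / --help. */
    static const char *s_opts = "o:h";
    static struct long_options l_opts[] = {
        { "output", require_arg, 'o' },
        { "help",   no_arg,      'h' },
        { NULL,     0,           '\0' }
    };
    int opt;

    while ((opt = get_option(argc, (const char **)argv, s_opts, l_opts)) != EOF) {
        switch (opt) {
        case 'o': printf("output file: %s\n", opt_arg);     break;
        case 'h': printf("usage: %s [-o FILE]\n", argv[0]); break;
        default:  return 1;   /* get_option already printed a message */
        }
    }
    return 0;
}
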
diff --git a/perform/pio_timer.c b/perform/pio_timer.c
index 3574fdf..66f9eb0 100644
--- a/perform/pio_timer.c
+++ b/perform/pio_timer.c
@@ -40,6 +40,22 @@
pio_time *timer_g; /* timer: global for stub functions */
/*
+ * Function: sub_time
+ * Purpose: Subtract two time values and return the difference, in seconds
+ *
+ * Note that the function assumes that a > b
+ * Programmer: Leon Arber, 1/27/06
+ */
+static double sub_time(struct timeval* a, struct timeval* b)
+{
+ return (((double)a->tv_sec +
+ ((double)a->tv_usec) / MICROSECOND) -
+ ((double)b->tv_sec +
+ ((double)b->tv_usec) / MICROSECOND));
+}
+
+
+/*
* Function: pio_time_new
* Purpose: Build us a brand, spankin', new performance time object.
* The object is a black box to the user. They just tell us
@@ -120,21 +136,55 @@ set_time(pio_time *pt, timer_type t, int start_stop)
if (pt->type == MPI_TIMER) {
if (start_stop == START) {
pt->mpi_timer[t] = MPI_Wtime();
+
+ /* When we start the timer for HDF5_FINE_WRITE_FIXED_DIMS or HDF5_FINE_READ_FIXED_DIMS
+ * we compute the time it took to only open the file */
+ if(t == HDF5_FINE_WRITE_FIXED_DIMS)
+ pt->total_time[HDF5_FILE_WRITE_OPEN] += pt->mpi_timer[t] - pt->mpi_timer[HDF5_GROSS_WRITE_FIXED_DIMS];
+ else if(t == HDF5_FINE_READ_FIXED_DIMS)
+ pt->total_time[HDF5_FILE_READ_OPEN] += pt->mpi_timer[t] - pt->mpi_timer[HDF5_GROSS_READ_FIXED_DIMS];
+
} else {
pt->total_time[t] += MPI_Wtime() - pt->mpi_timer[t];
+ pt->mpi_timer[t] = MPI_Wtime();
+
+ /* When we stop the timer for HDF5_GROSS_WRITE_FIXED_DIMS or HDF5_GROSS_READ_FIXED_DIMS
+ * we compute the time it took to close the file after the last read/write finished */
+ if(t == HDF5_GROSS_WRITE_FIXED_DIMS)
+ pt->total_time[HDF5_FILE_WRITE_CLOSE] += pt->mpi_timer[t] - pt->mpi_timer[HDF5_FINE_WRITE_FIXED_DIMS];
+ else if(t == HDF5_GROSS_READ_FIXED_DIMS)
+ pt->total_time[HDF5_FILE_READ_CLOSE] += pt->mpi_timer[t] - pt->mpi_timer[HDF5_FINE_READ_FIXED_DIMS];
}
} else {
if (start_stop == START) {
gettimeofday(&pt->sys_timer[t], NULL);
+
+ /* When we start the timer for HDF5_FINE_WRITE_FIXED_DIMS or HDF5_FINE_READ_FIXED_DIMS
+ * we compute the time it took to only open the file */
+ if(t == HDF5_FINE_WRITE_FIXED_DIMS)
+ pt->total_time[HDF5_FILE_WRITE_OPEN] += sub_time(&(pt->sys_timer[t]), &(pt->sys_timer[HDF5_GROSS_WRITE_FIXED_DIMS]));
+ else if(t == HDF5_FINE_READ_FIXED_DIMS)
+ pt->total_time[HDF5_FILE_READ_OPEN] += sub_time(&(pt->sys_timer[t]), &(pt->sys_timer[HDF5_GROSS_READ_FIXED_DIMS]));
+
+
} else {
struct timeval sys_t;
gettimeofday(&sys_t, NULL);
- pt->total_time[t] +=
- ((double)sys_t.tv_sec +
+ pt->total_time[t] += sub_time(&sys_t, &(pt->sys_timer[t]));
+
+/* ((double)sys_t.tv_sec +
((double)sys_t.tv_usec) / MICROSECOND) -
((double)pt->sys_timer[t].tv_sec +
- ((double)pt->sys_timer[t].tv_usec) / MICROSECOND);
+ ((double)pt->sys_timer[t].tv_usec) / MICROSECOND);*/
+
+ /* When we stop the timer for HDF5_GROSS_WRITE_FIXED_DIMS or HDF5_GROSS_READ_FIXED_DIMS
+ * we compute the time it took to close the file after the last read/write finished */
+ if(t == HDF5_GROSS_WRITE_FIXED_DIMS)
+ pt->total_time[HDF5_FILE_WRITE_CLOSE] += sub_time(&(pt->sys_timer[t]), &(pt->sys_timer[HDF5_FINE_WRITE_FIXED_DIMS]));
+ else if(t == HDF5_GROSS_READ_FIXED_DIMS)
+ pt->total_time[HDF5_FILE_READ_CLOSE] += sub_time(&(pt->sys_timer[t]), &(pt->sys_timer[HDF5_FINE_READ_FIXED_DIMS]));
+
}
}
@@ -203,3 +253,6 @@ get_time(pio_time *pt, timer_type t)
}
#endif /* H5_HAVE_PARALLEL */
+#ifdef STANDALONE
+#include "pio_standalone.c"
+#endif
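
The set_time() changes above derive the new open/close timers from the existing gross/fine timer pairs rather than adding explicit start/stop calls: the open time is the gap between the gross start (just before the file open) and the fine start (just before the first transfer), and the close time is the gap between the fine stop and the gross stop. A self-contained sketch of that arithmetic using gettimeofday(), with usleep() standing in for the open, transfer, and close phases; it mirrors the logic only and is not part of the patch:

#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

/* Same arithmetic as sub_time() above: difference of two timevals, in seconds. */
static double diff_sec(struct timeval *a, struct timeval *b)
{
    return ((double)a->tv_sec + (double)a->tv_usec / 1000000.0)
         - ((double)b->tv_sec + (double)b->tv_usec / 1000000.0);
}

int main(void)
{
    struct timeval gross_start, fine_start, fine_stop, gross_stop;

    gettimeofday(&gross_start, NULL);  /* START of HDF5_GROSS_WRITE_FIXED_DIMS */
    usleep(1000);                      /* stand-in for the file open           */
    gettimeofday(&fine_start, NULL);   /* START of HDF5_FINE_WRITE_FIXED_DIMS  */
    usleep(1000);                      /* stand-in for the actual writes       */
    gettimeofday(&fine_stop, NULL);    /* STOP of HDF5_FINE_WRITE_FIXED_DIMS   */
    usleep(1000);                      /* stand-in for the file close          */
    gettimeofday(&gross_stop, NULL);   /* STOP of HDF5_GROSS_WRITE_FIXED_DIMS  */

    /* What set_time() accumulates into the new timers: */
    printf("HDF5_FILE_WRITE_OPEN  ~ %f s\n", diff_sec(&fine_start, &gross_start));
    printf("HDF5_FILE_WRITE_CLOSE ~ %f s\n", diff_sec(&gross_stop, &fine_stop));
    return 0;
}
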
diff --git a/perform/pio_timer.h b/perform/pio_timer.h
index 2d2104f..943521a 100644
--- a/perform/pio_timer.h
+++ b/perform/pio_timer.h
@@ -33,6 +33,10 @@ typedef enum timer_type_ {
HDF5_DATASET_CREATE,
HDF5_MPI_WRITE,
HDF5_MPI_READ,
+ HDF5_FILE_READ_OPEN,
+ HDF5_FILE_READ_CLOSE,
+ HDF5_FILE_WRITE_OPEN,
+ HDF5_FILE_WRITE_CLOSE,
HDF5_FINE_WRITE_FIXED_DIMS,
HDF5_FINE_READ_FIXED_DIMS,
HDF5_GROSS_WRITE_FIXED_DIMS,