author    Albert Cheng <acheng@hdfgroup.org>    2004-09-22 14:42:20 (GMT)
committer Albert Cheng <acheng@hdfgroup.org>    2004-09-22 14:42:20 (GMT)
commit    6481d1a82e93096f209805e4215eb6129b98ac6b (patch)
tree      d83e2fe4b0b8146e6c639794198a90fc64642057 /release_docs
parent    9636e858cbe4c104cba7628276506462404f6191 (diff)
[svn-r9302] Updated with Sample MPIO programs.
Diffstat (limited to 'release_docs')
-rw-r--r--  release_docs/INSTALL_parallel  194
1 file changed, 181 insertions, 13 deletions
diff --git a/release_docs/INSTALL_parallel b/release_docs/INSTALL_parallel
index b40df03..64e85c4 100644
--- a/release_docs/INSTALL_parallel
+++ b/release_docs/INSTALL_parallel
@@ -5,21 +5,21 @@
1. Overview
-----------
-This file contains instructions for the installation of parallel
-HDF5. Platforms supported by this release are SGI Origin 2000, IBM SP2,
-Intel TFLOPs, and Linux version 2.2 and greater. The steps are kind of
-unnatural and will be more automized in the next release. If you have
-difficulties installing the software in your system, please send mail to
+This file contains instructions for the installation of parallel HDF5 (PHDF5).
+PHDF5 requires an MPI compiler with MPI-IO support and a parallel file system.
+If you are not sure whether you have these, first consult your system support
+staff for information on how to compile an MPI program, how to run an MPI
+application, and how to access the parallel file system. There are sample
+MPI-IO C and Fortran programs in the "Sample programs" section below. You can
+use them to run simple tests of your MPI compilers and the parallel file
+system.
- hdfparallel@ncsa.uiuc.edu
+If you still have difficulties installing PHDF5 in your system, please
+send mail to
+ hdfhelp@ncsa.uiuc.edu
-In your mail, please include the output of "uname -a". Also attach the
-content of "config.log" if you ran the "configure" command.
-
-First, you must obtain and unpack the HDF5 source as described in the
-INSTALL file. You also need to obtain the information of the include and
-library paths of MPI and MPIO software installed in your system since the
-parallel HDF5 library uses them for parallel I/O access.
+In your mail, please include the output of "uname -a". If you have run the
+"configure" command, attach the output of the command and the content of
+the file "config.log".
2. Quick Instruction for known systems
@@ -239,3 +239,171 @@ For example, if the tests should use directory /PFS/user/me, do
(In some batch job systems, you may need to hardset HDF5_PARAPREFIX in
the shell initial files like .profile, .cshrc, etc.)
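+
+For example, the hardset entry in the shell initial file could look like one
+of the following (the directory /PFS/user/me is only an illustration):
+
+    setenv HDF5_PARAPREFIX /PFS/user/me                   # csh/tcsh, .cshrc
+    HDF5_PARAPREFIX=/PFS/user/me; export HDF5_PARAPREFIX  # sh/ksh, .profile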
+
+
+5. Sample programs
+------------------
+
+Here are sample MPI-IO C and Fortran programs. You may use them to run simple
+tests of your MPI compilers and the parallel file system. The MPI commands
+used here are mpicc, mpif90, and mpirun. Replace them with the corresponding
+commands on your system.
+
+The programs assume they are run from within the parallel file system, so they
+create the test data file in the current directory. If the parallel file
+system is mounted somewhere else, run the sample programs from there, or edit
+the programs to use a different file name (the C program also accepts the file
+name as its first argument; see the example after the compile commands below).
+
+Example compiling and running:
+
+% mpicc Sample_mpio.c -o c.out
+% mpirun -np 4 c.out
+
+% mpif90 Sample_mpio.f90 -o f.out
+% mpirun -np 4 f.out
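+
+If your parallel file system is mounted somewhere other than the current
+directory, the C sample can also be pointed at it through its file name
+argument, for example (the directory name is only an illustration):
+
+% mpirun -np 4 c.out /PFS/user/me/mpitest.data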
+
+
+==> Sample_mpio.c <==
+/* Simple MPI-IO program testing if a parallel file can be created.
+ * Default filename can be specified via first program argument.
+ * Each process writes something, then reads all data back.
+ */
+
+#include <stdio.h>
+#include <unistd.h>    /* for gethostname() */
+#include <mpi.h>
+#ifndef MPI_FILE_NULL  /* MPI-IO may be defined in mpi.h already */
+# include <mpio.h>
+#endif
+
+#define DIMSIZE 10 /* dimension size, avoid powers of 2. */
+#define PRINTID printf("Proc %d: ", mpi_rank)
+
+int main(int ac, char **av)
+{
+ char hostname[128];
+ int mpi_size, mpi_rank;
+ MPI_File fh;
+ char *filename = "./mpitest.data";
+ char mpi_err_str[MPI_MAX_ERROR_STRING];
+ int mpi_err_strlen;
+ int mpi_err;
+ char writedata[DIMSIZE], readdata[DIMSIZE];
+ char expect_val;
+ int i, irank;
+ int nerrors = 0; /* number of errors */
+ MPI_Offset mpi_off;
+ MPI_Status mpi_stat;
+
+ MPI_Init(&ac, &av);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* get file name if provided */
+ if (ac > 1){
+ filename = *++av;
+ }
+ if (mpi_rank==0){
+ printf("Testing simple MPIO program with %d processes accessing file %s\n",
+ mpi_size, filename);
+ printf(" (Filename can be specified via program argument)\n");
+ }
+
+ /* show the hostname so that we can tell where the processes are running */
+ if (gethostname(hostname, 128) < 0){
+ PRINTID;
+ printf("gethostname failed\n");
+ return 1;
+ }
+ PRINTID;
+ printf("hostname=%s\n", hostname);
+
+ if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename,
+ MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_DELETE_ON_CLOSE,
+ MPI_INFO_NULL, &fh))
+ != MPI_SUCCESS){
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ PRINTID;
+ printf("MPI_File_open failed (%s)\n", mpi_err_str);
+ return 1;
+ }
+
+ /* each process writes some data */
+ for (i=0; i < DIMSIZE; i++)
+ writedata[i] = mpi_rank*DIMSIZE + i;
+ mpi_off = mpi_rank*DIMSIZE;
+ if ((mpi_err = MPI_File_write_at(fh, mpi_off, writedata, DIMSIZE, MPI_BYTE,
+ &mpi_stat))
+ != MPI_SUCCESS){
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ PRINTID;
+ printf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n",
+ (long) mpi_off, (int) DIMSIZE, mpi_err_str);
+ return 1;
+ }
+
+ /* make sure all processes have finished writing. */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /* each process reads all data and verifies it. */
+ for (irank=0; irank < mpi_size; irank++){
+ mpi_off = irank*DIMSIZE;
+ if ((mpi_err = MPI_File_read_at(fh, mpi_off, readdata, DIMSIZE, MPI_BYTE,
+ &mpi_stat))
+ != MPI_SUCCESS){
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ PRINTID;
+ printf("MPI_File_read_at offset(%ld), bytes (%d), failed (%s)\n",
+ (long) mpi_off, (int) DIMSIZE, mpi_err_str);
+ return 1;
+ }
+ for (i=0; i < DIMSIZE; i++){
+ expect_val = irank*DIMSIZE + i;
+ if (readdata[i] != expect_val){
+ PRINTID;
+ printf("read data[%d:%d] got %d, expect %d\n", irank, i,
+ readdata[i], expect_val);
+ nerrors++;
+ }
+ }
+ }
+ if (nerrors)
+ return 1;
+
+ MPI_File_close(&fh);
+
+ PRINTID;
+ printf("all tests passed\n");
+
+ MPI_Finalize();
+ return 0;
+}
+
+==> Sample_mpio.f90 <==
+!
+! The following example demonstrates how to create and close a parallel
+! file using MPI-IO calls.
+!
+
+ PROGRAM MPIOEXAMPLE
+
+ USE MPI
+
+ IMPLICIT NONE
+
+ CHARACTER(LEN=80), PARAMETER :: filename = "filef.h5" ! File name
+ INTEGER :: ierror ! Error flag
+ INTEGER :: fh ! File handle
+ INTEGER :: amode ! File access mode
+
+ call MPI_INIT(ierror)
+ amode = MPI_MODE_RDWR + MPI_MODE_CREATE + MPI_MODE_DELETE_ON_CLOSE
+ call MPI_FILE_OPEN(MPI_COMM_WORLD, filename, amode, MPI_INFO_NULL, fh, ierror)
+ print *, "Trying to create ", filename
+ if ( ierror .eq. MPI_SUCCESS ) then
+ print *, "MPI_FILE_OPEN succeeded"
+ call MPI_FILE_CLOSE(fh, ierror)
+ else
+ print *, "MPI_FILE_OPEN failed"
+ endif
+
+ call MPI_FINALIZE(ierror)
+ END PROGRAM