Diffstat (limited to 'release_docs')
 -rw-r--r--  release_docs/INSTALL_parallel  220
 1 file changed, 203 insertions(+), 17 deletions(-)
diff --git a/release_docs/INSTALL_parallel b/release_docs/INSTALL_parallel
index d6f1979..65014da 100644
--- a/release_docs/INSTALL_parallel
+++ b/release_docs/INSTALL_parallel
@@ -5,21 +5,21 @@
1. Overview
-----------
-This file contains instructions for the installation of parallel
-HDF5. Platforms supported by this release are SGI Origin 2000, IBM SP2,
-Intel TFLOPs, and Linux version 2.2 and greater. The steps are kind of
-unnatural and will be more automized in the next release. If you have
-difficulties installing the software in your system, please send mail to
+This file contains instructions for the installation of parallel HDF5 (PHDF5).
+PHDF5 requires an MPI compiler with MPI-IO support and a parallel file system.
+If you are not sure about these, first consult your system support staff for
+information on how to compile an MPI program, how to run an MPI application,
+and how to access the parallel file system. There are sample MPI-IO C and
+Fortran programs in the "Sample programs" section below. You can use them to
+run simple tests of your MPI compiler and the parallel file system.
- hdfparallel@ncsa.uiuc.edu
+If you still have difficulties installing PHDF5 on your system, please
+send mail to
+ hdfhelp@ncsa.uiuc.edu
-In your mail, please include the output of "uname -a". Also attach the
-content of "config.log" if you ran the "configure" command.
-
-First, you must obtain and unpack the HDF5 source as described in the
-INSTALL file. You also need to obtain the information of the include and
-library paths of MPI and MPIO software installed in your system since the
-parallel HDF5 library uses them for parallel I/O access.
+In your mail, please include the output of "uname -a". If you have run the
+"configure" command, attach the output of the command and the content of
+the file "config.log".
2. Quick Instruction for known systems
@@ -34,7 +34,7 @@ to the next section for more detailed explanations.
Known parallel compilers
------
-HDF5 knows serveral parallel compilers: mpicc, hcc, mpcc, mpcc_r.
+HDF5 knows several parallel compilers: mpicc, hcc, mpcc, mpcc_r.
To build parallel HDF5 with one of the above, just set CC to it
and configure. The "--enable-parallel" option is optional in this case.
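
For example, assuming a csh-type shell and an MPICH-style "mpicc" wrapper
(a sketch only; the compiler command and install prefix will differ on
your system), the whole build could look like:

    % setenv CC mpicc
    % ./configure --prefix=/usr/local/hdf5-parallel
    % make
    % make check
    % make install
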
@@ -48,7 +48,7 @@ and configure. The "--enable-parallel" is optional in this case.
TFLOPS
------
-Follow the instuctions in INSTALL_TFLOPS.
+Follow the instructions in INSTALL_TFLOPS.
-------
IBM SP
@@ -76,6 +76,18 @@ Then do the following steps:
$ make install
+We also suggest that you add "-qxlf90=autodealloc" to FFLAGS when
+building parallel HDF5 with Fortran enabled. This can be done by invoking:
+
+ setenv FFLAGS -qxlf90=autodealloc # 32 bit build
+
+or
+
+ setenv FFLAGS "-q64 -qxlf90=autodealloc" # 64 bit build
+
+prior to running configure. Recall that "-q64" is necessary
+for 64-bit builds.
+
---------------
SGI Origin 2000
Cray T3E
@@ -213,9 +225,9 @@ Parallel HDF5. It usually exits with non-zero code if a required MPI-IO
feature does not succeed as expected. One exception is the testing of
accessing files larger than 2GB. If the underlying file system or
the MPI-IO library fails to handle file sizes larger than 2GB, the test
-will print informational essages stating the failure but will not exit
+will print informational messages stating the failure but will not exit
with non-zero code. Failure to support file size greater than 2GB is
-not a fatal error for HDF5 becuase HDF5 can use other file-drivers such
+not a fatal error for HDF5 because HDF5 can use other file-drivers such
as families of files to bypass the file size limit.
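
The family driver mentioned above is selected through a file access property
list. The following is a minimal serial C sketch, not part of this document;
the file name and the 1 GB member size are arbitrary illustrations:

    #include "hdf5.h"

    int main(void)
    {
        hid_t fapl, file;

        /* Split the HDF5 file into 1 GB members named big-00000.h5, ... */
        fapl = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_fapl_family(fapl, (hsize_t)1024*1024*1024, H5P_DEFAULT);
        file = H5Fcreate("big-%05d.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
        H5Fclose(file);
        H5Pclose(fapl);
        return 0;
    }
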
By default, the parallel tests use the current directory as the test directory.
@@ -227,3 +239,177 @@ For example, if the tests should use directory /PFS/user/me, do
(In some batch job systems, you may need to hard-set HDF5_PARAPREFIX in
shell initialization files such as .profile or .cshrc.)
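
For instance, with a csh-type shell one might put a line like the following
in .cshrc before running the tests (the directory name is only an
illustration):

    setenv HDF5_PARAPREFIX /PFS/user/me
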
+
+
+5. Sample programs
+------------------
+
+Here are sample MPI-IO C and Fortran programs. You may use them to run simple
+tests of your MPI compiler and the parallel file system. The MPI commands
+used here are mpicc, mpif90 and mpirun. Replace them with the corresponding
+commands of your system.
+
+The programs assume they are run from within the parallel file system, so
+they create the test data file in the current directory. If the parallel
+file system is mounted elsewhere, run the sample programs from there or
+edit the programs to use a different file name.
+
+Example compiling and running:
+
+% mpicc Sample_mpio.c -o c.out
+% mpirun -np 4 c.out
+
+% mpif90 Sample_mpio.f90 -o f.out
+% mpirun -np 4 f.out
+
+
+==> Sample_mpio.c <==
+/* Simple MPI-IO program testing if a parallel file can be created.
+ * The default filename can be overridden via the first program argument.
+ * Each process writes something, then reads all data back.
+ */
+
+#include <stdio.h>
+#include <unistd.h>	/* for gethostname() */
+#include <mpi.h>
+#ifndef MPI_FILE_NULL /*MPIO may be defined in mpi.h already */
+# include <mpio.h>
+#endif
+
+#define DIMSIZE 10 /* dimension size, avoid powers of 2. */
+#define PRINTID printf("Proc %d: ", mpi_rank)
+
+int main(int ac, char **av)
+{
+ char hostname[128];
+ int mpi_size, mpi_rank;
+ MPI_File fh;
+ char *filename = "./mpitest.data";
+ char mpi_err_str[MPI_MAX_ERROR_STRING];
+ int mpi_err_strlen;
+ int mpi_err;
+ char writedata[DIMSIZE], readdata[DIMSIZE];
+ char expect_val;
+ int i, irank;
+ int nerrors = 0; /* number of errors */
+ MPI_Offset mpi_off;
+ MPI_Status mpi_stat;
+
+ MPI_Init(&ac, &av);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* get file name if provided */
+ if (ac > 1){
+ filename = *++av;
+ }
+ if (mpi_rank==0){
+ printf("Testing simple MPIO program with %d processes accessing file %s\n",
+ mpi_size, filename);
+ printf(" (Filename can be specified via program argument)\n");
+ }
+
+ /* show the hostname so that we can tell where the processes are running */
+ if (gethostname(hostname, 128) < 0){
+ PRINTID;
+ printf("gethostname failed\n");
+ return 1;
+ }
+ PRINTID;
+ printf("hostname=%s\n", hostname);
+
+ if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename,
+ MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_DELETE_ON_CLOSE,
+ MPI_INFO_NULL, &fh))
+ != MPI_SUCCESS){
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ PRINTID;
+ printf("MPI_File_open failed (%s)\n", mpi_err_str);
+ return 1;
+ }
+
+ /* each process writes some data */
+ for (i=0; i < DIMSIZE; i++)
+ writedata[i] = mpi_rank*DIMSIZE + i;
+ mpi_off = mpi_rank*DIMSIZE;
+ if ((mpi_err = MPI_File_write_at(fh, mpi_off, writedata, DIMSIZE, MPI_BYTE,
+ &mpi_stat))
+ != MPI_SUCCESS){
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ PRINTID;
+ printf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n",
+ (long) mpi_off, (int) DIMSIZE, mpi_err_str);
+ return 1;
+    }
+
+    /* make sure all processes have finished writing. */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+    /* each process reads all the data back and verifies it. */
+ for (irank=0; irank < mpi_size; irank++){
+ mpi_off = irank*DIMSIZE;
+ if ((mpi_err = MPI_File_read_at(fh, mpi_off, readdata, DIMSIZE, MPI_BYTE,
+ &mpi_stat))
+ != MPI_SUCCESS){
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ PRINTID;
+ printf("MPI_File_read_at offset(%ld), bytes (%d), failed (%s)\n",
+ (long) mpi_off, (int) DIMSIZE, mpi_err_str);
+ return 1;
+        }
+ for (i=0; i < DIMSIZE; i++){
+ expect_val = irank*DIMSIZE + i;
+ if (readdata[i] != expect_val){
+ PRINTID;
+ printf("read data[%d:%d] got %d, expect %d\n", irank, i,
+ readdata[i], expect_val);
+ nerrors++;
+ }
+ }
+ }
+ if (nerrors)
+ return 1;
+
+ MPI_File_close(&fh);
+
+ PRINTID;
+ printf("all tests passed\n");
+
+ MPI_Finalize();
+ return 0;
+}
+
+==> Sample_mpio.f90 <==
+!
+! The following example demonstrates how to create and close a parallel
+! file using MPI-IO calls.
+!
+! USE MPI is the proper way to bring in MPI definitions, but many
+! MPI Fortran compilers support the pseudo-standard INCLUDE 'mpif.h'.
+! So, HDF5 uses the INCLUDE statement instead.
+!
+
+ PROGRAM MPIOEXAMPLE
+
+ ! USE MPI
+
+ IMPLICIT NONE
+
+ INCLUDE 'mpif.h'
+
+ CHARACTER(LEN=80), PARAMETER :: filename = "filef.h5" ! File name
+ INTEGER :: ierror ! Error flag
+ INTEGER :: fh ! File handle
+ INTEGER :: amode ! File access mode
+
+ call MPI_INIT(ierror)
+ amode = MPI_MODE_RDWR + MPI_MODE_CREATE + MPI_MODE_DELETE_ON_CLOSE
+ call MPI_FILE_OPEN(MPI_COMM_WORLD, filename, amode, MPI_INFO_NULL, fh, ierror)
+ print *, "Trying to create ", filename
+ if ( ierror .eq. MPI_SUCCESS ) then
+ print *, "MPI_FILE_OPEN succeeded"
+ call MPI_FILE_CLOSE(fh, ierror)
+ else
+ print *, "MPI_FILE_OPEN failed"
+ endif
+
+    call MPI_FINALIZE(ierror)
+ END PROGRAM