From ed0f0bee7309ecb9ce9cba829423b5626e2af55b Mon Sep 17 00:00:00 2001
From: Moe Jette <jette1@llnl.gov>
Date: Mon, 16 Dec 2002 21:26:44 +0000
Subject: [PATCH] Document how to use MPICH with srun

---
 doc/man/man1/srun.1  | 48 ++++++++++++++++++++++++++++++++++++++++++++
 src/srun/mpirun.elan | 22 ++++++++++++++++++++
 2 files changed, 70 insertions(+)
 create mode 100644 src/srun/mpirun.elan

diff --git a/doc/man/man1/srun.1 b/doc/man/man1/srun.1
index 44c162ad083..36cf9a3ff5f 100644
--- a/doc/man/man1/srun.1
+++ b/doc/man/man1/srun.1
@@ -458,6 +458,54 @@ total number of processes in the current job
 SLURM_NODELIST
 list of nodes that the slurm job is executing on.
 
+.SH "MPI SUPPORT"
+On computers with a Quadrics interconnect, \fBsrun\fR directly supports 
+the Quadrics version of MPI without modification. Applications built 
+using the Quadrics MPI library will communicate directly over the 
+Quadrics interconnect without any special \fBsrun\fR options.
+
+Users may also use MPICH on any computer where that is available. 
+The \fBmpirun\fR command may need to be provided with information 
+on its command line identifying the resources to be used. The 
+installer of the MPICH software may configure it to perform these 
+steps automatically. At worst, you must specify two parameters:
+.TP
+\fB\-np SLURM_NPROCS\fR
+number of processors to run on
+.TP
+\fB\-machinefile <machinefile>\fR
+list of computers on which to execute. This list can be constructed by 
+executing the command \fBsrun /bin/hostname\fR and writing its standard 
+output to the desired file. Execute \fBmpirun \-\-help\fR for more options.
+
+.SH "EXAMPLES"
+.eo
+This example demonstrates how one executes a simple MPICH job
+in the event that it has not been configured to automatically 
+set the required parameters (again, this is the worst-case scenario). 
+We use \fBsrun\fR to build a list of machines (nodes) to be used by 
+\fBmpirun\fR in its required format. A sample command line and 
+the script to be executed follow.
+.br
+> cat my_script
+#!/bin/csh
+.br
+srun /bin/hostname >nodes
+.br
+mpirun -np $SLURM_NPROCS -machinefile nodes /bin/hostname
+.br
+rm nodes
+.br
+> srun -N2 -n4 my_script
+.ec
+
+.eo
+If MPICH is configured to directly use SLURM, the execution line is 
+much simpler:
+.br
+> mpirun -np 4 /bin/hostname
+.ec
+
 .SH "BUGS"
 If the number of processors per node allocated to a job is not evenly 
 divisible by the value of \fBcpus\-per\-node\fR, tasks may be initiated 
diff --git a/src/srun/mpirun.elan b/src/srun/mpirun.elan
new file mode 100644
index 00000000000..89ebf4f5071
--- /dev/null
+++ b/src/srun/mpirun.elan
@@ -0,0 +1,22 @@
+#! /bin/sh
+# Sample mpirun.<arch> file to be installed for direct use of srun
+if [ "$MPIR_HOME" = "" ] ; then 
+    MPIR_HOME=#MPIR_HOME#
+fi
+if [ "$MPIR_HOME" = "#""MPIR_HOME""#" ] ; then
+    MPIR_HOME=`pwd`/..
+fi
+if [ "#MPIRUN_BIN#" = "#""MPIRUN_BIN""#" ] ; then 
+    MPIRUN_HOME=$MPIR_HOME/bin
+else
+    MPIRUN_HOME=$MPIR_HOME/#MPIRUN_BIN#
+fi
+if [ "$argsset" = "" ] ; then
+   . $MPIRUN_HOME/mpirun.args
+   argsset=1
+fi
+if [ "$debugger" != "" ] ;  then
+    $Show "totalview srun -a -n$np $progname $cmdLineArgs"
+else
+    $Show srun -n $np $progname $cmdLineArgs  
+fi
-- 
GitLab