diff --git a/doc/html/quickstart.shtml b/doc/html/quickstart.shtml
index 7601c8538851e43e1619195295258416695efa4c..c7ab8b567fbd5c018c8b38fa8016f8e68d92fb6c 100644
--- a/doc/html/quickstart.shtml
+++ b/doc/html/quickstart.shtml
@@ -324,8 +324,8 @@ for more information.</p>
 
 <p><a href="http://www-unix.mcs.anl.gov/mpi/mpich1/"><b>MPICH1</b></a>
 has several different programming models. If you are using the shared
-memory model (DEFAULT_DEVICE=ch_shmem in the mpirun script), then 
-initiated the tasks using the <span class="commandline">srun</span> command 
+memory model (<i>DEFAULT_DEVICE=ch_shmem</i> in the mpirun script), then 
+initiate the tasks using the <span class="commandline">srun</span> command 
 with the <i>--mpi=mpich1_shmem</i> option.</p>
 <pre>
 $ srun -n16 --mpi=mpich1_shmem a.out
@@ -333,7 +333,7 @@ $ srun -n16 --mpi=mpich1_shmem a.out
 
-<p>Other MPICH1 programming models current rely upon the SLURM 
+<p>Other MPICH1 programming models currently rely upon the SLURM 
 <span class="commandline">salloc</span> or 
-<span class="commandline">sbatch</span> command to allocate.
+<span class="commandline">sbatch</span> command to allocate resources.
 In either case, specify the maximum number of tasks required for the job.
 You will then need to build a list of hosts to be used and use that 
 as an argument to the mpirun command. 
diff --git a/src/plugins/mpi/mpich1_shmem/mpich1_shmem.c b/src/plugins/mpi/mpich1_shmem/mpich1_shmem.c
index cadd9e8e0351bddd04cbad40ac6726e806cf2506..19bb60d01b4cfdfb74186594b7c1e82fbc16870a 100644
--- a/src/plugins/mpi/mpich1_shmem/mpich1_shmem.c
+++ b/src/plugins/mpi/mpich1_shmem/mpich1_shmem.c
@@ -83,7 +83,7 @@ int p_mpi_hook_slurmstepd_task(const mpi_plugin_task_info_t *job,
 			       char ***env)
 {
 	debug("Using mpi/mpich1_shmem");
-	env_array_overwrite_fmt(env, "MPIRUN_NP", "%u", job->ntasks);
+	env_array_overwrite_fmt(env, "MPICH_NP", "%u", job->ntasks);
 	return SLURM_SUCCESS;
 }