diff --git a/NEWS b/NEWS
index 5db92fb6d2b9c5dd14f2e588b49d713a02edef9a..6484a50d54bbdbfe4acfc843f3c40711261f6a7b 100644
--- a/NEWS
+++ b/NEWS
@@ -220,6 +220,10 @@ documents those changes that are of interest to users and admins.
     prevented dynamic creation of new blocks when this state was present.
  -- Fixed bug where a users association limits were not enforced, only 
     parent limits were being enforced.
+ -- For OpenMPI use of SLURM reserved ports, reserve a count of ports equal to 
+    the maximum task count on any node plus one.
+ -- Do not reset SLURM_TASKS_PER_NODE when srun --preserve-env option is used
+    (needed by OpenMPI).
 
 * Changes in SLURM 2.0.4
 ========================
diff --git a/doc/html/configurator.html.in b/doc/html/configurator.html.in
index f5e711eae0917699b5beb81f6cb2e1ea37ed546a..ece210ba7aea7d05d1caa4490a3cc36e56443f5e 100644
--- a/doc/html/configurator.html.in
+++ b/doc/html/configurator.html.in
@@ -318,7 +318,7 @@ function displayfile()
 </HEAD>
 <BODY>
 <FORM name=config>
-<H1>SLURM Version @SLURM_MAJOR@.@SLURM_MINOR@ Configration Tool</H1>
+<H1>SLURM Version @SLURM_MAJOR@.@SLURM_MINOR@ Configuration Tool</H1>
 <P>This form can be used to create a SLURM configuration file with 
 you controlling many of the important configuration parameters.</P>
 
@@ -327,7 +327,7 @@ Configuration files for other versions of SLURM should be built
 using the tool distributed with it in <i>doc/html/configurator.html</i>.
 Some parameters will be set to default values, but you can
 manually edit the resulting <I>slurm.conf</I> as desired
-for greater flexibiilty. See <I>man slurm.conf</I> for more
+for greater flexibility. See <I>man slurm.conf</I> for more
 details about the configuration parameters.</P>
 
 <P>Note the while SLURM daemons create log files and other files as needed, 
@@ -377,7 +377,7 @@ You can also specify addresses of these computers if desired
 Only a few of the possible parameters associated with the nodes will 
 be set by this tool, but many others are available. 
 All of the nodes will be placed into a single partition (or queue) 
-with global access. Many options are availble to group nodes into 
+with global access. Many options are available to group nodes into 
 partitions with a wide variety of configuration parameters. 
 Manually edit the <i>slurm.conf</i> produced to exercise these options.
 Node names and addresses may be specified using a numeric range specification.
@@ -455,10 +455,10 @@ after making any changes to system password or group databases.
 <P>
 
 <H2>SLURM Port Numbers</H2>
-The SLURM controller (slurmctld) requires a unique port for communcations 
-as do the SLURM compute node deamonds (slurmd). If not set, slurm ports 
+The SLURM controller (slurmctld) requires a unique port for communications 
+as do the SLURM compute node daemons (slurmd). If not set, slurm ports 
 are set by checking for an entry in <I>/etc/services</I> and if that 
-fails by using an interal default set at SLURM build time. 
+fails by using an internal default set at SLURM build time. 
 <P>
 <input type="text" name="slurmctld_port" value="6817"> <B>SlurmctldPort</B>
 <P>
@@ -530,15 +530,19 @@ to Maui (configuration parameter <B>SchedulerPort</B> must specified)<BR>
 to Moab (configuration parameter <B>SchedulerPort</B> must specified)<BR>
 <P>
 <input type="text" name="scheduler_port" value="7321"> <B>SchedulerPort</B>: scheduler 
-communcations port (used by Wiki only)
+communications port (used by Wiki only)
 <P>
-Define what node configuration should be used. 
+Define what node configuration (sockets, cores, memory, etc.) should be used. 
 Using values defined in the configuration file will provide faster scheduling.<BR>
 Select one value for <B>FastSchedule</B>:<BR>
 <input type="radio" name="fast_schedule" value="1" checked>
 <B>1</B>: Use node configuration values defined in configuration file<BR>
 <input type="radio" name="fast_schedule" value="0">
-<B>0</B>: Use node configuration values actually found on each node
+<B>0</B>: Use node configuration values actually found on each node 
+(if configured with gang scheduling or allocation of individual 
+processors to jobs rather than only whole node allocations, the processor 
+count on the node should match the configured value to avoid having extra 
+processors left idle)
 <P>
 
 <H2>Interconnect</H2>
@@ -633,7 +637,7 @@ Select one value for <B>SelectType</B>:<BR>
 </DL>
 <input type="radio" name="select_type" value="linear" checked>
 <B>Linear</B>: Node-base 
-resource allocation, does not manage indivual processor allocation<BR>
+resource allocation, does not manage individual processor allocation<BR>
 <input type="radio" name="select_type" value="bluegene">
 <B>BlueGene</B>: For IBM Blue Gene systems only<BR>
 <P>
@@ -718,7 +722,7 @@ Write completion status to a MySQL database<BR>
 <input type="radio" name="job_comp_type" value="pgsql"> <B>PGSQL</B>: 
 Write completion status to a PostreSQL database<BR>
 <input type="radio" name="job_comp_type" value="slurmdbd"> <B>SlurmDBD</B>: 
-Write completion status to Slurm adatabase daemon (serving multiple Slurm clusters) 
+Write completion status to a Slurm database daemon (serving multiple Slurm clusters) 
 which will write to some database<BR>
 <P>
 <input type="text" name="job_comp_loc" value=""> <B>JobCompLoc</B>: 
diff --git a/doc/man/man1/scontrol.1 b/doc/man/man1/scontrol.1
index b66717050050e50f91ba7df934e72386963c3d05..ec6efae2aa4989508ee98aa0db9cb59a530cac57 100644
--- a/doc/man/man1/scontrol.1
+++ b/doc/man/man1/scontrol.1
@@ -401,8 +401,10 @@ Set the job's name to the specified value.
 \fINice\fP[=delta]
 Adjust job's priority by the specified value. Default value is 100.
 The adjustment range is from \-10000 (highest priority)
-to 10000 (lowest priority). Only privileged users can specify
-a negative adjustment.
+to 10000 (lowest priority). 
+Nice value changes are not additive, but overwrite any prior nice
+value and are applied to the job's base priority.
+Only privileged users can specify a negative adjustment.
 .TP
 \fIPartition\fP=<name>
 Set the job's partition to the specified value.
diff --git a/src/common/env.c b/src/common/env.c
index 6bbd90efe86eb04c81547758dfeeae8b6f150376..790045054b62882a217d466c925c4a937c8082b4 100644
--- a/src/common/env.c
+++ b/src/common/env.c
@@ -305,7 +305,7 @@ char *getenvp(char **env, const char *name)
 	return NULL;
 }
 
-int setup_env(env_t *env)
+int setup_env(env_t *env, bool preserve_env)
 {
 	int rc = SLURM_SUCCESS;
 	char *dist = NULL, *lllp_dist = NULL;
@@ -320,7 +320,7 @@ int setup_env(env_t *env)
 		 rc = SLURM_FAILURE;
 	}
 
-	if (env->nprocs
+	if (!preserve_env && env->nprocs
 	   && setenvf(&env->env, "SLURM_NPROCS", "%d", env->nprocs)) {
 		error("Unable to set SLURM_NPROCS environment variable");
 		rc = SLURM_FAILURE;
@@ -687,7 +687,7 @@ int setup_env(env_t *env)
 		rc = SLURM_FAILURE;
 	}
 	
-	if (env->nhosts
+	if (!preserve_env && env->nhosts
 	    && setenvf(&env->env, "SLURM_NNODES", "%d", env->nhosts)) {
 		error("Unable to set SLURM_NNODES environment var");
 		rc = SLURM_FAILURE;
@@ -699,7 +699,7 @@ int setup_env(env_t *env)
 		rc = SLURM_FAILURE;
 	}
 	
-	if (env->task_count 
+	if (!preserve_env && env->task_count 
 	    && setenvf (&env->env, 
 			"SLURM_TASKS_PER_NODE", "%s", env->task_count)) {
 		error ("Can't set SLURM_TASKS_PER_NODE env variable");
@@ -1143,7 +1143,8 @@ env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch,
  * pointed to by "dest" is NULL, memory will automatically be xmalloc'ed.
  * The array is terminated by a NULL pointer, and thus is suitable for
  * use by execle() and other env_array_* functions.  If preserve_env is
- * true, the variables SLURM_NNODES and SLURM_NPROCS remain unchanged.
+ * true, the variables SLURM_NNODES, SLURM_NPROCS and SLURM_TASKS_PER_NODE
+ * remain unchanged.
  *
  * Sets variables:
  *	SLURM_STEP_ID
@@ -1196,8 +1197,9 @@ env_array_for_step(char ***dest,
 					"%hu", step->step_layout->node_cnt);
 		env_array_overwrite_fmt(dest, "SLURM_NPROCS",
 					"%u", step->step_layout->task_cnt);
+		env_array_overwrite_fmt(dest, "SLURM_TASKS_PER_NODE", "%s", 
+					tmp);
 	}
-	env_array_overwrite_fmt(dest, "SLURM_TASKS_PER_NODE", "%s", tmp);
 	env_array_overwrite_fmt(dest, "SLURM_SRUN_COMM_PORT",
 				"%hu", launcher_port);
 
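The env.c hunks above gate the SLURM_NPROCS, SLURM_NNODES and
SLURM_TASKS_PER_NODE overwrites behind the new preserve_env flag, and
env_array_for_step() now gives SLURM_TASKS_PER_NODE the same treatment as
the other two. A minimal sketch of the guard pattern follows; it is
illustrative only, not part of the patch, and setenv_fmt() is a
hypothetical stand-in for SLURM's setenvf():

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for setenvf(): format an integer value and
     * overwrite any existing definition of the variable.  Returns
     * nonzero on failure, like the real helper. */
    static int setenv_fmt(const char *name, int value)
    {
        char buf[32];
        snprintf(buf, sizeof(buf), "%d", value);
        return setenv(name, buf, 1);
    }

    static int setup_env_sketch(int nprocs, bool preserve_env)
    {
        /* With srun --preserve-env, skip the overwrite so the value
         * inherited from the job allocation stays visible to tasks. */
        if (!preserve_env && nprocs &&
            setenv_fmt("SLURM_NPROCS", nprocs))
            return -1;
        return 0;
    }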
diff --git a/src/common/env.h b/src/common/env.h
index 99c4e9c55f05673d0946fee70b632dbbb11547ee..46aec6c252d81d76cec85a7f5f830904a66f54d3 100644
--- a/src/common/env.h
+++ b/src/common/env.h
@@ -84,7 +84,7 @@ int     setenvfs(const char *fmt, ...);
 int     setenvf(char ***envp, const char *name, const char *fmt, ...);
 void	unsetenvp(char **env, const char *name);
 char *	getenvp(char **env, const char *name);
-int     setup_env(env_t *env);
+int     setup_env(env_t *env, bool preserve_env);
 
 /**********************************************************************
  * Newer environment variable handling scheme
diff --git a/src/slurmctld/step_mgr.c b/src/slurmctld/step_mgr.c
index e9f8f7daba99c096cfc330929ba0a51560d4084c..7473a1fa90ced04ca5f10e6ae9b2fb8183fb7762 100644
--- a/src/slurmctld/step_mgr.c
+++ b/src/slurmctld/step_mgr.c
@@ -1393,12 +1393,14 @@ step_create(job_step_create_request_msg_t *step_specs,
 
 		if ((step_specs->resv_port_cnt != (uint16_t) NO_VAL) &&
 		    (step_specs->resv_port_cnt == 0)) {
-			/* reserved port count set to max task count any node */
+			/* reserved port count set to maximum task count on 
+			 * any node plus one */
 			for (i=0; i<step_ptr->step_layout->node_cnt; i++) {
 				step_specs->resv_port_cnt = 
 					MAX(step_specs->resv_port_cnt,
 					    step_ptr->step_layout->tasks[i]);
 			}
+			step_specs->resv_port_cnt++;
 		}
 		if (step_specs->resv_port_cnt != (uint16_t) NO_VAL) {
 			step_ptr->resv_port_cnt = step_specs->resv_port_cnt;
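The step_mgr.c hunk above makes the default reserved-port count one more
than the largest per-node task count in the step layout. A minimal sketch
of that computation, with tasks[] and node_cnt standing in for the
step_ptr->step_layout fields:

    #include <stdint.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    /* Default reserved-port count: the maximum task count on any node,
     * plus one (the extra port is what OpenMPI needs). */
    static uint16_t default_resv_port_cnt(const uint16_t tasks[],
                                          uint32_t node_cnt)
    {
        uint16_t cnt = 0;
        uint32_t i;

        for (i = 0; i < node_cnt; i++)
            cnt = MAX(cnt, tasks[i]);
        return cnt + 1;
    }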
diff --git a/src/slurmd/slurmstepd/mgr.c b/src/slurmd/slurmstepd/mgr.c
index fb5b96236b851caffb480de5001bcecdbe413298..5bdc8e2f9ffd36ced28f2c056fb5425625908626 100644
--- a/src/slurmd/slurmstepd/mgr.c
+++ b/src/slurmd/slurmstepd/mgr.c
@@ -1388,7 +1388,7 @@ _wait_for_any_task(slurmd_job_t *job, bool waitflag)
 			
 			job->envtp->distribution = -1;
 			job->envtp->batch_flag = job->batch;
-			setup_env(job->envtp);
+			setup_env(job->envtp, false);
 			job->env = job->envtp->env;
 			if (job->task_epilog) {
 				_run_script_as_user("user task_epilog",
diff --git a/src/slurmd/slurmstepd/task.c b/src/slurmd/slurmstepd/task.c
index ffde558c3f93a5f33d3500314cc631f146b3e57c..0ac6d6c44843d8aa1d5465978d72e9ec692e4f82 100644
--- a/src/slurmd/slurmstepd/task.c
+++ b/src/slurmd/slurmstepd/task.c
@@ -382,7 +382,7 @@ exec_task(slurmd_job_t *job, int i, int waitfd)
 	job->envtp->distribution = -1;
 	job->envtp->ckpt_dir = xstrdup(job->ckpt_dir);
 	job->envtp->batch_flag = job->batch;
-	setup_env(job->envtp);
+	setup_env(job->envtp, false);
 	setenvf(&job->envtp->env, "SLURMD_NODENAME", "%s", conf->node_name);
 	job->env = job->envtp->env;
 	job->envtp->env = NULL;
diff --git a/src/srun/srun.c b/src/srun/srun.c
index a5c31c3add169e98af0fa84520e8ac1aeb3e6927..aed48fa942875aeb27673c77184a8be3a9e02ff8 100644
--- a/src/srun/srun.c
+++ b/src/srun/srun.c
@@ -371,7 +371,7 @@ int srun(int ac, char **av)
 		env->ws_col   = job->ws_col;
 		env->ws_row   = job->ws_row;
 	}
-	setup_env(env);
+	setup_env(env, opt.preserve_env);
 	xfree(env->task_count);
 	xfree(env);
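The srun and slurmstepd hunks wire the new parameter to its call sites:
srun forwards opt.preserve_env (set by its --preserve-env option), while
both slurmstepd paths pass false, so task environments on the compute
nodes are always rebuilt. A hedged sketch of the two call patterns,
reusing the hypothetical setup_env_sketch() helper from the env.c note
above:

    #include <stdbool.h>

    extern int setup_env_sketch(int nprocs, bool preserve_env);

    /* srun side: honor the user's --preserve-env choice. */
    static int srun_call(bool opt_preserve_env, int nprocs)
    {
        return setup_env_sketch(nprocs, opt_preserve_env);
    }

    /* slurmstepd side (task launch and task epilog): always reset. */
    static int stepd_call(int nprocs)
    {
        return setup_env_sketch(nprocs, false);
    }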