diff --git a/NEWS b/NEWS
index a61e8a595516fca834d5c2261e03bf91de4ab1c5..05ef0d75093fd78de93a2dff5bbdd507752875b9 100644
--- a/NEWS
+++ b/NEWS
@@ -250,6 +250,8 @@ documents those changes that are of interest to users and administrators.
 * Changes in Slurm 14.11.8
 ==========================
  -- Eliminate need for user to set user_id on job_update calls.
+ -- Correct list of unavailable nodes reported in a job's "reason" field when
+    that job can not start.
 
 * Changes in Slurm 14.11.7
 ==========================
diff --git a/doc/man/man1/sacct.1 b/doc/man/man1/sacct.1
index e89cfcb2630ac0ad1811ab914ce2ef4486f743bd..0a3a1bd02dd6d33fabf1927d6fe4ba5c77d85288 100644
--- a/doc/man/man1/sacct.1
+++ b/doc/man/man1/sacct.1
@@ -816,6 +816,9 @@ Some \fBsacct\fR options may
 be set via environment variables. These environment variables,
 along with their corresponding options, are listed below. (Note:
 Commandline options will always override these settings.)
+.TP 20
+\fBSLURM_CONF\fR
+The location of the SLURM configuration file.
 .TP
 \fBSLURM_TIME_FORMAT\fR
 Specify the format used to report time stamps. A value of \fIstandard\fR, the
diff --git a/doc/man/man1/sacctmgr.1 b/doc/man/man1/sacctmgr.1
index a07cbda244f41181a49e6060fe1fcd3c216ee535..cb2505f1a9012cfc9d17c193d7e3651a0b702c82 100644
--- a/doc/man/man1/sacctmgr.1
+++ b/doc/man/man1/sacctmgr.1
@@ -1878,6 +1878,14 @@ File to load into database.
 SQL to insert directly into the database.  This should be used very
 cautiously since this is writing your sql into the database.
 
+.SH "ENVIRONMENT VARIABLES"
+.PP
+Some \fBsacctmgr\fR options may be set via environment variables. These
+environment variables, along with their corresponding options, are listed below.
+(Note: commandline options will always override these settings)
+.TP 20
+\fBSLURM_CONF\fR
+The location of the SLURM configuration file.
 
 .SH "EXAMPLES"
 \fBNOTE:\fR There is an order to set up accounting associations.
diff --git a/doc/man/man1/salloc.1 b/doc/man/man1/salloc.1
index ac7225966ef516aaf3bf617034e49769baebe910..ac56bae714ee93feb8cdf8f2d89c0b1ae4fcfb7b 100644
--- a/doc/man/man1/salloc.1
+++ b/doc/man/man1/salloc.1
@@ -1383,20 +1383,6 @@ Same as \fB\-v, \-\-verbose\fR
 \fBSALLOC_EXCLUSIVE\fR
 Same as \fB\-\-exclusive\fR
 .TP
-\fBSLURM_EXIT_ERROR\fR
-Specifies the exit code generated when a SLURM error occurs
-(e.g. invalid options).
-This can be used by a script to distinguish application exit codes from
-various SLURM error conditions.
-Also see \fBSLURM_EXIT_IMMEDIATE\fR.
-.TP
-\fBSLURM_EXIT_IMMEDIATE\fR
-Specifies the exit code generated when the \fB\-\-immediate\fR option
-is used and resources are not currently available.
-This can be used by a script to distinguish application exit codes from
-various SLURM error conditions.
-Also see \fBSLURM_EXIT_ERROR\fR.
-.TP
 \fBSALLOC_GEOMETRY\fR
 Same as \fB\-g, \-\-geometry\fR
 .TP
@@ -1470,6 +1456,23 @@ Same as \fB\-\-wckey\fR
 .TP
 \fBSALLOC_WAIT4SWITCH\fR
 Max time waiting for requested switches. See \fB\-\-switches\fR
+.TP
+\fBSLURM_CONF\fR
+The location of the SLURM configuration file.
+.TP
+\fBSLURM_EXIT_ERROR\fR
+Specifies the exit code generated when a SLURM error occurs
+(e.g. invalid options).
+This can be used by a script to distinguish application exit codes from
+various SLURM error conditions.
+Also see \fBSLURM_EXIT_IMMEDIATE\fR.
+.TP
+\fBSLURM_EXIT_IMMEDIATE\fR
+Specifies the exit code generated when the \fB\-\-immediate\fR option
+is used and resources are not currently available.
+This can be used by a script to distinguish application exit codes from
+various SLURM error conditions.
+Also see \fBSLURM_EXIT_ERROR\fR.
 
 .SH "OUTPUT ENVIRONMENT VARIABLES"
 .PP
@@ -1482,6 +1485,15 @@ The reservation ID on Cray systems running ALPS/BASIL only.
 \fBSLURM_CLUSTER_NAME\fR
 Name of the cluster on which the job is executing.
 .TP
+\fBMPIRUN_NOALLOCATE\fR
+Do not allocate a block on Blue Gene L/P systems only.
+.TP
+\fBMPIRUN_NOFREE\fR
+Do not free a block on Blue Gene L/P systems only.
+.TP
+\fBMPIRUN_PARTITION\fR
+The block name on Blue Gene systems only.
+.TP
 \fBSLURM_CPUS_PER_TASK\fR
 Number of cpus requested per task.
 Only set if the \fB\-\-cpus\-per\-task\fR option is specified.
@@ -1541,15 +1553,6 @@ count, that count is followed by "(x#)" where "#" is the
 repetition count. For example, "SLURM_TASKS_PER_NODE=2(x3),1"
 indicates that the first three nodes will each execute three
 tasks and the fourth node will execute one task.
-.TP
-\fBMPIRUN_NOALLOCATE\fR
-Do not allocate a block on Blue Gene L/P systems only.
-.TP
-\fBMPIRUN_NOFREE\fR
-Do not free a block on Blue Gene L/P systems only.
-.TP
-\fBMPIRUN_PARTITION\fR
-The block name on Blue Gene systems only.
 
 .SH "SIGNALS"
 .LP
diff --git a/doc/man/man1/sattach.1 b/doc/man/man1/sattach.1
index 6484ddd1ee0cdb015369eabb457277a04edd276c..096f4e5e810510ed69afb5702722b771d4279757 100644
--- a/doc/man/man1/sattach.1
+++ b/doc/man/man1/sattach.1
@@ -76,7 +76,10 @@ Upon startup, salloc will read and handle the options set in the following
 environment variables.  Note: Command line options always override environment
 variables settings.
 
-.TP 22
+.TP 20
+\fBSLURM_CONF\fR
+The location of the SLURM configuration file.
+.TP
 \fBSLURM_EXIT_ERROR\fR
 Specifies the exit code generated when a SLURM error occurs
 (e.g. invalid options).
diff --git a/doc/man/man1/sbatch.1 b/doc/man/man1/sbatch.1
index 5822a71d57232cafeb746653bc5c3fc240249504..080a5a36e152277f0505c97ccfea89ce78bf9679 100644
--- a/doc/man/man1/sbatch.1
+++ b/doc/man/man1/sbatch.1
@@ -1687,6 +1687,9 @@ Max time waiting for requested switches. See \fB\-\-switches\fR
 \fBSBATCH_WCKEY\fR
 Same as \fB\-\-wckey\fR
 .TP
+\fBSLURM_CONF\fR
+The location of the SLURM configuration file.
+.TP
 \fBSLURM_EXIT_ERROR\fR
 Specifies the exit code generated when a SLURM error occurs
 (e.g. invalid options).
diff --git a/doc/man/man1/sbcast.1 b/doc/man/man1/sbcast.1
index 43a3e246c0749f30be83589559e0fa98f0aacd68..a38e8ac1bab0563a4efbbc78cc85a0c78586e165 100644
--- a/doc/man/man1/sbcast.1
+++ b/doc/man/man1/sbcast.1
@@ -85,6 +85,9 @@ these settings.)
 .TP
 \fBSBCAST_TIMEOUT\fR
 \fB\-t\fB \fIseconds\fR, fB\-\-timeout\fR=\fIseconds\fR
+.TP
+\fBSLURM_CONF\fR
+The location of the SLURM configuration file.
 
 .SH "AUTHORIZATION"
 
diff --git a/doc/man/man1/scancel.1 b/doc/man/man1/scancel.1
index 8fe5958fe3ceb255a680cb936f5e03b5dd6aaae2..d8af8a91f635141d52f536c9bf0bebcf14a4c342 100644
--- a/doc/man/man1/scancel.1
+++ b/doc/man/man1/scancel.1
@@ -152,37 +152,37 @@ environment variables, along with their corresponding options, are listed below.
 .TP 20
 \fBSCANCEL_ACCOUNT\fR
 \fB\-A\fR, \fB\-\-account\fR=\fIaccount\fR
-.TP 20
+.TP
 \fBSCANCEL_BATCH\fR
 \fB\-b, \-\-batch\fR
-.TP 20
+.TP
 \fBSCANCEL_CTLD\fR
 \fB\-\-ctld\fR
-.TP 20
+.TP
 \fBSCANCEL_INTERACTIVE\fR
 \fB\-i\fR, \fB\-\-interactive\fR
-.TP 20
+.TP
 \fBSCANCEL_NAME\fR
 \fB\-n\fR, \fB\-\-name\fR=\fIjob_name\fR
-.TP 20
+.TP
 \fBSCANCEL_PARTITION\fR
 \fB\-p\fR, \fB\-\-partition\fR=\fIpartition_name\fR
-.TP 20
+.TP
 \fBSCANCEL_QOS\fR
 \fB\-q\fR, \fB\-\-qos\fR=\fIqos\fR
-.TP 20
+.TP
 \fBSCANCEL_STATE\fR
 \fB\-t\fR, \fB\-\-state\fR=\fIjob_state_name\fR
-.TP 20
+.TP
 \fBSCANCEL_USER\fR
 \fB\-u\fR, \fB\-\-user\fR=\fIuser_name\fR
-.TP 20
+.TP
 \fBSCANCEL_VERBOSE\fR
 \fB\-v\fR, \fB\-\-verbose\fR
-.TP 20
+.TP
 \fBSCANCEL_WCKEY\fR
 \fB\-\-wckey\fR=\fIwckey\fR
-.TP 20
+.TP
 \fBSLURM_CONF\fR
 The location of the SLURM configuration file.
 
diff --git a/doc/man/man1/scontrol.1 b/doc/man/man1/scontrol.1
index 8adaf331c5edd4b8c8ecf70c8f4971ead1064d8a..49f03391be0f301b52784490739ee48b7fe79bd9 100644
--- a/doc/man/man1/scontrol.1
+++ b/doc/man/man1/scontrol.1
@@ -786,11 +786,13 @@ The list of nodes allocated to the job.
 The NodeIndices expose the internal indices into the node table
 associated with the node(s) allocated to the job.
 .TP
+.na
 \fINtasksPerN:B:S:C\fP=
 <tasks_per_node>:<tasks_per_baseboard>:<tasks_per_socket>:<tasks_per_core>
 Specifies the number of tasks to be started per hardware component (node,
 baseboard, socket and core).
 Unconstrained values may be shown as "0" or "*".
+.ad
 .TP
 \fIPreemptTime\fP
 Time at which job was signaled that it was selected for preemption.
@@ -803,10 +805,12 @@ Time the job ran prior to last suspend.
 \fIReason\fP
 The reason job is not running: e.g., waiting "Resources".
 .TP
+.na
 \fIReqB:S:C:T\fP=
 <baseboard_count>:<socket_per_baseboard_count>:<core_per_socket_count>:<thread_per_core_count>
 Specifies the count of various hardware components requested by the job.
 Unconstrained values may be shown as "0" or "*".
+.ad
 .TP
 \fISecsPreSuspend\fP=<seconds>
 If the job is suspended, this is the run time accumulated by the job
diff --git a/doc/man/man1/sdiag.1 b/doc/man/man1/sdiag.1
index 18460a6a0970739f02d711c79dfb5539efefe3e3..8de73e68bed69d4de8b52de44ba707491d6003df 100644
--- a/doc/man/man1/sdiag.1
+++ b/doc/man/man1/sdiag.1
@@ -224,6 +224,15 @@ Print list of options and exit.
 \fB\-V\fR, \fB\-\-version\fR
 Print current version number and exit.
 
+.SH "ENVIRONMENT VARIABLES"
+.PP
+Some \fBsdiag\fR options may be set via environment variables. These
+environment variables, along with their corresponding options, are listed below.
+(Note: commandline options will always override these settings)
+.TP 20
+\fBSLURM_CONF\fR
+The location of the SLURM configuration file.
+
 .SH "COPYING"
 Copyright (C) 2010-2011 Barcelona Supercomputing Center.
 .br
diff --git a/doc/man/man1/sprio.1 b/doc/man/man1/sprio.1
index c76b9508a508cb591dce0a620d39702640825fd8..d902530e7ad5d1bc2e07cf9e96fa4b530f2e610e 100644
--- a/doc/man/man1/sprio.1
+++ b/doc/man/man1/sprio.1
@@ -152,6 +152,9 @@ the following environment variables.
 .TP 20
 \fBSLURM_CLUSTERS\fR
 Same as \fB\-\-clusters\fR
+.TP
+\fBSLURM_CONF\fR
+The location of the SLURM configuration file.
 
 .SH "EXAMPLES"
 .eo
diff --git a/doc/man/man1/sreport.1 b/doc/man/man1/sreport.1
index 7c7a97ad42b85de03ef28624b4e75d5d2ea89ce6..57079a097876ec819b3b6864bc4a1a69cdc6c506 100644
--- a/doc/man/man1/sreport.1
+++ b/doc/man/man1/sreport.1
@@ -403,6 +403,15 @@ Account, Cluster, Login, Proper, Used
 .TP
 All commands and options are case-insensitive.
 
+.SH "ENVIRONMENT VARIABLES"
+.PP
+Some \fBsreport\fR options may be set via environment variables. These
+environment variables, along with their corresponding options, are listed below.
+(Note: commandline options will always override these settings)
+.TP 20
+\fBSLURM_CONF\fR
+The location of the SLURM configuration file.
+
 .in 0
 .SH "EXAMPLES"
 .TP
diff --git a/doc/man/man1/sshare.1 b/doc/man/man1/sshare.1
index d1e8c522b712fac3dff32f9aa47bcf7dab3fd684..86bb3c10e34f81f36e67305ff195ca6fd24c91e6 100644
--- a/doc/man/man1/sshare.1
+++ b/doc/man/man1/sshare.1
@@ -150,6 +150,15 @@ Associations with no usage receive the highest possible value, infinity.
 More information about Fair Tree can be found in doc/html/fair_tree.html or
 at http://slurm.schedmd.com/fair_tree.html
 
+.SH "ENVIRONMENT VARIABLES"
+.PP
+Some \fBsshare\fR options may be set via environment variables. These
+environment variables, along with their corresponding options, are listed below.
+(Note: commandline options will always override these settings)
+.TP 20
+\fBSLURM_CONF\fR
+The location of the SLURM configuration file.
+
 .SH "EXAMPLES"
 .eo
 .br
diff --git a/doc/man/man1/sstat.1 b/doc/man/man1/sstat.1
index 550f2393b99a7a2a8b4eec448cb682de76b7d9ea..cb55a237ff97d2cabc8be0b8ea1f2c85054eb45d 100644
--- a/doc/man/man1/sstat.1
+++ b/doc/man/man1/sstat.1
@@ -229,6 +229,15 @@ Total number of tasks in a job or step.
 \f3ReqCPUFreq\fP
 Requested CPU frequency for the step, in kHz.
 
+.SH "ENVIRONMENT VARIABLES"
+.PP
+Some \fBsstat\fR options may be set via environment variables. These
+environment variables, along with their corresponding options, are listed below.
+(Note: commandline options will always override these settings)
+.TP 20
+\fBSLURM_CONF\fR
+The location of the SLURM configuration file.
+
 .SH "EXAMPLES"
 
 .TP
diff --git a/doc/man/man1/strigger.1 b/doc/man/man1/strigger.1
index 608619ca51de2f0fa7da72169ef6a10119bf1d21..f13786e1fff5d9541c066e615ee4514514c73649 100644
--- a/doc/man/man1/strigger.1
+++ b/doc/man/man1/strigger.1
@@ -278,6 +278,15 @@ Name of the user requesting the action
 \fBPROGRAM\fP
 Pathname of the program to execute when the event occurs
 
+.SH "ENVIRONMENT VARIABLES"
+.PP
+Some \fBstrigger\fR options may be set via environment variables. These
+environment variables, along with their corresponding options, are listed below.
+(Note: commandline options will always override these settings)
+.TP 20
+\fBSLURM_CONF\fR
+The location of the SLURM configuration file.
+
 .SH "EXAMPLES"
 Execute the program "/usr/sbin/primary_slurmctld_failure" whenever the
 primary slurmctld fails.
diff --git a/doc/man/man1/sview.1 b/doc/man/man1/sview.1
index ce7eb76fad0ffe773ea56333d1fb80a8d68eab16..370f6abce786c01e90385794c2228984e7af2f70 100644
--- a/doc/man/man1/sview.1
+++ b/doc/man/man1/sview.1
@@ -35,6 +35,15 @@ actively being made.
 Disable \fIAdmin Mode\fR immediately after the changes to avoid possibly making
 unintended changes.
 
+.SH "ENVIRONMENT VARIABLES"
+.PP
+Some \fBsview\fR options may be set via environment variables. These
+environment variables, along with their corresponding options, are listed below.
+(Note: commandline options will always override these settings)
+.TP 20
+\fBSLURM_CONF\fR
+The location of the SLURM configuration file.
+
 .SH "NOTES"
 The sview command can only be build if \fIgtk+\-2.0\fR is installed.
 Systems lacking these libraries will have SLURM installed without
diff --git a/doc/man/man5/slurm.conf.5 b/doc/man/man5/slurm.conf.5
index 596afc1ca449bb9ccff8f714d9f94d06e136de26..925c1d047e0439590b60fd9a5363cdcc1f182cf3 100644
--- a/doc/man/man5/slurm.conf.5
+++ b/doc/man/man5/slurm.conf.5
@@ -1382,8 +1382,8 @@ May not exceed 65533.
 .TP
 \fBMemLimitEnforce\fR
 If set to "no" then Slurm will not terminate the job or the job step
-if they exceeds the value requested using the --mem-per-cpu option of
-salloc/sbatch/srun. This is useful if jobs need to specify --mem-per-cpu
+if they exceed the value requested using the \-\-mem\-per\-cpu option of
+salloc/sbatch/srun. This is useful if jobs need to specify \-\-mem\-per\-cpu
 for scheduling but they should not be terminate if they exceed the
 estimated value. The default value is 'yes', terminate the job/step
 if exceed the requested memory.
diff --git a/src/plugins/sched/backfill/backfill.c b/src/plugins/sched/backfill/backfill.c
index 84956c1c52eab8adf1450837848e30661217439f..3f9c5e10c3f27fafac7b23e335e1d9170711a5ed 100644
--- a/src/plugins/sched/backfill/backfill.c
+++ b/src/plugins/sched/backfill/backfill.c
@@ -1476,7 +1476,7 @@ static int _start_job(struct job_record *job_ptr, bitstr_t *resv_bitmap)
 		job_ptr->details->exc_node_bitmap = bit_copy(resv_bitmap);
 	if (job_ptr->array_recs)
 		is_job_array_head = true;
-	rc = select_nodes(job_ptr, false, NULL, NULL);
+	rc = select_nodes(job_ptr, false, NULL, NULL, NULL);
 	if (is_job_array_head && job_ptr->details) {
 		struct job_record *base_job_ptr;
 		base_job_ptr = find_job_record(job_ptr->array_job_id);
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index 73e54acd7cacf013095d3bd1592434a26c75aa7f..d27319070ce3797c52b251f7246620ad87403cda 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -3838,7 +3838,7 @@ static int _select_nodes_parts(struct job_record *job_ptr, bool test_only,
 			if (job_limits_check(&job_ptr, false) != WAIT_NO_REASON)
 				continue;
 			rc = select_nodes(job_ptr, test_only,
-					  select_node_bitmap, err_msg);
+					  select_node_bitmap, NULL, err_msg);
 			if ((rc != ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE) &&
 			    (rc != ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE) &&
 			    (rc != ESLURM_RESERVATION_BUSY) &&
@@ -3853,7 +3853,7 @@ static int _select_nodes_parts(struct job_record *job_ptr, bool test_only,
 		if (job_limits_check(&job_ptr, false) != WAIT_NO_REASON)
 			test_only = true;
 		rc = select_nodes(job_ptr, test_only, select_node_bitmap,
-				  err_msg);
+				  NULL, err_msg);
 	}
 
 	return rc;
diff --git a/src/slurmctld/job_scheduler.c b/src/slurmctld/job_scheduler.c
index ffd67172ba1c1dd0acc4a0fe4b6224053727ddad..8f30b39391a3449a4dacc297c88cf7835a8231d3 100644
--- a/src/slurmctld/job_scheduler.c
+++ b/src/slurmctld/job_scheduler.c
@@ -756,7 +756,7 @@ next_part:		part_ptr = (struct part_record *)
 		job_ptr->details->exc_node_bitmap =
 			bit_copy(fini_job_ptr->job_resrcs->node_bitmap);
 		bit_not(job_ptr->details->exc_node_bitmap);
-		error_code = select_nodes(job_ptr, false, NULL, NULL);
+		error_code = select_nodes(job_ptr, false, NULL, NULL, NULL);
 		bit_free(job_ptr->details->exc_node_bitmap);
 		job_ptr->details->exc_node_bitmap = orig_exc_bitmap;
 		if (error_code == SLURM_SUCCESS) {
@@ -980,6 +980,7 @@ static int _schedule(uint32_t job_limit)
 	struct part_record *reject_array_part = NULL;
 	uint16_t reject_state_reason = WAIT_NO_REASON;
 	char job_id_buf[32];
+	char *unavail_node_str = NULL;
 #if HAVE_SYS_PRCTL_H
 	char get_name[16];
 #endif
@@ -1199,6 +1200,9 @@ static int _schedule(uint32_t job_limit)
 	failed_parts = xmalloc(sizeof(struct part_record *) * part_cnt);
 	failed_resv = xmalloc(sizeof(struct slurmctld_resv*) * MAX_FAILED_RESV);
 	save_avail_node_bitmap = bit_copy(avail_node_bitmap);
+	bit_not(avail_node_bitmap);
+	unavail_node_str = bitmap2node_name(avail_node_bitmap);
+	bit_not(avail_node_bitmap);
 
 	if (max_jobs_per_part) {
 		ListIterator part_iterator;
@@ -1500,7 +1504,8 @@ next_task:
 			continue;
 		}
 
-		error_code = select_nodes(job_ptr, false, NULL, NULL);
+		error_code = select_nodes(job_ptr, false, NULL,
+					  unavail_node_str, NULL);
 		if (error_code == ESLURM_NODES_BUSY) {
 			debug3("sched: JobId=%u. State=%s. Reason=%s. "
 			       "Priority=%u. Partition=%s.",
@@ -1672,6 +1677,7 @@ next_task:
 	save_last_part_update = last_part_update;
 	FREE_NULL_BITMAP(avail_node_bitmap);
 	avail_node_bitmap = save_avail_node_bitmap;
+	xfree(unavail_node_str);
 	xfree(failed_parts);
 	xfree(failed_resv);
 	if (fifo_sched) {
diff --git a/src/slurmctld/node_scheduler.c b/src/slurmctld/node_scheduler.c
index 8f2df45c5074f457fc81d5c3106ef18e4e89c151..fed981ab7c7dea4e0a9894ecd58c308c3dc69120 100644
--- a/src/slurmctld/node_scheduler.c
+++ b/src/slurmctld/node_scheduler.c
@@ -1619,6 +1619,7 @@ static bool _first_array_task(struct job_record *job_ptr)
  * IN select_node_bitmap - bitmap of nodes to be used for the
  *	job's resource allocation (not returned if NULL), caller
  *	must free
+ * IN unavail_node_str - Nodes which are currently unavailable.
  * OUT err_msg - if not NULL set to error message for job, caller must xfree
  * RET 0 on success, ESLURM code from slurm_errno.h otherwise
  * globals: list_part - global list of partition info
@@ -1633,7 +1634,8 @@ static bool _first_array_task(struct job_record *job_ptr)
  *	3) Call allocate_nodes() to perform the actual allocation
  */
 extern int select_nodes(struct job_record *job_ptr, bool test_only,
-			bitstr_t **select_node_bitmap, char **err_msg)
+			bitstr_t **select_node_bitmap, char *unavail_node_str,
+			char **err_msg)
 {
 	int error_code = SLURM_SUCCESS, i, node_set_size = 0;
 	bitstr_t *select_bitmap = NULL;
@@ -1850,19 +1852,18 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 
 		/* Non-fatal errors for job below */
 		} else if (error_code == ESLURM_NODE_NOT_AVAIL) {
-			char *tmp_nodelist;
 			/* Required nodes are down or drained */
 			debug3("JobId=%u required nodes not avail",
 			       job_ptr->job_id);
 			job_ptr->state_reason = WAIT_NODE_NOT_AVAIL;
 			xfree(job_ptr->state_desc);
-			bit_not(avail_node_bitmap);
-			tmp_nodelist = bitmap2node_name(avail_node_bitmap);
-			bit_not(avail_node_bitmap);
 			xstrfmtcat(job_ptr->state_desc,
-				   "ReqNodeNotAvail(Unavailable:%s)",
-				   tmp_nodelist);
-			xfree(tmp_nodelist);
+				   "ReqNodeNotAvail, May be reserved for other job");
+			if (unavail_node_str) {
+				xstrfmtcat(job_ptr->state_desc,
+					   ", UnavailableNodes:%s",
+					   unavail_node_str);
+			}
 			last_job_update = now;
 		} else if ((error_code == ESLURM_RESERVATION_NOT_USABLE) ||
 			   (error_code == ESLURM_RESERVATION_BUSY)) {
diff --git a/src/slurmctld/node_scheduler.h b/src/slurmctld/node_scheduler.h
index 4bf2b70e8133b0a19bede0eec07a84bf77350e61..58e24e9e536a50a5908e4b39f270dc670893b5aa 100644
--- a/src/slurmctld/node_scheduler.h
+++ b/src/slurmctld/node_scheduler.h
@@ -85,6 +85,7 @@ extern void re_kill_job(struct job_record *job_ptr);
  * IN select_node_bitmap - bitmap of nodes to be used for the
  *	job's resource allocation (not returned if NULL), caller
  *	must free
+ * IN unavail_node_str - Nodes which are currently unavailable.
  * OUT err_msg - if not NULL set to error message for job, caller must xfree
  * RET 0 on success, ESLURM code from slurm_errno.h otherwise
  * globals: list_part - global list of partition info
@@ -99,6 +100,7 @@ extern void re_kill_job(struct job_record *job_ptr);
  *	3) Call allocate_nodes() to perform the actual allocation
  */
 extern int select_nodes(struct job_record *job_ptr, bool test_only,
-		bitstr_t **select_node_bitmap, char **err_msg);
+			bitstr_t **select_node_bitmap, char *unavail_node_str,
+			char **err_msg);
 
 #endif /* !_HAVE_NODE_SCHEDULER_H */
diff --git a/src/squeue/opts.c b/src/squeue/opts.c
index c367a29d592b85eb5d899124923a291f62e62f11..1cba73f1591888466a6ed8b0325d331c66282a48 100644
--- a/src/squeue/opts.c
+++ b/src/squeue/opts.c
@@ -215,7 +215,7 @@ parse_command_line( int argc, char* argv[] )
 			if (params.format == NULL) {
 				params.format_long = xstrdup(optarg);
 			} else {
-				error ("-O (--Format) is incompatable with -o "
+				error ("-O (--Format) is incompatible with -o "
 				       "(--format)");
 				exit(1);
 			}