diff --git a/NEWS b/NEWS
index 8df683795ac19dcb46f017ece4732cdba7f27f13..b3da22b7e71913bc7e6546251392ef64ddf08330 100644
--- a/NEWS
+++ b/NEWS
@@ -30,6 +30,8 @@ documents those changes that are of interest to users and admins.
     exit with a return code of zero and others are killed with SIGKILL. Only an
     exit code of zero did this.
  -- task/cgroup make TaskAffinity=hard bind to threads instead of cores.
+ -- Add a new pending reason, WaitingForScheduling, which indicates that a
+    job has not yet been processed by the scheduler.
 
 * Changes in Slurm 14.03.0
 ==========================
diff --git a/doc/man/man1/scontrol.1 b/doc/man/man1/scontrol.1
index 864f9f9a274e91425cdb2772ddcd771447706aa7..7d1f93f3360d1b05fd02e27ae3436f5917bd842b 100644
--- a/doc/man/man1/scontrol.1
+++ b/doc/man/man1/scontrol.1
@@ -1541,7 +1541,7 @@ JobId=71701 Name=hostname
 .br
    Priority=66264 Account=none QOS=normal WCKey=*123
 .br
-   JobState=COMPLETED Reason=None Dependency=(null)
+   JobState=COMPLETED Reason=WaitingForScheduling Dependency=(null)
 .br
    TimeLimit=UNLIMITED Requeue=1 Restarts=0 BatchFlag=0 ExitCode=0:0
 .br
diff --git a/doc/man/man1/squeue.1 b/doc/man/man1/squeue.1
index 027152e358a24101045726b4ee146e9afbd634cc..479f582ae6c2d79103dba636b0f21851c1b340ba 100644
--- a/doc/man/man1/squeue.1
+++ b/doc/man/man1/squeue.1
@@ -481,8 +481,8 @@ one of those reasons is displayed.
 \fBDependency\fR
 This job is waiting for a dependent job to complete.
 .TP
-\fBNone\fR
-No reason is set for this job.
+\fBWaitingForScheduling\fR
+The job is waiting to be evaluated in the next scheduling cycle.
 .TP
 \fBPartitionDown\fR
 The partition required by this job is in a DOWN state.
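
Note (illustrative, not part of the patch): once this rename is in place, a
pending job that the scheduler has not yet examined surfaces the new reason in
the last column of the default squeue listing. The job id, partition, name,
and user below are made up, and column widths are abbreviated:

        JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)
        12345     debug   mytest    alice PD       0:00      1 (WaitingForScheduling)
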
diff --git a/slurm/slurm.h.in b/slurm/slurm.h.in
index b893f1fef22af88f03644c945c84030a9796f0dd..0ec53bf9920fa1b2b613d7761ff68552226ff888 100644
--- a/slurm/slurm.h.in
+++ b/slurm/slurm.h.in
@@ -295,7 +295,7 @@ enum job_states {
  * system efficiency */
 enum job_state_reason {
 /* Reasons for job to be pending */
-	WAIT_NO_REASON = 0,	/* not set or job not pending */
+	WAIT_FOR_SCHED = 0,	/* job is waiting for scheduling */
 	WAIT_PRIORITY,		/* higher priority jobs exist */
 	WAIT_DEPENDENCY,	/* dependent job has not completed */
 	WAIT_RESOURCES,		/* required resources not available */
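
Note (sketch, not part of the patch): because WAIT_FOR_SCHED keeps the value 0,
a zero-initialized state_reason field still carries the new default. A minimal
example of how controller-side code might test for it; the helper name is
hypothetical, while struct job_record, state_reason and IS_JOB_PENDING() are
the ones used elsewhere in this patch:

    #include "src/common/slurm_protocol_defs.h"   /* IS_JOB_PENDING() */
    #include "src/slurmctld/slurmctld.h"          /* struct job_record */

    /* Hypothetical helper: nonzero if a pending job has not yet been seen
     * by the scheduler, i.e. its reason is still the default value. */
    static int job_awaiting_first_sched_pass(struct job_record *job_ptr)
    {
            if (!IS_JOB_PENDING(job_ptr))
                    return 0;
            return (job_ptr->state_reason == WAIT_FOR_SCHED);
    }
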
diff --git a/src/common/slurm_protocol_defs.c b/src/common/slurm_protocol_defs.c
index db6946343f2d86aa531e4fe0ada525a16adce3c1..c853a039abde36a6b66d7c492a6e4d9275cec003 100644
--- a/src/common/slurm_protocol_defs.c
+++ b/src/common/slurm_protocol_defs.c
@@ -984,8 +984,8 @@ extern void slurm_free_spank_env_responce_msg(spank_env_responce_msg_t *msg)
 extern char *job_reason_string(enum job_state_reason inx)
 {
 	switch (inx) {
-	case WAIT_NO_REASON:
-		return "None";
+	case WAIT_FOR_SCHED:
+		return "WaitingForScheduling";
 	case WAIT_PROLOG:
 		return "Prolog";
 	case WAIT_PRIORITY:
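
Note (sketch, not part of the patch): client tools render the new reason
through job_reason_string(), as squeue and scontrol do. The function below is
purely illustrative; only job_reason_string() and enum job_state_reason are
taken from the hunks above, and the declaration is assumed to live alongside
the definition, in slurm_protocol_defs.h:

    #include <stdio.h>
    #include "src/common/slurm_protocol_defs.h"   /* job_reason_string() */

    /* Print the human-readable reason for a job's pending state. With this
     * patch, WAIT_FOR_SCHED prints "WaitingForScheduling" rather than "None". */
    static void print_pending_reason(enum job_state_reason reason)
    {
            printf("Reason=%s\n", job_reason_string(reason));
    }

    /* e.g. print_pending_reason(WAIT_FOR_SCHED); */
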
diff --git a/src/plugins/sched/wiki/start_job.c b/src/plugins/sched/wiki/start_job.c
index eb232e4f09dd060891b9161dfdac676c369e36bd..5d2b8d9c2bfeb30836df0b6519ef26a0093d5dcd 100644
--- a/src/plugins/sched/wiki/start_job.c
+++ b/src/plugins/sched/wiki/start_job.c
@@ -251,7 +251,7 @@ static int	_start_job(uint32_t jobid, int task_cnt, char *hostlist,
 	old_task_cnt = job_ptr->details->min_cpus;
 	job_ptr->details->min_cpus = MAX(task_cnt, old_task_cnt);
 	job_ptr->priority = 100000000;
-	job_ptr->state_reason = WAIT_NO_REASON;
+	job_ptr->state_reason = WAIT_FOR_SCHED;
 
  fini:	unlock_slurmctld(job_write_lock);
 	if (rc)
diff --git a/src/plugins/sched/wiki2/start_job.c b/src/plugins/sched/wiki2/start_job.c
index 3693c8f2578a1f8dcc9005689a9b0e2b8dc20111..0fb0c1097d269aa6041e82e3f3e4488b4565a02c 100644
--- a/src/plugins/sched/wiki2/start_job.c
+++ b/src/plugins/sched/wiki2/start_job.c
@@ -306,7 +306,7 @@ static int	_start_job(uint32_t jobid, int task_cnt, char *hostlist,
 	old_task_cnt = job_ptr->details->min_cpus;
 	job_ptr->details->min_cpus = MAX(task_cnt, old_task_cnt);
 	job_ptr->priority = 100000000;
-	job_ptr->state_reason = WAIT_NO_REASON;
+	job_ptr->state_reason = WAIT_FOR_SCHED;
 
  fini:	unlock_slurmctld(job_write_lock);
 	if (rc)
diff --git a/src/slurmctld/acct_policy.c b/src/slurmctld/acct_policy.c
index 8bb8785cee8ed0b812acb9b745f601521d3eed78..eb5d50bd8a26eb687b53954d1b4620cd5b4c8840 100644
--- a/src/slurmctld/acct_policy.c
+++ b/src/slurmctld/acct_policy.c
@@ -1059,7 +1059,7 @@ extern bool acct_policy_job_runnable_pre_select(struct job_record *job_ptr)
 
 	/* clear old state reason */
 	if (!acct_policy_job_runnable_state(job_ptr))
-		job_ptr->state_reason = WAIT_NO_REASON;
+		job_ptr->state_reason = WAIT_FOR_SCHED;
 
 	assoc_mgr_lock(&locks);
 	qos_ptr = job_ptr->qos_ptr;
@@ -1341,7 +1341,7 @@ extern bool acct_policy_job_runnable_post_select(
 
 	/* clear old state reason */
 	if (!acct_policy_job_runnable_state(job_ptr))
-		job_ptr->state_reason = WAIT_NO_REASON;
+		job_ptr->state_reason = WAIT_FOR_SCHED;
 
 	job_cpu_time_limit = (uint64_t)job_ptr->time_limit * (uint64_t)cpu_cnt;
 
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index 52b7448ccab233d07c3bfa16ea19510a77669a97..fda1e2d089bdb554d4dec81b320096de58eb2f44 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -3203,7 +3203,7 @@ static int _select_nodes_parts(struct job_record *job_ptr, bool test_only,
 			job_ptr->part_ptr = part_ptr;
 			debug2("Try job %u on next partition %s",
 			       job_ptr->job_id, part_ptr->name);
-			if (job_limits_check(&job_ptr, false) != WAIT_NO_REASON)
+			if (job_limits_check(&job_ptr, false) != WAIT_FOR_SCHED)
 				continue;
 			rc = select_nodes(job_ptr, test_only,
 					  select_node_bitmap);
@@ -3213,7 +3213,7 @@ static int _select_nodes_parts(struct job_record *job_ptr, bool test_only,
 		}
 		list_iterator_destroy(iter);
 	} else {
-		if (job_limits_check(&job_ptr, false) != WAIT_NO_REASON)
+		if (job_limits_check(&job_ptr, false) != WAIT_FOR_SCHED)
 			test_only = true;
 		rc = select_nodes(job_ptr, test_only, select_node_bitmap);
 	}
@@ -3688,7 +3688,7 @@ extern int prolog_complete(uint32_t job_id, bool requeue,
 		error("Prolog launch failure, JobId=%u", job_ptr->job_id);
 	}
 
-	job_ptr->state_reason = WAIT_NO_REASON;
+	job_ptr->state_reason = WAIT_FOR_SCHED;
 	return SLURM_SUCCESS;
 }
 
@@ -4250,7 +4250,7 @@ _valid_job_part_qos(struct part_record *part_ptr, slurmdb_qos_rec_t *qos_ptr)
  * IN job_ptr - pointer to job table entry.
  * IN check_min_time - if true test job's minimum time limit,
  *		otherwise test maximum time limit
- * RET WAIT_NO_REASON on success, fail status otherwise.
+ * RET WAIT_FOR_SCHED on success, fail status otherwise.
  */
 extern int job_limits_check(struct job_record **job_pptr, bool check_min_time)
 {
@@ -4276,7 +4276,7 @@ extern int job_limits_check(struct job_record **job_pptr, bool check_min_time)
 	assoc_ptr = job_ptr->assoc_ptr;
 	if (!detail_ptr) {	/* To prevent CLANG error */
 		fatal("job %u has NULL details_ptr", job_ptr->job_id);
-		return WAIT_NO_REASON;
+		return WAIT_FOR_SCHED;
 	}
 
 #ifdef HAVE_BG
@@ -4291,7 +4291,7 @@ extern int job_limits_check(struct job_record **job_pptr, bool check_min_time)
 	part_max_nodes = part_ptr->max_nodes;
 #endif
 
-	fail_reason = WAIT_NO_REASON;
+	fail_reason = WAIT_FOR_SCHED;
 
 	if (check_min_time && job_ptr->time_min)
 		time_check = job_ptr->time_min;
@@ -7499,7 +7499,7 @@ static bool _top_priority(struct job_record *job_ptr)
 				job_ptr->state_reason = WAIT_HELD;
 				xfree(job_ptr->state_desc);
 			}
-		} else if (job_ptr->state_reason == WAIT_NO_REASON) {
+		} else if (job_ptr->state_reason == WAIT_FOR_SCHED) {
 			job_ptr->state_reason = WAIT_PRIORITY;
 			xfree(job_ptr->state_desc);
 		}
@@ -8393,7 +8393,7 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 			set_job_prio(job_ptr);
 			info("sched: update_job: releasing user hold "
 			     "for job_id %u", job_specs->job_id);
-			job_ptr->state_reason = WAIT_NO_REASON;
+			job_ptr->state_reason = WAIT_FOR_SCHED;
 			job_ptr->job_state &= ~JOB_SPECIAL_EXIT;
 			xfree(job_ptr->state_desc);
 		} else if (authorized ||
@@ -8421,7 +8421,7 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 				xfree(job_ptr->state_desc);
 			} else if ((job_ptr->state_reason == WAIT_HELD) ||
 				   (job_ptr->state_reason == WAIT_HELD_USER)) {
-				job_ptr->state_reason = WAIT_NO_REASON;
+				job_ptr->state_reason = WAIT_FOR_SCHED;
 				job_ptr->job_state &= ~JOB_SPECIAL_EXIT;
 				xfree(job_ptr->state_desc);
 			}
@@ -8925,7 +8925,7 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 		goto fini;
 
 	fail_reason = job_limits_check(&job_ptr, false);
-	if (fail_reason != WAIT_NO_REASON) {
+	if (fail_reason != WAIT_FOR_SCHED) {
 		if (fail_reason == WAIT_QOS_THRES)
 			error_code = ESLURM_QOS_THRES;
 		else if ((fail_reason == WAIT_PART_TIME_LIMIT) ||
@@ -8940,7 +8940,7 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 		return error_code;
 	} else if ((job_ptr->state_reason != WAIT_HELD) &&
 		   (job_ptr->state_reason != WAIT_HELD_USER)) {
-		job_ptr->state_reason = WAIT_NO_REASON;
+		job_ptr->state_reason = WAIT_FOR_SCHED;
 	}
 
 #ifdef HAVE_BG
@@ -10149,14 +10149,14 @@ extern bool job_independent(struct job_record *job_ptr, int will_run)
 
 	/* Job is eligible to start now */
 	if (job_ptr->state_reason == WAIT_DEPENDENCY) {
-		job_ptr->state_reason = WAIT_NO_REASON;
+		job_ptr->state_reason = WAIT_FOR_SCHED;
 		xfree(job_ptr->state_desc);
 	}
 	if ((detail_ptr && (detail_ptr->begin_time == 0) &&
 	    (job_ptr->priority != 0))) {
 		detail_ptr->begin_time = now;
 	} else if (job_ptr->state_reason == WAIT_TIME) {
-		job_ptr->state_reason = WAIT_NO_REASON;
+		job_ptr->state_reason = WAIT_FOR_SCHED;
 		xfree(job_ptr->state_desc);
 	}
 	return true;
diff --git a/src/slurmctld/job_scheduler.c b/src/slurmctld/job_scheduler.c
index 9d4a18020d8daa29e0b5db79e6169402a10f444d..9980e592ee09346c93171b5ab68d0e16ac2ebd60 100644
--- a/src/slurmctld/job_scheduler.c
+++ b/src/slurmctld/job_scheduler.c
@@ -187,7 +187,7 @@ static bool _job_runnable_test1(struct job_record *job_ptr, bool clear_start)
 #ifdef HAVE_FRONT_END
 	/* At least one front-end node up at this point */
 	if (job_ptr->state_reason == WAIT_FRONT_END) {
-		job_ptr->state_reason = WAIT_NO_REASON;
+		job_ptr->state_reason = WAIT_FOR_SCHED;
 		xfree(job_ptr->state_desc);
 		last_job_update = time(NULL);
 	}
@@ -236,12 +236,12 @@ static bool _job_runnable_test2(struct job_record *job_ptr, bool check_min_time)
 
 	reason = job_limits_check(&job_ptr, check_min_time);
 	if ((reason != job_ptr->state_reason) &&
-	    ((reason != WAIT_NO_REASON) ||
+	    ((reason != WAIT_FOR_SCHED) ||
 	     (!part_policy_job_runnable_state(job_ptr)))) {
 		job_ptr->state_reason = reason;
 		xfree(job_ptr->state_desc);
 	}
-	if (reason != WAIT_NO_REASON)
+	if (reason != WAIT_FOR_SCHED)
 		return false;
 	return true;
 }
@@ -275,7 +275,7 @@ extern List build_job_queue(bool clear_start, bool backfill)
 				list_next(part_iterator))) {
 				job_ptr->part_ptr = part_ptr;
 				reason = job_limits_check(&job_ptr, backfill);
-				if ((reason != WAIT_NO_REASON) &&
+				if ((reason != WAIT_FOR_SCHED) &&
 				    (reason != job_ptr->state_reason) &&
 				    (!part_policy_job_runnable_state(job_ptr))){
 					job_ptr->state_reason = reason;
@@ -284,7 +284,7 @@ extern List build_job_queue(bool clear_start, bool backfill)
 				/* priority_array index matches part_ptr_list
 				 * position: increment inx*/
 				inx++;
-				if (reason != WAIT_NO_REASON)
+				if (reason != WAIT_FOR_SCHED)
 					continue;
 				if (job_ptr->priority_array) {
 					_job_queue_append(job_queue, job_ptr,
@@ -534,7 +534,7 @@ next_part:		part_ptr = (struct part_record *)
 				continue;
 			}
 		}
-		if (job_limits_check(&job_ptr, false) != WAIT_NO_REASON)
+		if (job_limits_check(&job_ptr, false) != WAIT_FOR_SCHED)
 			continue;
 
 		/* Test for valid account, QOS and required nodes on each pass */
@@ -550,7 +550,7 @@ next_part:		part_ptr = (struct part_record *)
 						     accounting_enforce,
 						     (slurmdb_association_rec_t **)
 						     &job_ptr->assoc_ptr)) {
-				job_ptr->state_reason = WAIT_NO_REASON;
+				job_ptr->state_reason = WAIT_FOR_SCHED;
 				xfree(job_ptr->state_desc);
 				job_ptr->assoc_id = assoc_rec.id;
 				last_job_update = now;
@@ -573,7 +573,7 @@ next_part:		part_ptr = (struct part_record *)
 				continue;
 			} else if (job_ptr->state_reason == FAIL_QOS) {
 				xfree(job_ptr->state_desc);
-				job_ptr->state_reason = WAIT_NO_REASON;
+				job_ptr->state_reason = WAIT_FOR_SCHED;
 				last_job_update = now;
 			}
 		}
@@ -581,7 +581,7 @@ next_part:		part_ptr = (struct part_record *)
 		if ((job_ptr->state_reason == WAIT_QOS_JOB_LIMIT) ||
 		    (job_ptr->state_reason == WAIT_QOS_RESOURCE_LIMIT) ||
 		    (job_ptr->state_reason == WAIT_QOS_TIME_LIMIT)) {
-			job_ptr->state_reason = WAIT_NO_REASON;
+			job_ptr->state_reason = WAIT_FOR_SCHED;
 			xfree(job_ptr->state_desc);
 			last_job_update = now;
 		}
@@ -833,7 +833,7 @@ extern int schedule(uint32_t job_limit)
 				list_next(job_iterator))) {
 			if (!IS_JOB_PENDING(job_ptr))
 				continue;
-			if ((job_ptr->state_reason != WAIT_NO_REASON) &&
+			if ((job_ptr->state_reason != WAIT_FOR_SCHED) &&
 			    (job_ptr->state_reason != WAIT_RESOURCES) &&
 			    (job_ptr->state_reason != WAIT_NODE_NOT_AVAIL))
 				continue;
@@ -928,7 +928,7 @@ next_part:			part_ptr = (struct part_record *)
 				if (part_ptr) {
 					job_ptr->part_ptr = part_ptr;
 					if (job_limits_check(&job_ptr, false) !=
-					    WAIT_NO_REASON)
+					    WAIT_FOR_SCHED)
 						continue;
 				} else {
 					list_iterator_destroy(part_iterator);
@@ -997,7 +997,7 @@ next_part:			part_ptr = (struct part_record *)
 				}
 			}
 			if (found_resv) {
-				if (job_ptr->state_reason == WAIT_NO_REASON) {
+				if (job_ptr->state_reason == WAIT_FOR_SCHED) {
 					job_ptr->state_reason = WAIT_PRIORITY;
 					xfree(job_ptr->state_desc);
 				}
@@ -1012,7 +1012,7 @@ next_part:			part_ptr = (struct part_record *)
 		} else if (_failed_partition(job_ptr->part_ptr, failed_parts,
 					     failed_part_cnt)) {
 			if ((job_ptr->state_reason == WAIT_NODE_NOT_AVAIL)
-			    || (job_ptr->state_reason == WAIT_NO_REASON)) {
+			    || (job_ptr->state_reason == WAIT_FOR_SCHED)) {
 				job_ptr->state_reason = WAIT_PRIORITY;
 				xfree(job_ptr->state_desc);
 				last_job_update = now;
@@ -1039,7 +1039,7 @@ next_part:			part_ptr = (struct part_record *)
 						    accounting_enforce,
 						    (slurmdb_association_rec_t **)
 						    &job_ptr->assoc_ptr)) {
-				job_ptr->state_reason = WAIT_NO_REASON;
+				job_ptr->state_reason = WAIT_FOR_SCHED;
 				xfree(job_ptr->state_desc);
 				job_ptr->assoc_id = assoc_rec.id;
 				last_job_update = now;
@@ -1062,7 +1062,7 @@ next_part:			part_ptr = (struct part_record *)
 				continue;
 			} else if (job_ptr->state_reason == FAIL_QOS) {
 				xfree(job_ptr->state_desc);
-				job_ptr->state_reason = WAIT_NO_REASON;
+				job_ptr->state_reason = WAIT_FOR_SCHED;
 				last_job_update = now;
 			}
 		}
diff --git a/src/slurmctld/node_scheduler.c b/src/slurmctld/node_scheduler.c
index e404728d1fe91e88303c90f69e6bd47793b1a969..3857980f717af25045787a51bc01530ecac0e188 100644
--- a/src/slurmctld/node_scheduler.c
+++ b/src/slurmctld/node_scheduler.c
@@ -1746,7 +1746,7 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 	}
 
 	/* assign the nodes and stage_in the job */
-	job_ptr->state_reason = WAIT_NO_REASON;
+	job_ptr->state_reason = WAIT_FOR_SCHED;
 	xfree(job_ptr->state_desc);
 
 	if (job_ptr->job_resrcs && job_ptr->job_resrcs->nodes)
diff --git a/src/slurmctld/slurmctld.h b/src/slurmctld/slurmctld.h
index bd05aacf982e3f8f79dd5ea238b3106c74dcf18d..2d51510ac1f81424e5bd89708bf3e4ff74392fac 100644
--- a/src/slurmctld/slurmctld.h
+++ b/src/slurmctld/slurmctld.h
@@ -807,7 +807,7 @@ extern struct part_record *create_part_record (void);
  * IN job_ptr - pointer to job table entry.
  * IN check_min_time - if true test job's minimum time limit,
  *		otherwise test maximum time limit
- * RET WAIT_NO_REASON on success, fail status otherwise.
+ * RET WAIT_FOR_SCHED on success, fail status otherwise.
  */
 extern int job_limits_check(struct job_record **job_pptr, bool check_min_time);