diff --git a/doc/man/man1/scontrol.1 b/doc/man/man1/scontrol.1
index 6d6e3c84e2bbedecb541138107dd2a1c9a6556fc..e218e9a44216903ed0722f70fa55369a92fd5e80 100644
--- a/doc/man/man1/scontrol.1
+++ b/doc/man/man1/scontrol.1
@@ -1541,7 +1541,7 @@ JobId=71701 Name=hostname
 .br
    Priority=66264 Account=none QOS=normal WCKey=*123
 .br
-   JobState=COMPLETED Reason=WaitingForScheduling Dependency=(null)
+   JobState=COMPLETED Reason=None Dependency=(null)
 .br
    TimeLimit=UNLIMITED Requeue=1 Restarts=0 BatchFlag=0 ExitCode=0:0
 .br
diff --git a/doc/man/man1/squeue.1 b/doc/man/man1/squeue.1
index 479f582ae6c2d79103dba636b0f21851c1b340ba..027152e358a24101045726b4ee146e9afbd634cc 100644
--- a/doc/man/man1/squeue.1
+++ b/doc/man/man1/squeue.1
@@ -481,8 +481,8 @@ one of those reasons is displayed.
 \fBDependency\fR
 This job is waiting for a dependent job to complete.
 .TP
-\fBWaitingForScheduling\fR
-The job is waiting for the next scheduling session.
+\fBNone\fR
+No reason is set for this job.
 .TP
 \fBPartitionDown\fR
 The partition required by this job is in a DOWN state.
diff --git a/slurm/slurm.h.in b/slurm/slurm.h.in
index 0ec53bf9920fa1b2b613d7761ff68552226ff888..b893f1fef22af88f03644c945c84030a9796f0dd 100644
--- a/slurm/slurm.h.in
+++ b/slurm/slurm.h.in
@@ -295,7 +295,7 @@ enum job_states {
  * system efficiency */
 enum job_state_reason {
 /* Reasons for job to be pending */
-	WAIT_FOR_SCHED = 0,	/* job is waiting for scheduling  */
+	WAIT_NO_REASON = 0,	/* not set or job not pending */
 	WAIT_PRIORITY,		/* higher priority jobs exist */
 	WAIT_DEPENDENCY,	/* dependent job has not completed */
 	WAIT_RESOURCES,		/* required resources not available */
diff --git a/src/common/slurm_protocol_defs.c b/src/common/slurm_protocol_defs.c
index c853a039abde36a6b66d7c492a6e4d9275cec003..db6946343f2d86aa531e4fe0ada525a16adce3c1 100644
--- a/src/common/slurm_protocol_defs.c
+++ b/src/common/slurm_protocol_defs.c
@@ -984,8 +984,8 @@ extern void slurm_free_spank_env_responce_msg(spank_env_responce_msg_t *msg)
 extern char *job_reason_string(enum job_state_reason inx)
 {
 	switch (inx) {
-	case WAIT_FOR_SCHED:
-		return "WaitingForScheduling";
+	case WAIT_NO_REASON:
+		return "None";
 	case WAIT_PROLOG:
 		return "Prolog";
 	case WAIT_PRIORITY:
diff --git a/src/plugins/sched/wiki/start_job.c b/src/plugins/sched/wiki/start_job.c
index 5d2b8d9c2bfeb30836df0b6519ef26a0093d5dcd..eb232e4f09dd060891b9161dfdac676c369e36bd 100644
--- a/src/plugins/sched/wiki/start_job.c
+++ b/src/plugins/sched/wiki/start_job.c
@@ -251,7 +251,7 @@ static int	_start_job(uint32_t jobid, int task_cnt, char *hostlist,
 	old_task_cnt = job_ptr->details->min_cpus;
 	job_ptr->details->min_cpus = MAX(task_cnt, old_task_cnt);
 	job_ptr->priority = 100000000;
-	job_ptr->state_reason = WAIT_FOR_SCHED;
+	job_ptr->state_reason = WAIT_NO_REASON;
 
  fini:	unlock_slurmctld(job_write_lock);
 	if (rc)
diff --git a/src/plugins/sched/wiki2/start_job.c b/src/plugins/sched/wiki2/start_job.c
index 0fb0c1097d269aa6041e82e3f3e4488b4565a02c..3693c8f2578a1f8dcc9005689a9b0e2b8dc20111 100644
--- a/src/plugins/sched/wiki2/start_job.c
+++ b/src/plugins/sched/wiki2/start_job.c
@@ -306,7 +306,7 @@ static int	_start_job(uint32_t jobid, int task_cnt, char *hostlist,
 	old_task_cnt = job_ptr->details->min_cpus;
 	job_ptr->details->min_cpus = MAX(task_cnt, old_task_cnt);
 	job_ptr->priority = 100000000;
-	job_ptr->state_reason = WAIT_FOR_SCHED;
+	job_ptr->state_reason = WAIT_NO_REASON;
 
  fini:	unlock_slurmctld(job_write_lock);
 	if (rc)
diff --git a/src/slurmctld/acct_policy.c b/src/slurmctld/acct_policy.c
index eb5d50bd8a26eb687b53954d1b4620cd5b4c8840..8bb8785cee8ed0b812acb9b745f601521d3eed78 100644
--- a/src/slurmctld/acct_policy.c
+++ b/src/slurmctld/acct_policy.c
@@ -1059,7 +1059,7 @@ extern bool acct_policy_job_runnable_pre_select(struct job_record *job_ptr)
 
 	/* clear old state reason */
 	if (!acct_policy_job_runnable_state(job_ptr))
-		job_ptr->state_reason = WAIT_FOR_SCHED;
+		job_ptr->state_reason = WAIT_NO_REASON;
 
 	assoc_mgr_lock(&locks);
 	qos_ptr = job_ptr->qos_ptr;
@@ -1341,7 +1341,7 @@ extern bool acct_policy_job_runnable_post_select(
 
 	/* clear old state reason */
 	if (!acct_policy_job_runnable_state(job_ptr))
-		job_ptr->state_reason = WAIT_FOR_SCHED;
+		job_ptr->state_reason = WAIT_NO_REASON;
 
 	job_cpu_time_limit = (uint64_t)job_ptr->time_limit * (uint64_t)cpu_cnt;
 
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index 8f97ce0dc3335671f0a9f625c5b8ebe350c8ff3c..937feea56dbe2edf3da4a9d4cd09b3063cdae903 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -3203,7 +3203,7 @@ static int _select_nodes_parts(struct job_record *job_ptr, bool test_only,
 			job_ptr->part_ptr = part_ptr;
 			debug2("Try job %u on next partition %s",
 			       job_ptr->job_id, part_ptr->name);
-			if (job_limits_check(&job_ptr, false) != WAIT_FOR_SCHED)
+			if (job_limits_check(&job_ptr, false) != WAIT_NO_REASON)
 				continue;
 			rc = select_nodes(job_ptr, test_only,
 					  select_node_bitmap);
@@ -3213,7 +3213,7 @@ static int _select_nodes_parts(struct job_record *job_ptr, bool test_only,
 		}
 		list_iterator_destroy(iter);
 	} else {
-		if (job_limits_check(&job_ptr, false) != WAIT_FOR_SCHED)
+		if (job_limits_check(&job_ptr, false) != WAIT_NO_REASON)
 			test_only = true;
 		rc = select_nodes(job_ptr, test_only, select_node_bitmap);
 	}
@@ -3688,7 +3688,7 @@ extern int prolog_complete(uint32_t job_id, bool requeue,
 		error("Prolog launch failure, JobId=%u", job_ptr->job_id);
 	}
 
-	job_ptr->state_reason = WAIT_FOR_SCHED;
+	job_ptr->state_reason = WAIT_NO_REASON;
 	return SLURM_SUCCESS;
 }
 
@@ -4250,7 +4250,7 @@ _valid_job_part_qos(struct part_record *part_ptr, slurmdb_qos_rec_t *qos_ptr)
  * IN job_ptr - pointer to job table entry.
  * IN check_min_time - if true test job's minimum time limit,
  *		otherwise test maximum time limit
- * RET WAIT_FOR_SCHED on success, fail status otherwise.
+ * RET WAIT_NO_REASON on success, a failure reason otherwise.
  */
 extern int job_limits_check(struct job_record **job_pptr, bool check_min_time)
 {
@@ -4276,7 +4276,7 @@ extern int job_limits_check(struct job_record **job_pptr, bool check_min_time)
 	assoc_ptr = job_ptr->assoc_ptr;
 	if (!detail_ptr) {	/* To prevent CLANG error */
 		fatal("job %u has NULL details_ptr", job_ptr->job_id);
-		return WAIT_FOR_SCHED;
+		return WAIT_NO_REASON;
 	}
 
 #ifdef HAVE_BG
@@ -4291,7 +4291,7 @@ extern int job_limits_check(struct job_record **job_pptr, bool check_min_time)
 	part_max_nodes = part_ptr->max_nodes;
 #endif
 
-	fail_reason = WAIT_FOR_SCHED;
+	fail_reason = WAIT_NO_REASON;
 
 	if (check_min_time && job_ptr->time_min)
 		time_check = job_ptr->time_min;
@@ -7501,7 +7501,7 @@ static bool _top_priority(struct job_record *job_ptr)
 				job_ptr->state_reason = WAIT_HELD;
 				xfree(job_ptr->state_desc);
 			}
-		} else if (job_ptr->state_reason == WAIT_FOR_SCHED) {
+		} else if (job_ptr->state_reason == WAIT_NO_REASON) {
 			job_ptr->state_reason = WAIT_PRIORITY;
 			xfree(job_ptr->state_desc);
 		}
@@ -8395,7 +8395,7 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 			set_job_prio(job_ptr);
 			info("sched: update_job: releasing user hold "
 			     "for job_id %u", job_specs->job_id);
-			job_ptr->state_reason = WAIT_FOR_SCHED;
+			job_ptr->state_reason = WAIT_NO_REASON;
 			job_ptr->job_state &= ~JOB_SPECIAL_EXIT;
 			xfree(job_ptr->state_desc);
 		} else if (authorized ||
@@ -8423,7 +8423,7 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 				xfree(job_ptr->state_desc);
 			} else if ((job_ptr->state_reason == WAIT_HELD) ||
 				   (job_ptr->state_reason == WAIT_HELD_USER)) {
-				job_ptr->state_reason = WAIT_FOR_SCHED;
+				job_ptr->state_reason = WAIT_NO_REASON;
 				job_ptr->job_state &= ~JOB_SPECIAL_EXIT;
 				xfree(job_ptr->state_desc);
 			}
@@ -8927,7 +8927,7 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 		goto fini;
 
 	fail_reason = job_limits_check(&job_ptr, false);
-	if (fail_reason != WAIT_FOR_SCHED) {
+	if (fail_reason != WAIT_NO_REASON) {
 		if (fail_reason == WAIT_QOS_THRES)
 			error_code = ESLURM_QOS_THRES;
 		else if ((fail_reason == WAIT_PART_TIME_LIMIT) ||
@@ -8942,7 +8942,7 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 		return error_code;
 	} else if ((job_ptr->state_reason != WAIT_HELD) &&
 		   (job_ptr->state_reason != WAIT_HELD_USER)) {
-		job_ptr->state_reason = WAIT_FOR_SCHED;
+		job_ptr->state_reason = WAIT_NO_REASON;
 	}
 
 #ifdef HAVE_BG
@@ -10151,14 +10151,14 @@ extern bool job_independent(struct job_record *job_ptr, int will_run)
 
 	/* Job is eligible to start now */
 	if (job_ptr->state_reason == WAIT_DEPENDENCY) {
-		job_ptr->state_reason = WAIT_FOR_SCHED;
+		job_ptr->state_reason = WAIT_NO_REASON;
 		xfree(job_ptr->state_desc);
 	}
 	if ((detail_ptr && (detail_ptr->begin_time == 0) &&
 	    (job_ptr->priority != 0))) {
 		detail_ptr->begin_time = now;
 	} else if (job_ptr->state_reason == WAIT_TIME) {
-		job_ptr->state_reason = WAIT_FOR_SCHED;
+		job_ptr->state_reason = WAIT_NO_REASON;
 		xfree(job_ptr->state_desc);
 	}
 	return true;
diff --git a/src/slurmctld/job_scheduler.c b/src/slurmctld/job_scheduler.c
index 9980e592ee09346c93171b5ab68d0e16ac2ebd60..9d4a18020d8daa29e0b5db79e6169402a10f444d 100644
--- a/src/slurmctld/job_scheduler.c
+++ b/src/slurmctld/job_scheduler.c
@@ -187,7 +187,7 @@ static bool _job_runnable_test1(struct job_record *job_ptr, bool clear_start)
 #ifdef HAVE_FRONT_END
 	/* At least one front-end node up at this point */
 	if (job_ptr->state_reason == WAIT_FRONT_END) {
-		job_ptr->state_reason = WAIT_FOR_SCHED;
+		job_ptr->state_reason = WAIT_NO_REASON;
 		xfree(job_ptr->state_desc);
 		last_job_update = time(NULL);
 	}
@@ -236,12 +236,12 @@ static bool _job_runnable_test2(struct job_record *job_ptr, bool check_min_time)
 
 	reason = job_limits_check(&job_ptr, check_min_time);
 	if ((reason != job_ptr->state_reason) &&
-	    ((reason != WAIT_FOR_SCHED) ||
+	    ((reason != WAIT_NO_REASON) ||
 	     (!part_policy_job_runnable_state(job_ptr)))) {
 		job_ptr->state_reason = reason;
 		xfree(job_ptr->state_desc);
 	}
-	if (reason != WAIT_FOR_SCHED)
+	if (reason != WAIT_NO_REASON)
 		return false;
 	return true;
 }
@@ -275,7 +275,7 @@ extern List build_job_queue(bool clear_start, bool backfill)
 				list_next(part_iterator))) {
 				job_ptr->part_ptr = part_ptr;
 				reason = job_limits_check(&job_ptr, backfill);
-				if ((reason != WAIT_FOR_SCHED) &&
+				if ((reason != WAIT_NO_REASON) &&
 				    (reason != job_ptr->state_reason) &&
 				    (!part_policy_job_runnable_state(job_ptr))){
 					job_ptr->state_reason = reason;
@@ -284,7 +284,7 @@ extern List build_job_queue(bool clear_start, bool backfill)
 				/* priority_array index matches part_ptr_list
 				 * position: increment inx*/
 				inx++;
-				if (reason != WAIT_FOR_SCHED)
+				if (reason != WAIT_NO_REASON)
 					continue;
 				if (job_ptr->priority_array) {
 					_job_queue_append(job_queue, job_ptr,
@@ -534,7 +534,7 @@ next_part:		part_ptr = (struct part_record *)
 				continue;
 			}
 		}
-		if (job_limits_check(&job_ptr, false) != WAIT_FOR_SCHED)
+		if (job_limits_check(&job_ptr, false) != WAIT_NO_REASON)
 			continue;
 
 		/* Test for valid account, QOS and required nodes on each pass */
@@ -550,7 +550,7 @@ next_part:		part_ptr = (struct part_record *)
 						     accounting_enforce,
 						     (slurmdb_association_rec_t **)
 						     &job_ptr->assoc_ptr)) {
-				job_ptr->state_reason = WAIT_FOR_SCHED;
+				job_ptr->state_reason = WAIT_NO_REASON;
 				xfree(job_ptr->state_desc);
 				job_ptr->assoc_id = assoc_rec.id;
 				last_job_update = now;
@@ -573,7 +573,7 @@ next_part:		part_ptr = (struct part_record *)
 				continue;
 			} else if (job_ptr->state_reason == FAIL_QOS) {
 				xfree(job_ptr->state_desc);
-				job_ptr->state_reason = WAIT_FOR_SCHED;
+				job_ptr->state_reason = WAIT_NO_REASON;
 				last_job_update = now;
 			}
 		}
@@ -581,7 +581,7 @@ next_part:		part_ptr = (struct part_record *)
 		if ((job_ptr->state_reason == WAIT_QOS_JOB_LIMIT) ||
 		    (job_ptr->state_reason == WAIT_QOS_RESOURCE_LIMIT) ||
 		    (job_ptr->state_reason == WAIT_QOS_TIME_LIMIT)) {
-			job_ptr->state_reason = WAIT_FOR_SCHED;
+			job_ptr->state_reason = WAIT_NO_REASON;
 			xfree(job_ptr->state_desc);
 			last_job_update = now;
 		}
@@ -833,7 +833,7 @@ extern int schedule(uint32_t job_limit)
 				list_next(job_iterator))) {
 			if (!IS_JOB_PENDING(job_ptr))
 				continue;
-			if ((job_ptr->state_reason != WAIT_FOR_SCHED) &&
+			if ((job_ptr->state_reason != WAIT_NO_REASON) &&
 			    (job_ptr->state_reason != WAIT_RESOURCES) &&
 			    (job_ptr->state_reason != WAIT_NODE_NOT_AVAIL))
 				continue;
@@ -928,7 +928,7 @@ next_part:			part_ptr = (struct part_record *)
 				if (part_ptr) {
 					job_ptr->part_ptr = part_ptr;
 					if (job_limits_check(&job_ptr, false) !=
-					    WAIT_FOR_SCHED)
+					    WAIT_NO_REASON)
 						continue;
 				} else {
 					list_iterator_destroy(part_iterator);
@@ -997,7 +997,7 @@ next_part:			part_ptr = (struct part_record *)
 				}
 			}
 			if (found_resv) {
-				if (job_ptr->state_reason == WAIT_FOR_SCHED) {
+				if (job_ptr->state_reason == WAIT_NO_REASON) {
 					job_ptr->state_reason = WAIT_PRIORITY;
 					xfree(job_ptr->state_desc);
 				}
@@ -1012,7 +1012,7 @@ next_part:			part_ptr = (struct part_record *)
 		} else if (_failed_partition(job_ptr->part_ptr, failed_parts,
 					     failed_part_cnt)) {
 			if ((job_ptr->state_reason == WAIT_NODE_NOT_AVAIL)
-			    || (job_ptr->state_reason == WAIT_FOR_SCHED)) {
+			    || (job_ptr->state_reason == WAIT_NO_REASON)) {
 				job_ptr->state_reason = WAIT_PRIORITY;
 				xfree(job_ptr->state_desc);
 				last_job_update = now;
@@ -1039,7 +1039,7 @@ next_part:			part_ptr = (struct part_record *)
 						    accounting_enforce,
 						    (slurmdb_association_rec_t **)
 						    &job_ptr->assoc_ptr)) {
-				job_ptr->state_reason = WAIT_FOR_SCHED;
+				job_ptr->state_reason = WAIT_NO_REASON;
 				xfree(job_ptr->state_desc);
 				job_ptr->assoc_id = assoc_rec.id;
 				last_job_update = now;
@@ -1062,7 +1062,7 @@ next_part:			part_ptr = (struct part_record *)
 				continue;
 			} else if (job_ptr->state_reason == FAIL_QOS) {
 				xfree(job_ptr->state_desc);
-				job_ptr->state_reason = WAIT_FOR_SCHED;
+				job_ptr->state_reason = WAIT_NO_REASON;
 				last_job_update = now;
 			}
 		}
diff --git a/src/slurmctld/node_scheduler.c b/src/slurmctld/node_scheduler.c
index 3857980f717af25045787a51bc01530ecac0e188..e404728d1fe91e88303c90f69e6bd47793b1a969 100644
--- a/src/slurmctld/node_scheduler.c
+++ b/src/slurmctld/node_scheduler.c
@@ -1746,7 +1746,7 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 	}
 
 	/* assign the nodes and stage_in the job */
-	job_ptr->state_reason = WAIT_FOR_SCHED;
+	job_ptr->state_reason = WAIT_NO_REASON;
 	xfree(job_ptr->state_desc);
 
 	if (job_ptr->job_resrcs && job_ptr->job_resrcs->nodes)
diff --git a/src/slurmctld/slurmctld.h b/src/slurmctld/slurmctld.h
index 2d51510ac1f81424e5bd89708bf3e4ff74392fac..bd05aacf982e3f8f79dd5ea238b3106c74dcf18d 100644
--- a/src/slurmctld/slurmctld.h
+++ b/src/slurmctld/slurmctld.h
@@ -807,7 +807,7 @@ extern struct part_record *create_part_record (void);
  * IN job_ptr - pointer to job table entry.
  * IN check_min_time - if true test job's minimum time limit,
  *		otherwise test maximum time limit
- * RET WAIT_FOR_SCHED on success, fail status otherwise.
+ * RET WAIT_NO_REASON on success, a failure reason otherwise.
  */
 extern int job_limits_check(struct job_record **job_pptr, bool check_min_time);
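
The renamed zero value keeps the existing convention that a reason of 0 means "nothing is blocking this job": job_limits_check() returns it on success, the scheduler call sites compare against it before recording a real reason, and job_reason_string() now reports it to users as "None" instead of the misleading "WaitingForScheduling". The standalone sketch below illustrates that convention; it reuses the enum and string names visible in the hunks above, but the limits_check() stand-in, the "PartitionTimeLimit" string, and the program as a whole are illustrative assumptions, not Slurm source.

/* Illustrative sketch only (not Slurm code): the zero-valued reason means
 * "no reason set", callers compare against it to detect failure reasons,
 * and the string shown to users for it is now "None". */
#include <stdio.h>

enum job_state_reason {
	WAIT_NO_REASON = 0,	/* not set or job not pending */
	WAIT_PRIORITY,		/* higher priority jobs exist */
	WAIT_DEPENDENCY,	/* dependent job has not completed */
	WAIT_PART_TIME_LIMIT	/* request exceeds partition time limit */
};

/* Mirrors the job_reason_string() mapping touched by this patch;
 * the non-"None" strings here are assumptions for the example. */
static const char *reason_string(enum job_state_reason reason)
{
	switch (reason) {
	case WAIT_NO_REASON:
		return "None";
	case WAIT_PRIORITY:
		return "Priority";
	case WAIT_DEPENDENCY:
		return "Dependency";
	case WAIT_PART_TIME_LIMIT:
		return "PartitionTimeLimit";
	default:
		return "?";
	}
}

/* Hypothetical stand-in for job_limits_check(): returns WAIT_NO_REASON
 * when the job passes the limit test, a specific reason otherwise. */
static enum job_state_reason limits_check(int time_limit, int part_max_time)
{
	if (time_limit > part_max_time)
		return WAIT_PART_TIME_LIMIT;
	return WAIT_NO_REASON;
}

int main(void)
{
	enum job_state_reason reason = limits_check(120, 60);

	/* Same pattern as the call sites updated in job_mgr.c and
	 * job_scheduler.c: anything other than WAIT_NO_REASON is a
	 * failure reason worth recording on the job. */
	if (reason != WAIT_NO_REASON)
		printf("job held, Reason=%s\n", reason_string(reason));
	else
		printf("job schedulable, Reason=%s\n", reason_string(reason));
	return 0;
}

Keeping the "no reason" value at 0 also means a freshly zeroed job record naturally reports Reason=None without any extra initialization.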