diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index 62705f8f839cf45786188305184bb1242230ee1a..aba6e65f6024fd4b0e9b872ff43295a124a94854 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -3305,11 +3305,12 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate,
 	if (error_code) {
 		if (job_ptr && (immediate || will_run)) {
 			/* this should never really happen here */
-			job_ptr->job_state = JOB_PENDING;
+			job_ptr->job_state = JOB_FAILED;
+			job_ptr->exit_code = 1;
 			job_ptr->state_reason = FAIL_BAD_CONSTRAINTS;
 			xfree(job_ptr->state_desc);
 			job_ptr->start_time = job_ptr->end_time = now;
-			job_ptr->priority = 0;
+			job_completion_logger(job_ptr, false);
 		}
 		return error_code;
 	}
@@ -3359,11 +3360,12 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate,
 		top_prio = true;	/* don't bother testing,
 					 * it is not runable anyway */
 	if (immediate && (too_fragmented || (!top_prio) || (!independent))) {
-		job_ptr->job_state  = JOB_PENDING;
+		job_ptr->job_state  = JOB_FAILED;
+		job_ptr->exit_code  = 1;
 		job_ptr->state_reason = FAIL_BAD_CONSTRAINTS;
 		xfree(job_ptr->state_desc);
 		job_ptr->start_time = job_ptr->end_time = now;
-		job_ptr->priority = 0;
+		job_completion_logger(job_ptr, false);
 		if (!independent)
 			return ESLURM_DEPENDENCY;
 		else if (too_fragmented)
@@ -3407,11 +3409,12 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate,
 	    (error_code == ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE)) {
 		/* Not fatal error, but job can't be scheduled right now */
 		if (immediate) {
-			job_ptr->job_state  = JOB_PENDING;
+			job_ptr->job_state  = JOB_FAILED;
+			job_ptr->exit_code  = 1;
 			job_ptr->state_reason = FAIL_BAD_CONSTRAINTS;
 			xfree(job_ptr->state_desc);
 			job_ptr->start_time = job_ptr->end_time = now;
-			job_ptr->priority = 0;
+			job_completion_logger(job_ptr, false);
 		} else {	/* job remains queued */
 			_create_job_array(job_ptr, job_specs);
 			if ((error_code == ESLURM_NODES_BUSY) ||
@@ -3423,11 +3426,12 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate,
 	}
 
 	if (error_code) {	/* fundamental flaw in job request */
-		job_ptr->job_state  = JOB_PENDING;
+		job_ptr->job_state  = JOB_FAILED;
+		job_ptr->exit_code  = 1;
 		job_ptr->state_reason = FAIL_BAD_CONSTRAINTS;
 		xfree(job_ptr->state_desc);
 		job_ptr->start_time = job_ptr->end_time = now;
-		job_ptr->priority = 0;
+		job_completion_logger(job_ptr, false);
 		return error_code;
 	}
 
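
The job_mgr.c hunks all apply the same fix: when an immediate or
will-run request is rejected, the job is now marked terminally FAILED
with a non-zero exit code and a completion record is written, instead
of being parked as a PENDING job with zero priority that never runs.
A minimal sketch of that shared idiom as one helper (hypothetical: the
patch inlines this code at each of the four sites, and the helper name
is an assumption):

    static void _reject_job(struct job_record *job_ptr, time_t now)
    {
            /* Terminal state plus non-zero exit code, so the
             * rejection is visible to the user and to accounting. */
            job_ptr->job_state = JOB_FAILED;
            job_ptr->exit_code = 1;
            job_ptr->state_reason = FAIL_BAD_CONSTRAINTS;
            xfree(job_ptr->state_desc);     /* drop stale reason text */
            /* Zero-length run: the job never started. */
            job_ptr->start_time = job_ptr->end_time = now;
            /* Second argument is the requeue flag; false records a
             * normal (non-requeued) completion. */
            job_completion_logger(job_ptr, false);
    }
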
diff --git a/src/slurmctld/proc_req.c b/src/slurmctld/proc_req.c
index 5a1ebe8fa1a10d94b44daedc2d801e12c4d4e9c9..405780eac61f74a6e31cafb2faef940db2244052 100644
--- a/src/slurmctld/proc_req.c
+++ b/src/slurmctld/proc_req.c
@@ -1878,11 +1878,13 @@ static void _slurm_rpc_complete_batch_script(slurm_msg_t * msg)
 
 	/* Send batch step info to accounting, only if the job is
 	 * still completing.  If the job was requeued because of node
-	 * failure an epilog script might not of ran so we already
-	 * finished the last instance of the job so this would be put
-	 * on the requeued instance which is incorrect. */
+	 * failure (state == pending), an epilog script may not have
+	 * run, so the last instance of the job has already been
+	 * finished and this record would land on the requeued
+	 * instance, which is incorrect.
+	 */
 	if (association_based_accounting && job_ptr
-	    && IS_JOB_COMPLETING(job_ptr)) {
+	    && !IS_JOB_PENDING(job_ptr)) {
 		struct step_record batch_step;
 		memset(&batch_step, 0, sizeof(struct step_record));
 		batch_step.job_ptr = job_ptr;
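
On the accounting side, the guard changes from "record the batch step
only while the JOB_COMPLETING flag is still set" to "record it unless
the job is back in PENDING". Per the updated comment, a job requeued
after node failure sits in PENDING, so only that case is skipped; jobs
whose COMPLETING flag has already been cleared still get their batch
step recorded. A minimal sketch of the distinction, assuming the macro
forms in src/common/slurm_protocol_defs.h and a hypothetical helper
name:

    /* IS_JOB_COMPLETING() tests the transient JOB_COMPLETING flag
     * bit; IS_JOB_PENDING() tests the base job state, which is where
     * a requeued job sits. */
    static bool _want_batch_step_record(struct job_record *job_ptr)
    {
            /* Old guard: return IS_JOB_COMPLETING(job_ptr); */
            return !IS_JOB_PENDING(job_ptr);
    }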