diff --git a/NEWS b/NEWS
index daaa473bf8f0be045436bd9f260cdcfbcacc6390..1d09ce2ce8e84c080fae75efad3ddbcab27ac81a 100644
--- a/NEWS
+++ b/NEWS
@@ -4,6 +4,12 @@ documents those changes that are of interest to users and admins.
 * Changes in SLURM 2.3.0.pre5
 =============================
  -- BLUEGENE - Improve speed of start up when removing blocks at the beginning.
+ -- Correct init.d/slurm status to have non-zero exit code if ANY Slurm
+    daemon that should be running on the node is not running. Patch from Rod
+    Schulz, Bull.
+ -- Improve accuracy of response to "srun --test-only --jobid=#".
+ -- Correct logic to properly support the --ntasks-per-node option in the
+    select/cons_res plugin. Patch from Rod Schulz, Bull.
 
 * Changes in SLURM 2.3.0.pre4
 =============================
diff --git a/etc/init.d.slurm b/etc/init.d.slurm
index edf9ac7bfde3c1fd018b8d4a75f63f4beba6ae0c..74a52a93f1ff8c329d4ce286745c3520c2bb0e77 100644
--- a/etc/init.d.slurm
+++ b/etc/init.d.slurm
@@ -245,9 +245,15 @@ case "$1" in
 	slurmstop
 	;;
     status)
+	anystop=0
 	for prog in `$BINDIR/scontrol show daemons`; do
 	   slurmstatus $prog
+	   rc=$?
+	   if [ $rc -ne 0 ] ; then
+	       anystop=$rc
+	   fi
 	done
+	RETVAL=$anystop
 	;;
     restart)
 	$0 stop
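
The status branch above exits non-zero by remembering the last non-zero
return code from the per-daemon checks. A minimal C sketch of the same
aggregation pattern (the daemon list and the status_of() stub are
illustrative; the script itself runs the slurmstatus shell function for
each daemon reported by "scontrol show daemons"):

    /* Stand-in for the per-daemon check done via slurmstatus. */
    static int status_of(const char *prog)
    {
        (void) prog;
        return 0;               /* 0 = running, non-zero = stopped */
    }

    int main(void)
    {
        const char *daemons[] = { "slurmctld", "slurmd" };
        int anystop = 0;
        for (int i = 0; i < 2; i++) {
            int rc = status_of(daemons[i]);
            if (rc != 0)
                anystop = rc;   /* remember any failure */
        }
        return anystop;         /* non-zero exit if ANY daemon is down */
    }
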
diff --git a/src/plugins/select/cons_res/dist_tasks.c b/src/plugins/select/cons_res/dist_tasks.c
index c89e4dfeb130312ea4fb90a41ec4d5df058ce514..560db4c263e1b083e99ef17672c91fd174ac1538 100644
--- a/src/plugins/select/cons_res/dist_tasks.c
+++ b/src/plugins/select/cons_res/dist_tasks.c
@@ -75,8 +75,14 @@ static int _compute_c_b_task_dist(struct job_record *job_ptr)
 		return SLURM_ERROR;
 	}
 
-	maxtasks = job_res->ncpus;
+
+	if (job_ptr->details->ntasks_per_node == 0)
+		maxtasks = job_res->ncpus;
+	else
+		maxtasks = job_res->nhosts * job_ptr->details->ntasks_per_node;
+
 	avail_cpus = job_res->cpus;
+
 	job_res->cpus = xmalloc(job_res->nhosts * sizeof(uint16_t));
 
 	/* ncpus is already set the number of tasks if overcommit is used */
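
With --ntasks-per-node set, the task ceiling becomes tasks-per-node times
the number of allocated nodes rather than the allocated CPU count. A
minimal sketch under that reading (the struct below is a stand-in for the
job_resources fields used above):

    #include <stdint.h>
    #include <stdio.h>

    struct res_sketch {
        uint32_t ncpus;         /* CPUs allocated to the job  */
        uint32_t nhosts;        /* nodes allocated to the job */
    };

    static uint32_t max_tasks(const struct res_sketch *res,
                              uint16_t ntasks_per_node)
    {
        if (ntasks_per_node == 0)
            return res->ncpus;
        return res->nhosts * ntasks_per_node;
    }

    int main(void)
    {
        struct res_sketch res = { .ncpus = 32, .nhosts = 4 };
        /* e.g. srun -N4 --ntasks-per-node=2 => 8 tasks, not 32 */
        printf("maxtasks = %u\n", max_tasks(&res, 2));
        return 0;
    }
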
diff --git a/src/slurmctld/job_scheduler.c b/src/slurmctld/job_scheduler.c
index 37172432249afdd4673d2c657871505769da35db..447a5cfc4fd348defe9eefa4a857af179b90b65b 100644
--- a/src/slurmctld/job_scheduler.c
+++ b/src/slurmctld/job_scheduler.c
@@ -1159,7 +1159,7 @@ extern int job_start_data(job_desc_msg_t *job_desc_msg,
 	bitstr_t *avail_bitmap = NULL, *resv_bitmap = NULL;
 	uint32_t min_nodes, max_nodes, req_nodes;
 	int i, rc = SLURM_SUCCESS;
-	time_t now = time(NULL), start_res;
+	time_t now = time(NULL), start_res, orig_start_time = (time_t) 0;
 	List preemptee_candidates = NULL, preemptee_job_list = NULL;
 
 	job_ptr = find_job_record(job_desc_msg->job_id);
@@ -1235,6 +1235,13 @@ extern int job_start_data(job_desc_msg_t *job_desc_msg,
 		else
 			req_nodes = min_nodes;
 		preemptee_candidates = slurm_find_preemptable_jobs(job_ptr);
+
+		/* The orig_start_time is based upon the backfill scheduler data
+		 * and considers all higher priority jobs. The logic below
+		 * only considers currently running jobs, so the expected
+		 * start time will almost certainly be earlier and not as
+		 * accurate, but this algorithm is much faster. */
+		orig_start_time = job_ptr->start_time;
 		rc = select_g_job_test(job_ptr, avail_bitmap,
 				       min_nodes, max_nodes, req_nodes,
 				       SELECT_MODE_WILL_RUN,
@@ -1254,7 +1261,9 @@ extern int job_start_data(job_desc_msg_t *job_desc_msg,
 #else
 		resp_data->proc_cnt = job_ptr->total_cpus;
 #endif
-		resp_data->start_time = MAX(job_ptr->start_time, start_res);
+		resp_data->start_time = MAX(job_ptr->start_time,
+					    orig_start_time);
+		resp_data->start_time = MAX(resp_data->start_time, start_res);
 		job_ptr->start_time   = 0;  /* restore pending job start time */
 		resp_data->node_list  = bitmap2node_name(avail_bitmap);
 
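
The net effect of the two MAX() calls above: the reported start time is
the latest of the backfill estimate saved in orig_start_time, the will-run
test result, and the reservation start. A self-contained sketch (names are
illustrative):

    #include <stdio.h>
    #include <time.h>

    #define MAX(a,b)  (((a) > (b)) ? (a) : (b))

    /* The will-run test only considers running jobs, so its estimate can
     * be too early; clamping with the backfill estimate keeps the answer
     * from regressing. */
    static time_t reported_start(time_t will_run, time_t orig_start_time,
                                 time_t start_res)
    {
        time_t t = MAX(will_run, orig_start_time);
        return MAX(t, start_res);
    }

    int main(void)
    {
        time_t now = time(NULL);
        /* backfill predicted +600s, the will-run test only +60s */
        time_t t = reported_start(now + 60, now + 600, now);
        printf("start expected in %ld seconds\n", (long)(t - now));
        return 0;
    }
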
diff --git a/src/slurmd/slurmd/req.c b/src/slurmd/slurmd/req.c
index 32ec9c161c2a7fdccde061015ca1bf4ec588ad8f..a8aadc03b76fc1b65988135b8a034f0ef978ea5c 100644
--- a/src/slurmd/slurmd/req.c
+++ b/src/slurmd/slurmd/req.c
@@ -3619,7 +3619,12 @@ _pause_for_job_completion (uint32_t job_id, char *nodes, int max_time)
 			xcpu_signal(SIGKILL, nodes);
 			_terminate_all_steps(job_id, true);
 		}
-		sleep (1);
+		if (sec < 10)
+			sleep(1);
+		else {
+			/* Reduce logging about unkillable tasks */
+			sleep(60);
+		}
 	}
 
 	/*
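
The loop above backs off from one-second to one-minute polling after ten
seconds. Sketched in isolation (job_still_running() is a stand-in for the
real completion check, and the real loop also re-signals lingering tasks
on each pass):

    #include <stdbool.h>
    #include <unistd.h>

    static bool job_still_running(void)
    {
        return true;            /* stand-in for the real check */
    }

    /* Poll every second for the first ten seconds, then once a minute,
     * so messages about unkillable tasks repeat far less often. */
    static void pause_for_completion(int max_time)
    {
        int sec = 0;
        while (job_still_running() && (sec < max_time)) {
            int delay = (sec < 10) ? 1 : 60;
            sleep(delay);
            sec += delay;
        }
    }

    int main(void)
    {
        pause_for_completion(3);    /* returns after roughly 3 seconds */
        return 0;
    }
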
diff --git a/src/slurmd/slurmstepd/mgr.c b/src/slurmd/slurmstepd/mgr.c
index fb07d4201cd4f07fb0376ed1c5743a9d312ebd2f..edae5cb9365cf21bd418c243a64bbcc1ac6f231d 100644
--- a/src/slurmd/slurmstepd/mgr.c
+++ b/src/slurmd/slurmstepd/mgr.c
@@ -913,6 +913,8 @@ job_manager(slurmd_job_t *job)
 
 	if (rc) {
 		error("IO setup failed: %m");
+		job->task[0]->estatus = 0x0100;	/* wait(2)-style exit code 1 */
+		step_complete.step_rc = 0x0100;	/* mark the step as failed */
 		rc = SLURM_SUCCESS;	/* drains node otherwise */
 		goto fail2;
 	} else {
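
For reference, 0x0100 is a wait(2)-style status word carrying the exit
code in bits 8-15, i.e. exit code 1, so a failed IO setup is no longer
reported as a clean exit. A quick check:

    #include <stdio.h>
    #include <sys/wait.h>

    int main(void)
    {
        int status = 0x0100;    /* exit code 1, no signal bits set */
        if (WIFEXITED(status))
            printf("exit code %d\n", WEXITSTATUS(status));
        return 0;
    }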