diff --git a/NEWS b/NEWS
index 00d4f0b3397a129e4d3ba1b348f4d2535736be91..2fdde2e4c863457820e26ef453a01a9076800dfe 100644
--- a/NEWS
+++ b/NEWS
@@ -150,15 +150,22 @@ documents those changes that are of interest to users and admins.
  -- Make srun obey the POSIX convention and increase the exit code by 128 when
     the process is terminated by a signal.
  -- Sanity check for acct_gather_energy/rapl
- -- If the proctrack plugins fail to destroy the job container print an error message
-    and avoid to loop forever, give up after 120 seconds.
+ -- If the proctrack plugins fail to destroy the job container, print an error
+    message and avoid looping forever; give up after 120 seconds.
  -- If the sbatch command specifies the option --signal=B:signum send the signal
     to the batch script only.
  -- If we cancel a task and have no other exit code, send the signal and
     exit code.
  -- Added note about InnoDB storage engine being used with MySQL.
- -- Set the job exit code when the job is signaled and set the log level to debug2()
-    when processing an already completed job.
+ -- Set the job exit code when the job is signaled and set the log level to
+    debug2() when processing an already completed job.
+ -- Reset diagnostics time stamp when "sdiag --reset" is called.
+ -- squeue and scontrol to report a job's "shared" value based upon partition
+    options rather than reporting "unknown" when the job submission uses
+    neither the --exclusive nor the --shared option.
+ -- task/cgroup - Fix cpuset binding for batch script.
+ -- sched/backfill - Fix anomaly that could result in jobs being scheduled out
+    of order.
 
 * Changes in Slurm 14.03.4
 ==========================
diff --git a/doc/html/slurm_ug_agenda.shtml b/doc/html/slurm_ug_agenda.shtml
index d8d3894284ca9d85f6de216933687220a89eafb0..9725e8b0c01a1c3f83fde8906655868f29b5e77f 100644
--- a/doc/html/slurm_ug_agenda.shtml
+++ b/doc/html/slurm_ug_agenda.shtml
@@ -630,18 +630,18 @@ More information will be made available later.</p>
 
 <h3>Level-based job prioritization</h3>
 <p>Ryan Cox and Levi Morrison (Brigham Young University)</p>
-<p>I would like to present on our new LEVEL_BASED prioritization
-  mechanism.  It recurses through the account tree and calculates fair
-  share at each level.  It uses bitwise math to ensure that the effect
-  of usage and shares below the current level cannot affect
-  calculations at the current level.</p>
-<p>Basically, if accounts A and B both have users in the queue and
-  account A should have a higher fair share factor than account B,
-  users in account B cannot have a higher priority than users in
-  account A.  This is not guaranteed in other prioritization
-  methods. LEVEL_BASED was also carefully designed to avoid errors due
-  to precision loss and calculations as have previously been
-  described.</p>
+<p>We will present our new LEVEL_BASED job prioritization mechanism.
+  The algorithm prioritizes users such that users in an under-served account
+  will always have a higher fair share factor than users in an over-served
+  account.  It recurses through the account tree, calculates fair share at
+  each level, then uses bitwise math to ensure that the effect of usage and
+  shares below the current level cannot affect calculations at the current
+  level.</p>
+<p>Basically, if accounts A and B have the same shares but A has higher usage
+  than B, then children of account A will have a lower fair share factor than
+  children of account B. This is not guaranteed in other prioritization methods.
+  LEVEL_BASED was also designed to reduce the likelihood of errors due to
+  floating point precision loss.</p>
 
 <h3>Integrating Layouts Framework in SLURM</h3>
 <p>Thomas Cadeau (BULL), Yiannis Georgiou (BULL), Matthieu Hautreux (CEA)</p>
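
A note on the "bitwise math" in the LEVEL_BASED abstract above: a minimal
stand-alone sketch of the idea follows (hypothetical constants and
quantization, not BYU's actual implementation). Each level's fair-share
fraction is quantized to a fixed number of bits and packed with higher levels
in more-significant positions, so nothing below the current level can overturn
a difference at the current level.

    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_LEVEL 16    /* hypothetical width per account level */

    /* Pack per-level fair-share fractions (0.0..1.0, root level first)
     * into one integer; higher levels occupy more-significant bits. */
    static uint64_t pack_levels(const double *ff, int nlevels)
    {
        uint64_t prio = 0;
        for (int i = 0; i < nlevels; i++) {
            uint64_t q = (uint64_t)(ff[i] * ((1 << BITS_PER_LEVEL) - 1));
            prio = (prio << BITS_PER_LEVEL) | q;
        }
        return prio;
    }

    int main(void)
    {
        /* Account A is under-served at the account level (0.9 vs 0.2),
         * so its user wins despite a worse user-level factor. */
        double user_in_a[] = { 0.9, 0.1 };
        double user_in_b[] = { 0.2, 1.0 };
        printf("A=0x%llx B=0x%llx\n",
               (unsigned long long) pack_levels(user_in_a, 2),
               (unsigned long long) pack_levels(user_in_b, 2));
        return 0;
    }

As written this prints A=0xe6651999 and B=0x3333ffff, so A's user sorts first,
matching the guarantee described in the abstract.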
diff --git a/doc/man/man1/salloc.1 b/doc/man/man1/salloc.1
index 5eb43edc59a02012f48e839bd4c38ee20e64231b..975470f6bf27175ea2ae8bea735e8e73c6ce563c 100644
--- a/doc/man/man1/salloc.1
+++ b/doc/man/man1/salloc.1
@@ -693,6 +693,9 @@ value will equal the original \fB\-\-mem\-per\-cpu\fR value specified by
 the user.
 This parameter would generally be used if individual processors
 are allocated to jobs (\fBSelectType=select/cons_res\fR).
+If resources are allocated by the core, socket, or whole node, the number
+of CPUs allocated to a job may be higher than the task count, and the value
+of \fB\-\-mem\-per\-cpu\fR should be adjusted accordingly.
 Also see \fB\-\-mem\fR.
 \fB\-\-mem\fR and \fB\-\-mem\-per\-cpu\fR are mutually exclusive.
 
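A hypothetical worked example of the adjustment described in the paragraph
added above, assuming a node whose cores each carry two threads (CPUs) and a
partition that allocates whole cores:

    task count requested (-n1):           1
    CPUs actually allocated:              2      (one core = two CPUs)
    memory charged at --mem-per-cpu=2048: 2 * 2048 MB = 4096 MB
    adjusted request --mem-per-cpu=1024:  2 * 1024 MB = 2048 MB

The identical paragraph added to sbatch.1 and srun.1 below follows the same
arithmetic.
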
diff --git a/doc/man/man1/sbatch.1 b/doc/man/man1/sbatch.1
index 18189e73bb33d5de5d6cf7771dffda18422fcecb..0f68d707f56edaacb5ce46e2bf6f90cae836367f 100644
--- a/doc/man/man1/sbatch.1
+++ b/doc/man/man1/sbatch.1
@@ -780,6 +780,9 @@ value will equal the original \fB\-\-mem\-per\-cpu\fR value specified by
 the user.
 This parameter would generally be used if individual processors
 are allocated to jobs (\fBSelectType=select/cons_res\fR).
+If resources are allocated by the core, socket, or whole node, the number
+of CPUs allocated to a job may be higher than the task count, and the value
+of \fB\-\-mem\-per\-cpu\fR should be adjusted accordingly.
 Also see \fB\-\-mem\fR.
 \fB\-\-mem\fR and \fB\-\-mem\-per\-cpu\fR are mutually exclusive.
 
diff --git a/doc/man/man1/srun.1 b/doc/man/man1/srun.1
index 1e9baa886a50952db0050b539895b69cd219ff86..55dd2fdb7eb690e97b60d5683d1ef7e0c026a900 100644
--- a/doc/man/man1/srun.1
+++ b/doc/man/man1/srun.1
@@ -830,6 +830,9 @@ value will equal the original \fB\-\-mem\-per\-cpu\fR value specified by
 the user.
 This parameter would generally be used if individual processors
 are allocated to jobs (\fBSelectType=select/cons_res\fR).
+If resources are allocated by the core, socket, or whole node, the number
+of CPUs allocated to a job may be higher than the task count, and the value
+of \fB\-\-mem\-per\-cpu\fR should be adjusted accordingly.
 Specifying a memory limit of zero for a job step will restrict the job step
 to the amount of memory allocated to the job, but not remove any of the job's
 memory allocation from being available to other job steps.
diff --git a/src/plugins/sched/backfill/backfill.c b/src/plugins/sched/backfill/backfill.c
index 9cd65bb9ac882aac9236bb98f9c2ac9b0c98addf..3dececdb93d6d7ed2fa56994711dfdf27bd361a1 100644
--- a/src/plugins/sched/backfill/backfill.c
+++ b/src/plugins/sched/backfill/backfill.c
@@ -667,7 +667,6 @@ static int _attempt_backfill(void)
 	uint32_t min_nodes, max_nodes, req_nodes;
 	bitstr_t *avail_bitmap = NULL, *resv_bitmap = NULL;
 	bitstr_t *exc_core_bitmap = NULL, *non_cg_bitmap = NULL;
-	bitstr_t *previous_bitmap = NULL;
 	time_t now, sched_start, later_start, start_res, resv_end, window_end;
 	node_space_map_t *node_space;
 	struct timeval bf_time1, bf_time2;
@@ -973,7 +972,6 @@ static int _attempt_backfill(void)
 
 		/* Determine impact of any resource reservations */
 		later_start = now;
-		FREE_NULL_BITMAP(previous_bitmap);
  TRY_LATER:
 		if (slurmctld_config.shutdown_time)
 			break;
@@ -1083,22 +1081,18 @@ static int _attempt_backfill(void)
 		    ((job_ptr->details->req_node_bitmap) &&
 		     (!bit_super_set(job_ptr->details->req_node_bitmap,
 				     avail_bitmap))) ||
-		    (job_req_node_filter(job_ptr, avail_bitmap)) ||
-		    (previous_bitmap &&
-		     bit_equal(previous_bitmap, avail_bitmap))) {
+		    (job_req_node_filter(job_ptr, avail_bitmap))) {
 			if (later_start) {
 				job_ptr->start_time = 0;
 				goto TRY_LATER;
 			}
+
 			/* Job can not start until too far in the future */
 			job_ptr->time_limit = orig_time_limit;
 			job_ptr->start_time = sched_start + backfill_window;
 			continue;
 		}
 
-		FREE_NULL_BITMAP(previous_bitmap);
-		previous_bitmap = bit_copy(avail_bitmap);
-
 		/* Identify nodes which are definitely off limits */
 		FREE_NULL_BITMAP(resv_bitmap);
 		resv_bitmap = bit_copy(avail_bitmap);
@@ -1278,7 +1272,6 @@ static int _attempt_backfill(void)
 	FREE_NULL_BITMAP(exc_core_bitmap);
 	FREE_NULL_BITMAP(resv_bitmap);
 	FREE_NULL_BITMAP(non_cg_bitmap);
-	FREE_NULL_BITMAP(previous_bitmap);
 
 	for (i=0; ; ) {
 		FREE_NULL_BITMAP(node_space[i].avail_bitmap);
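
The deleted previous_bitmap logic above memoized the node bitmap seen by the
previous job and skipped any job whose available set was identical, assuming
the outcome would repeat. A self-contained sketch of why that assumption fails
(toy jobs, not Slurm's structures): two jobs can see the same available nodes
yet need different amounts of them, so the skipped job may in fact be
runnable; later jobs then start ahead of it, the out-of-order anomaly noted
in NEWS.

    #include <stdio.h>

    struct job { const char *name; int req_nodes; };

    int main(void)
    {
        int avail_nodes = 4;    /* the same "bitmap" for both jobs */
        struct job jobs[] = { { "big", 8 }, { "small", 2 } };

        for (int i = 0; i < 2; i++) {
            /* the removed code would have skipped "small" here because
             * its available set matched the one just tried for "big" */
            int can_start = (jobs[i].req_nodes <= avail_nodes);
            printf("%s: %s\n", jobs[i].name,
                   can_start ? "start now" : "defer");
        }
        return 0;
    }

"big" is deferred but "small" can start now; a skip keyed only on the
identical available set would have deferred both.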
diff --git a/src/plugins/task/cgroup/task_cgroup_cpuset.c b/src/plugins/task/cgroup/task_cgroup_cpuset.c
index cd5eee4ccc31300c694aec21e5e2eb30ee66c537..65b6a2bdd0b60e78fecde144210a7d6c8d85a02e 100644
--- a/src/plugins/task/cgroup/task_cgroup_cpuset.c
+++ b/src/plugins/task/cgroup/task_cgroup_cpuset.c
@@ -1114,8 +1114,11 @@ extern int task_cgroup_cpuset_set_task_affinity(stepd_step_rec_t *job)
 	uint32_t jntasks = job->node_tasks;
 	uint32_t jnpus;
 
-	job->cpus_per_task = MAX(1, job->cpus_per_task);
-	jnpus = jntasks * job->cpus_per_task;
+	if (job->batch) {
+		jnpus = job->cpus;
+		job->cpus_per_task = job->cpus;
+	} else
+		jnpus = jntasks * job->cpus_per_task;
 
 	bind_type = job->cpu_bind_type;
 	if ((conf->task_plugin_param & CPU_BIND_VERBOSE) ||
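
For context on the hunk above: a batch step typically has node_tasks == 1 and
cpus_per_task == 1, so the old jntasks * cpus_per_task formula produced a
one-CPU mask even when the job owned a whole node. A toy illustration with
made-up numbers:

    #include <stdio.h>

    int main(void)
    {
        int node_tasks = 1, cpus_per_task = 1;  /* typical batch step */
        int job_cpus_on_node = 16;              /* whole-node allocation */

        int old_jnpus = node_tasks * cpus_per_task;  /* = 1, too narrow */
        int new_jnpus = job_cpus_on_node;            /* = 16, full cpuset */

        printf("old=%d new=%d\n", old_jnpus, new_jnpus);
        return 0;
    }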
diff --git a/src/slurmctld/controller.c b/src/slurmctld/controller.c
index 65eeacb827586280d15e5e5df9e782ac44aee0fd..9667818e2411984de545ce2b57495a21ad22764e 100644
--- a/src/slurmctld/controller.c
+++ b/src/slurmctld/controller.c
@@ -1649,20 +1649,12 @@ static void *_slurmctld_background(void *no_data)
 			_accounting_cluster_ready();
 		}
 
+		/* Stats will reset at midnight (approx) UTC; the epoch
+		 * arithmetic below does not account for time zones. */
 		if (last_proc_req_start == 0) {
-			/* Stats will reset at midnight (aprox).
-			 * Uhmmm... UTC time?... It is  not so important.
-			 * Just resetting during the night */
-			last_proc_req_start = now;
-			next_stats_reset = last_proc_req_start -
-					   (last_proc_req_start % 86400) +
-					   86400;
-		}
-
-		if ((next_stats_reset > 0) && (now > next_stats_reset)) {
-			/* Resetting stats values */
 			last_proc_req_start = now;
 			next_stats_reset = now - (now % 86400) + 86400;
+		} else if (now >= next_stats_reset) {
+			next_stats_reset = now - (now % 86400) + 86400;
 			reset_stats(0);
 		}
 
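The consolidated branch above hinges on one expression. Since time() counts
seconds from the UTC epoch, now - (now % 86400) is the most recent midnight
UTC and adding 86400 yields the next one; a minimal stand-alone check with a
hypothetical epoch value:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        time_t now = 1402000000;    /* hypothetical epoch time */
        time_t next = now - (now % 86400) + 86400;
        printf("now=%ld next_reset=%ld delta=%ld\n",
               (long) now, (long) next, (long) (next - now));
        return 0;    /* prints next_reset=1402012800, 12800 s ahead */
    }
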
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index e19fe51b70cb5c9ffe70c9cd3caae0f65a9c20b5..9fa3298ba13ba6894552c0c301a00d68bc8f1ad7 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -7027,6 +7027,26 @@ static void _pack_default_job_details(struct job_record *job_ptr,
 	char *cmd_line = NULL;
 	char *tmp = NULL;
 	uint32_t len = 0;
+	uint16_t shared = 0;
+
+	if (!detail_ptr)
+		shared = (uint16_t) NO_VAL;
+	else if (detail_ptr->share_res == 1)	/* User --share */
+		shared = 1;
+	else if ((detail_ptr->share_res == 0) ||
+		 (detail_ptr->whole_node == 1))	/* User --exclusive */
+		shared = 0;
+	else if (job_ptr->part_ptr) {
+		/* Report shared status based upon latest partition info */
+		if ((job_ptr->part_ptr->max_share & SHARED_FORCE) &&
+		    ((job_ptr->part_ptr->max_share & (~SHARED_FORCE)) > 1))
+			shared = 1;		/* Partition Shared=force */
+		else if (job_ptr->part_ptr->max_share == 0)
+			shared = 0;		/* Partition Shared=exclusive */
+		else
+			shared = 0;		/* Part Shared=yes or no */
+	} else
+		shared = (uint16_t) NO_VAL;	/* No user or partition info */
 
 	if (max_cpu_cnt == -1)
 		max_cpu_cnt = _find_node_max_cpu_cnt();
@@ -7097,6 +7117,7 @@ static void _pack_default_job_details(struct job_record *job_ptr,
 			}
 			pack16(detail_ptr->requeue,   buffer);
 			pack16(detail_ptr->ntasks_per_node, buffer);
+			pack16(shared, buffer);
 		} else {
 			packnull(buffer);
 			packnull(buffer);
@@ -7113,6 +7134,7 @@ static void _pack_default_job_details(struct job_record *job_ptr,
 			pack32((uint32_t) 0, buffer);
 			pack16((uint16_t) 0, buffer);
 			pack16((uint16_t) 0, buffer);
+			pack16((uint16_t) 0, buffer);
 		}
 	} else {
 		error("_pack_default_job_details: protocol_version "
@@ -7124,20 +7146,8 @@ static void _pack_default_job_details(struct job_record *job_ptr,
 static void _pack_pending_job_details(struct job_details *detail_ptr,
 				      Buf buffer, uint16_t protocol_version)
 {
-	uint16_t shared = 0;
-
-	if (!detail_ptr)
-		shared = (uint16_t) NO_VAL;
-	else if (detail_ptr->share_res == 1)
-		shared = 1;
-	else if ((detail_ptr->share_res == 0) ||
-		 (detail_ptr->whole_node == 1))
-		shared = 0;
-	else
-		shared = (uint16_t) NO_VAL;
 	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		if (detail_ptr) {
-			pack16(shared, buffer);
 			pack16(detail_ptr->contiguous, buffer);
 			pack16(detail_ptr->core_spec, buffer);
 			pack16(detail_ptr->cpus_per_task, buffer);
@@ -7163,7 +7173,6 @@ static void _pack_pending_job_details(struct job_details *detail_ptr,
 			pack16((uint16_t) 0, buffer);
 			pack16((uint16_t) 0, buffer);
 			pack16((uint16_t) 0, buffer);
-			pack16((uint16_t) 0, buffer);
 
 			pack32((uint32_t) 0, buffer);
 			pack32((uint32_t) 0, buffer);
@@ -7181,7 +7190,6 @@ static void _pack_pending_job_details(struct job_details *detail_ptr,
 		}
 	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
 		if (detail_ptr) {
-			pack16(shared, buffer);
 			pack16(detail_ptr->contiguous, buffer);
 			pack16(detail_ptr->cpus_per_task, buffer);
 			pack16(detail_ptr->pn_min_cpus, buffer);
@@ -7201,7 +7209,6 @@ static void _pack_pending_job_details(struct job_details *detail_ptr,
 			pack16((uint16_t) 0, buffer);
 			pack16((uint16_t) 0, buffer);
 			pack16((uint16_t) 0, buffer);
-			pack16((uint16_t) 0, buffer);
 
 			pack32((uint32_t) 0, buffer);
 			pack32((uint32_t) 0, buffer);
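
The precedence implemented by the new block in _pack_default_job_details()
reduces to a small decision ladder. A hypothetical stand-alone rendering
(scaffolding invented for illustration; the NO_VAL and SHARED_FORCE values are
as assumed from slurm.h):

    #include <stdint.h>
    #include <stdio.h>

    #define NO_VAL       0xfffffffe    /* assumed, per slurm.h */
    #define SHARED_FORCE 0x8000        /* assumed, per slurm.h */

    /* User request wins first, then partition policy, else "unknown". */
    static uint16_t derive_shared(int have_details, int share_res,
                                  int whole_node, int have_part,
                                  uint16_t max_share)
    {
        if (!have_details)
            return (uint16_t) NO_VAL;    /* unknown */
        if (share_res == 1)
            return 1;                    /* user --share */
        if ((share_res == 0) || (whole_node == 1))
            return 0;                    /* user --exclusive */
        if (have_part) {
            if ((max_share & SHARED_FORCE) &&
                ((max_share & ~SHARED_FORCE) > 1))
                return 1;                /* partition Shared=force */
            return 0;                    /* Shared=exclusive, yes or no */
        }
        return (uint16_t) NO_VAL;        /* no user or partition info */
    }

    int main(void)
    {
        /* Neither --share nor --exclusive given; partition Shared=FORCE:4 */
        printf("reported shared = %u\n",
               derive_shared(1, -1, 0, 1, SHARED_FORCE | 4));
        return 0;    /* prints 1 */
    }

This is why squeue and scontrol can now report a value instead of "unknown"
when the submission used neither option, per the NEWS entry above.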
diff --git a/src/slurmctld/statistics.c b/src/slurmctld/statistics.c
index 6b335e9bbc62fa0aa6aa2a9174517f5294c54f56..89706a959aa2bdfb568210eac8f6c785bf0c7a32 100644
--- a/src/slurmctld/statistics.c
+++ b/src/slurmctld/statistics.c
@@ -155,4 +155,6 @@ extern void reset_stats(int level)
 	slurmctld_diag_stats.bf_last_depth = 0;
 	slurmctld_diag_stats.bf_last_depth_try = 0;
 	slurmctld_diag_stats.bf_active = 0;
+
+	last_proc_req_start = time(NULL);
 }
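
With the line added above, both the nightly rollover in
_slurmctld_background() and an RPC-triggered "sdiag --reset" funnel through
reset_stats(), which now also restamps the start of the collection window. A
reduced sketch of that shared-state interplay (hypothetical driver, not the
daemon itself):

    #include <stdio.h>
    #include <time.h>

    static time_t last_proc_req_start;    /* collection start shown by sdiag */

    static void reset_stats(void)
    {
        /* ...zero the diagnostic counters (elided)... */
        last_proc_req_start = time(NULL);    /* the line this patch adds */
    }

    int main(void)
    {
        reset_stats();    /* e.g. triggered by "sdiag --reset" */
        printf("stats collected since %s", ctime(&last_proc_req_start));
        return 0;
    }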