diff --git a/NEWS b/NEWS
index 7215667088413efb67d00ff460dd92950c44418e..aacdd285cacccbb490558bca3d61d18fd5c27d6a 100644
--- a/NEWS
+++ b/NEWS
@@ -15,7 +15,7 @@ documents those changes that are of interest to users and admins.
     rather than the sbatch command itself to permit faster response
     for Moab.
 -- IMPORTANT FIX: This only affects use of select/cons_res when allocating
-    resources by core or socket, not by nodes or processors (the default). 
+    resources by core or socket, not by CPU (default for SelectTypeParameter). 
     We are not saving a pending job's task distribution, so after restarting
     slurmctld, select/cons_res was over-allocating resources based upon an 
     invalid task distribution value. Since we can't save the value without 
@@ -23,6 +23,8 @@ documents those changes that are of interest to users and admins.
     value for now and save it in Slurm v1.4. This may result in a slight 
     variation on how sockets and cores are allocated to jobs, but at least 
     resources will not be over-allocated.
+ -- Correct logic for accumulating resources by node weight when more than
+    one job can run per node (select/cons_res or partition Shared=YES|FORCE).
 
 * Changes in SLURM 1.3.3
 ========================
diff --git a/src/slurmctld/node_scheduler.c b/src/slurmctld/node_scheduler.c
index d7de9fe7aa80dba95e607b7df9ef55013f698939..53806a61db9567b0c8b2011e8133bff85f41734c 100644
--- a/src/slurmctld/node_scheduler.c
+++ b/src/slurmctld/node_scheduler.c
@@ -663,7 +663,8 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 			avail_nodes = bit_set_count(avail_bitmap);
 			tried_sched = false;	/* need to test these nodes */
 
-			if (shared) {
+			if (shared && ((i+1) < node_set_size) && 
+			    (node_set_ptr[i].weight == node_set_ptr[i+1].weight)) {
 				/* Keep accumulating so we can pick the
 				 * most lightly loaded nodes */
 				continue;
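
For reference, the following is a minimal, self-contained C sketch of the accumulation rule the node_scheduler.c hunk introduces; it is not part of the patch and does not use the SLURM API. With sharing enabled, node sets (already sorted by weight) are accumulated only while the next set carries the same weight; a scheduling attempt is made whenever the weight is about to change, so the most lightly loaded nodes are picked from within one weight class before heavier-weight nodes are considered. The names demo_node_set, try_schedule and pick_nodes_demo are illustrative placeholders.

/* Standalone demonstration of the patched accumulation rule; not SLURM code. */
#include <stdbool.h>
#include <stdio.h>

struct demo_node_set {
	const char *name;
	int weight;		/* lower weight == preferred nodes */
};

/* Stand-in for the real scheduling attempt made in _pick_best_nodes() */
static bool try_schedule(int accumulated)
{
	printf("  scheduling attempt with %d node set(s) accumulated\n",
	       accumulated);
	return false;		/* pretend the request is never satisfied */
}

static void pick_nodes_demo(struct demo_node_set *sets, int set_cnt, bool shared)
{
	int accumulated = 0;

	for (int i = 0; i < set_cnt; i++) {
		accumulated++;
		printf("accumulate %s (weight %d)\n", sets[i].name, sets[i].weight);

		/* Patched rule: when nodes may be shared, defer the scheduling
		 * attempt only while the following node set has the same weight,
		 * mirroring the new test on node_set_ptr[i].weight, so the most
		 * lightly loaded nodes are chosen within one weight class. */
		if (shared && ((i + 1) < set_cnt) &&
		    (sets[i].weight == sets[i + 1].weight))
			continue;

		if (try_schedule(accumulated))
			return;
	}
}

int main(void)
{
	struct demo_node_set sets[] = {
		{ "tux[0-15]",  1 },
		{ "tux[16-31]", 1 },
		{ "tux[32-47]", 4 },
	};

	pick_nodes_demo(sets, 3, true);
	return 0;
}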