diff --git a/src/plugins/select/cons_res/job_test.c b/src/plugins/select/cons_res/job_test.c
index 4a3cd815e6d40d38aebc7add01860061bc98ff40..43aec63fde7436441330fb94f4efd4497c28e7c8 100644
--- a/src/plugins/select/cons_res/job_test.c
+++ b/src/plugins/select/cons_res/job_test.c
@@ -967,6 +967,28 @@ static bool _enough_nodes(int avail_nodes, int rem_nodes,
 	return (avail_nodes >= needed_nodes);
 }
 
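+/* For a job that can share nodes, limit the CPUs used on this node:
+ * reserve pn_min_cpus CPUs for each of the job's other remaining nodes,
+ * and if avail_cpus exceeds what the job still needs here after that
+ * reservation, reduce it (never below pn_min_cpus) and record the
+ * reduced value in cpu_cnt. */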
+static void _cpus_to_use(int *avail_cpus, int rem_cpus, int rem_nodes,
+			 struct job_details *details_ptr, uint16_t *cpu_cnt)
+{
+	int resv_cpus;	/* CPUs to be allocated on other nodes */
+
+	if (details_ptr->shared == 0)	/* Use all CPUs on this node */
+		return;
+
+	resv_cpus = MAX((rem_nodes - 1), 0);
+	resv_cpus *= details_ptr->pn_min_cpus;	/* pn_min_cpus >= 1 */
+	rem_cpus -= resv_cpus;
+
+	if (*avail_cpus > rem_cpus) {
+		*avail_cpus = MAX(rem_cpus, (int)details_ptr->pn_min_cpus);
+		*cpu_cnt = *avail_cpus;
+	}
+}
 
 /* this is the heart of the selection process */
 static int _eval_nodes(struct job_record *job_ptr, bitstr_t *node_map,
@@ -1180,14 +1197,8 @@ static int _eval_nodes(struct job_record *job_ptr, bitstr_t *node_map,
 				 * requested nodes here we will still give
 				 * them and then the step layout will sort
 				 * things out. */
-				if ((details_ptr->shared != 0) &&
-				    (avail_cpus > rem_cpus)) {
-					avail_cpus = MAX(rem_cpus,
-							 (int)details_ptr->
-							 pn_min_cpus);
-					cpu_cnt[i] = MAX(avail_cpus, 1);
-				}
-
+				_cpus_to_use(&avail_cpus, rem_cpus, rem_nodes,
+					     details_ptr, &cpu_cnt[i]);
 				total_cpus += avail_cpus;
 				/* enforce the max_cpus limit */
 				if ((details_ptr->max_cpus != NO_VAL) &&
@@ -1218,14 +1229,8 @@ static int _eval_nodes(struct job_record *job_ptr, bitstr_t *node_map,
 				 * requested nodes here we will still give
 				 * them and then the step layout will sort
 				 * things out. */
-				if ((details_ptr->shared != 0) &&
-				    (avail_cpus > rem_cpus)) {
-					avail_cpus = MAX(rem_cpus,
-							 (int)details_ptr->
-							 pn_min_cpus);
-					cpu_cnt[i] = MAX(avail_cpus, 1);
-				}
-
+				_cpus_to_use(&avail_cpus, rem_cpus, rem_nodes,
+					     details_ptr, &cpu_cnt[i]);
 				total_cpus += avail_cpus;
 				/* enforce the max_cpus limit */
 				if ((details_ptr->max_cpus != NO_VAL) &&
@@ -1302,14 +1307,8 @@ static int _eval_nodes(struct job_record *job_ptr, bitstr_t *node_map,
 				 * requested nodes here we will still give
 				 * them and then the step layout will sort
 				 * things out. */
-				if ((details_ptr->shared != 0) &&
-				    (avail_cpus > rem_cpus)) {
-					avail_cpus = MAX(rem_cpus,
-							 (int)details_ptr->
-							 pn_min_cpus);
-					cpu_cnt[i] = MAX(avail_cpus, 1);
-				}
-
+				_cpus_to_use(&avail_cpus, rem_cpus, rem_nodes,
+					     details_ptr, &cpu_cnt[i]);
 				total_cpus += avail_cpus;
 				/* enforce the max_cpus limit */
 				if ((details_ptr->max_cpus != NO_VAL) &&
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index ffd9b612b0d9bd77b91f884bb054b26235b09d4f..71c56877bcefa5ef71a07d11b2d765cf148062d5 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -4032,7 +4032,7 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 		}
 	} else {
 		detail_ptr->pn_min_cpus = MAX(detail_ptr->pn_min_cpus,
-					       detail_ptr->cpus_per_task);
+					      detail_ptr->cpus_per_task);
 	}
 	if (job_desc->requeue != (uint16_t) NO_VAL)
 		detail_ptr->requeue = MIN(job_desc->requeue, 1);
@@ -4552,7 +4552,8 @@ static int _validate_job_desc(job_desc_msg_t * job_desc_msg, int allocate,
 	if (job_desc_msg->min_cpus == NO_VAL)
 		job_desc_msg->min_cpus = job_desc_msg->min_nodes;
 
-	if (job_desc_msg->pn_min_cpus == (uint16_t) NO_VAL)
+	if ((job_desc_msg->pn_min_cpus == (uint16_t) NO_VAL) ||
+	    (job_desc_msg->pn_min_cpus == 0))
 		job_desc_msg->pn_min_cpus = 1;   /* default 1 cpu per node */
 	if (job_desc_msg->pn_min_tmp_disk == NO_VAL)
 		job_desc_msg->pn_min_tmp_disk = 0;/* default 0MB disk per node */
@@ -5973,7 +5974,8 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 		update_accounting = true;
 	}
 
-	if (job_specs->pn_min_cpus != (uint16_t) NO_VAL) {
+	if ((job_specs->pn_min_cpus != (uint16_t) NO_VAL) &&
+	    (job_specs->pn_min_cpus != 0)) {
 		if ((!IS_JOB_PENDING(job_ptr)) || (detail_ptr == NULL))
 			error_code = ESLURM_DISABLED;
 		else if (authorized