diff --git a/src/common/node_conf.c b/src/common/node_conf.c
index a3e2cdf86f28fd5a5ecf1fd07868346ea5b687d2..eea68a9e6362f0e93de738b650fb911ee22e8145 100644
--- a/src/common/node_conf.c
+++ b/src/common/node_conf.c
@@ -1094,3 +1094,24 @@ extern uint32_t cr_get_coremap_offset(uint32_t node_index)
 	xassert(cr_node_cores_offset);
 	return cr_node_cores_offset[node_index];
 }
+
+/* Scale cpus by the fraction of hwthreads per core actually in use.
+ * The value of threads should be from config_ptr->threads if slurm.conf has
+ * fast schedule turned on (config_record); otherwise, it should be from
+ * node_ptr->threads (node_record).
+ */
+extern int _adjust_cpus_nppcu(uint16_t ntasks_per_core, uint16_t threads, int cpus)
+{
+	if (threads == 0)	/* malformed record; avoid division by zero */
+		return cpus;
+
+	if (ntasks_per_core == 0xffff)	/* not set: all hwthreads usable */
+		ntasks_per_core = threads;
+
+	/* Adjust the number of CPUs according to the percentage of the
+	 * hwthreads/core being used. */
+	cpus *= ntasks_per_core;
+	cpus /= threads;
+
+	return cpus;
+}
diff --git a/src/common/node_conf.h b/src/common/node_conf.h
index bc3afcf227db61cd3707e7df63aab5fa13a8b477..22b44c6ff2434165601b6d736c420c7a74a048a6 100644
--- a/src/common/node_conf.h
+++ b/src/common/node_conf.h
@@ -281,4 +281,9 @@ extern void cr_fini_global_core_data(void);
 /*return the coremap index to the first core of the given node */
 extern uint32_t cr_get_coremap_offset(uint32_t node_index);
 
+/* Given the number of tasks per core and the actual number of hw threads, compute
+ * how many CPUs are "visible" and, hence, usable on the node.
+ */
+extern int _adjust_cpus_nppcu(uint16_t ntasks_per_core, uint16_t threads, int cpus);
+
 #endif /* !_HAVE_NODE_CONF_H */
diff --git a/src/common/slurm_resource_info.c b/src/common/slurm_resource_info.c
index 9b6c05332121c51e6314eeb2521b24c8cc732892..66d6d358d4ff013efba2a96d7622d5be451a5fb0 100644
--- a/src/common/slurm_resource_info.c
+++ b/src/common/slurm_resource_info.c
@@ -137,21 +137,9 @@ int slurm_get_avail_procs(const uint16_t socket_cnt,
 			  uint32_t job_id,
 			  char *name)
 {
-	uint16_t avail_cpus = 0, max_cpus = 0;
-	uint16_t allocated_cpus = 0, allocated_cores = 0, allocated_sockets = 0;
-	uint16_t max_avail_cpus = 0xffff;	/* for alloc_* accounting */
-	uint16_t min_sockets = 1, max_sockets = 0xffff;
-	uint16_t min_cores   = 1, max_cores   = 0xffff;
-	uint16_t                  max_threads = 0xffff;
-	int i;
+	uint16_t avail_cpus = 0;
 
         /* pick defaults for any unspecified items */
-	if (socket_cnt != (uint16_t) NO_VAL)
-		min_sockets = max_sockets = socket_cnt;
-	if (core_cnt != (uint16_t) NO_VAL)
-		min_cores = max_cores = core_cnt;	
-	if (thread_cnt != (uint16_t) NO_VAL)
-		max_threads = thread_cnt;
 	if (cpus_per_task <= 0)
 		cpus_per_task = 1;
 	if (*threads <= 0)
@@ -160,11 +148,8 @@ int slurm_get_avail_procs(const uint16_t socket_cnt,
 	    	*cores = 1;
 	if (*sockets <= 0)
 	    	*sockets = *cpus / *cores / *threads;
-	for (i = 0 ; alloc_cores && i < *sockets; i++) {
-		allocated_cores += alloc_cores[i];
-		if (alloc_cores[i])
-			allocated_sockets++;
-	}
+
 #if (DEBUG)
+	int i;	/* only the DEBUG dump of alloc_cores[] below still uses it */
 	info("get_avail_procs %u %s User_ sockets %u cores %u threads %u",
 			job_id, name, socket_cnt, core_cnt, thread_cnt);
@@ -173,139 +157,31 @@ int slurm_get_avail_procs(const uint16_t socket_cnt,
 	info("get_avail_procs %u %s Ntask node   %u sockets %u core   %u",
 			job_id, name, ntaskspernode, ntaskspersocket,
 			ntaskspercore);
-	info("get_avail_procs %u %s cr_type %d cpus %u  alloc_ c %u s %u",
-			job_id, name, cr_type, *cpus, allocated_cores,
-			allocated_sockets);
+	info("get_avail_procs %u %s cr_type %d cpus %u",
+			job_id, name, cr_type, *cpus);
 	for (i = 0; alloc_cores && i < *sockets; i++)
 		info("get_avail_procs %u %s alloc_cores[%d] = %u",
 		     job_id, name, i, alloc_cores[i]);
 #endif
-	allocated_cpus = allocated_cores * (*threads);
-
-	/* For the following CR types, nodes have no notion of socket, core,
-	   and thread.  Only one level of logical processors */
-	if (cr_type & CR_CORE) {
-		if (*cpus >= allocated_cpus)
-			*cpus -= allocated_cpus;
-		else {
-			*cpus = 0;
-			error("cons_res: *cpus underflow");
-		}
-		if (allocated_cores > 0) {
-			max_avail_cpus = 0;
-			int tmp_diff = 0;
-			for (i=0; i<*sockets; i++) {
-				tmp_diff = *cores - alloc_cores[i];
-				if (min_cores <= tmp_diff) {
-					tmp_diff *= (*threads);
-					max_avail_cpus += tmp_diff;
-				}
-			}
-		}
-
-		/*** honor socket/core/thread maximums ***/
-		*sockets = MIN(*sockets, max_sockets);
-		*cores   = MIN(*cores,   max_cores);
-		*threads = MIN(*threads, max_threads);
-
-		if (min_sockets > *sockets) {
-			*cpus = 0;
-		} else {
-			int max_cpus_socket = 0;
-			max_cpus = 0;
-			for (i=0; i<*sockets; i++) {
-				max_cpus_socket = 0;
-				if (min_cores <= *cores) {
-				        int num_threads = *threads;
-					if (ntaskspercore > 0) {
-						num_threads = MIN(num_threads,
-							       ntaskspercore);
-					}
-					max_cpus_socket = *cores * num_threads;
-				}
-				if (ntaskspersocket > 0) {
-					max_cpus_socket = MIN(max_cpus_socket,
-							      ntaskspersocket);
-				}
-				max_cpus += max_cpus_socket;
-			}
-			max_cpus = MIN(max_cpus, max_avail_cpus);
-		}
-
-		/*** honor any availability maximum ***/
-		max_cpus = MIN(max_cpus, max_avail_cpus);
-
-		if (ntaskspernode > 0) {
-			max_cpus = MIN(max_cpus, ntaskspernode);
-		}
-	} else if (cr_type & CR_SOCKET) {
-		if (*sockets >= allocated_sockets)
-			*sockets -= allocated_sockets; /* sockets count */
-		else {
-			*sockets = 0;
-			error("cons_res: *sockets underflow");
-		}
-		if (*cpus >= allocated_cpus)
-			*cpus -= allocated_cpus;
-		else {
-			*cpus = 0;
-			error("cons_res: *cpus underflow");
-		}
-
-		if (min_sockets > *sockets)
-			*cpus = 0;
-
-		/*** honor socket/core/thread maximums ***/
-		*sockets = MIN(*sockets, max_sockets);
-		*cores   = MIN(*cores,   max_cores);
-		*threads = MIN(*threads, max_threads);
+	uint32_t nppcu = ntaskspercore;
 
-		/*** compute an overall maximum cpu count honoring ntasks* ***/
-		max_cpus  = *threads;
-		if (ntaskspercore > 0) {
-			max_cpus = MIN(max_cpus, ntaskspercore);
-		}
-		max_cpus *= *cores;
-		if (ntaskspersocket > 0) {
-			max_cpus = MIN(max_cpus, ntaskspersocket);
-		}
-		max_cpus *= *sockets;
-		if (ntaskspernode > 0) {
-			max_cpus = MIN(max_cpus, ntaskspernode);
-		}
-
-		/*** honor any availability maximum ***/
-		max_cpus = MIN(max_cpus, max_avail_cpus);
-	} else {	/* CR_CPU (default) */
-		if ((cr_type & CR_CPU) ||
-		    (!(cr_type & CR_MEMORY))) {
-			if (*cpus >= allocated_cpus)
-				*cpus -= allocated_cpus;
-			else {
-				*cpus = 0;
-				error("cons_res: *cpus underflow");
-			}
-		}
-
-		/*** compute an overall maximum cpu count honoring ntasks* ***/
-		max_cpus  = *cpus;
-		if (ntaskspernode > 0) {
-			max_cpus = MIN(max_cpus, ntaskspernode);
-		}
+	if (nppcu == 0xffff) { /* nppcu was not explicitly set, use all threads */
+		debug3("nppcu was not explicitly set, using all threads");
+		nppcu = *threads;
 	}
 
-	/*** factor cpus_per_task into max_cpus ***/
-	max_cpus *= cpus_per_task;
-	/*** round down available based on cpus_per_task ***/
-	avail_cpus = (*cpus / cpus_per_task) * cpus_per_task;
-	avail_cpus = MIN(avail_cpus, max_cpus);
+	avail_cpus = *sockets * *cores * *threads;
+	avail_cpus = avail_cpus * nppcu / *threads;
+
+	if (ntaskspernode > 0)
+		avail_cpus = MIN(avail_cpus, ntaskspernode * cpus_per_task);
 
 #if (DEBUG)
 	info("get_avail_procs %u %s return cpus %u sockets %u cores %u threads %u",
 			job_id, name, *cpus, *sockets, *cores, *threads);
 	info("get_avail_procs %d %s avail_cpus %u",  job_id, name, avail_cpus);
 #endif
-	return(avail_cpus);
+	return avail_cpus;
 }
 
 /*
diff --git a/src/plugins/select/cray/basil_alps.h b/src/plugins/select/cray/basil_alps.h
index 6660fbc8cefaf5c5281cda3813a7fd7a265b1a6a..470fa3323d0ae9e1e3fdf1c6b5e86f74c76fa46e 100644
--- a/src/plugins/select/cray/basil_alps.h
+++ b/src/plugins/select/cray/basil_alps.h
@@ -448,7 +448,8 @@ struct basil_rsvn_param {
 				depth,		/* depth > 0,         -d  */
 				nppn,		/* nppn > 0,          -N  */
 				npps,		/* PEs per segment,   -S  */
-				nspn;		/* segments per node, -sn */
+				nspn,		/* segments per node, -sn */
+				nppcu;		/* Processors Per Compute Unit. BASIL 1.3 */
 
 	char				*nodes;		/* NodeParamArray   */
 	struct basil_label		*labels;	/* LabelParamArray  */
@@ -622,7 +623,7 @@ extern void   free_inv(struct basil_inventory *inv);
 
 extern long basil_reserve(const char *user, const char *batch_id,
 			  uint32_t width, uint32_t depth, uint32_t nppn,
-			  uint32_t mem_mb, struct nodespec *ns_head,
+			  uint32_t mem_mb, uint32_t nppcu, struct nodespec *ns_head,
 			  struct basil_accel_param *accel_head);
 extern int basil_confirm(uint32_t rsvn_id, int job_id, uint64_t pagg_id);
 extern const struct basil_rsvn *basil_rsvn_by_id(const struct basil_inventory *inv,
diff --git a/src/plugins/select/cray/basil_interface.c b/src/plugins/select/cray/basil_interface.c
index a91376d9b63d0383e4fa0e102b969b3e2428cdb0..4f48e9ba2f73c38c11302a56b4c43091052bf963 100644
--- a/src/plugins/select/cray/basil_interface.c
+++ b/src/plugins/select/cray/basil_interface.c
@@ -741,6 +741,8 @@ extern int do_basil_reserve(struct job_record *job_ptr)
 	long rc;
 	char *user, batch_id[16];
 	struct basil_accel_param* bap;
+	uint16_t nppcu = 0;
+	uint16_t hwthreads_per_core = 1;
 
 	if (!job_ptr->job_resrcs || job_ptr->job_resrcs->nhosts == 0)
 		return SLURM_SUCCESS;
@@ -773,6 +775,17 @@ extern int do_basil_reserve(struct job_record *job_ptr)
 		node_min_mem = job_ptr->details->pn_min_memory;
 	}
 
+	if (slurmctld_conf.select_type_param & CR_ONE_TASK_PER_CORE) {
+		if ( job_ptr->details && job_ptr->details->mc_ptr && job_ptr->details->mc_ptr->ntasks_per_core == 0xffff ) {
+			nppcu = 1;
+			debug("No explicit ntasks-per-core has been set, using nppcu=1.");
+		}
+	}
+
+	if ( job_ptr->details && job_ptr->details->mc_ptr && job_ptr->details->mc_ptr->ntasks_per_core != 0xffff ) {
+		nppcu = job_ptr->details->mc_ptr->ntasks_per_core;
+	}
+
 	for (i = first_bit; i <= last_bit; i++) {
 		struct node_record *node_ptr = node_record_table_ptr + i;
 		uint32_t basil_node_id;
@@ -836,6 +849,20 @@ extern int do_basil_reserve(struct job_record *job_ptr)
 	for (i = 0; i < job_ptr->job_resrcs->nhosts; i++) {
 		uint32_t node_tasks = job_ptr->job_resrcs->cpus[i] / mppdepth;
 
+		/* NOTE(review): sockets_per_node[] and cores_per_socket[] are
+		 * normally compressed via sock_core_rep_count; confirm direct
+		 * indexing by i is valid here. */
+		if (job_ptr->job_resrcs->sockets_per_node[i] > 0 &&
+		    job_ptr->job_resrcs->cores_per_socket[i] > 0)
+			hwthreads_per_core = job_ptr->job_resrcs->cpus[i] /
+				job_ptr->job_resrcs->sockets_per_node[i] /
+				job_ptr->job_resrcs->cores_per_socket[i];
+		else
+			hwthreads_per_core = 1;	/* do not reuse prior node's value */
+
+		if (nppcu && hwthreads_per_core)
+			node_tasks = node_tasks * nppcu / hwthreads_per_core;
+
 		if (mppnppn && mppnppn < node_tasks)
 			node_tasks = mppnppn;
 		mppwidth += node_tasks;
@@ -850,7 +869,7 @@ extern int do_basil_reserve(struct job_record *job_ptr)
 		bap = NULL;
 
 	rc   = basil_reserve(user, batch_id, mppwidth, mppdepth, mppnppn,
-			     mppmem, ns_head, bap);
+			     mppmem, (uint32_t)nppcu, ns_head, bap);
 	xfree(user);
 	if (rc <= 0) {
 		/* errno value will be resolved by select_g_job_begin() */
diff --git a/src/plugins/select/cray/libalps/basil_request.c b/src/plugins/select/cray/libalps/basil_request.c
index d704ee837e82083be9866447ad4f38f27b691fba..15f66f0baefbbf803191036420a06e5870c75790 100644
--- a/src/plugins/select/cray/libalps/basil_request.c
+++ b/src/plugins/select/cray/libalps/basil_request.c
@@ -88,9 +88,10 @@ static void _rsvn_write_reserve_xml(FILE *fp, struct basil_reservation *r,
 		if (version >= BV_5_1)
 			_write_xml(fp, "  <ReserveParam architecture=\"%s\" "
 				   "width=\"%ld\" depth=\"%ld\" nppn=\"%ld\""
-				   " nppcu=\"0\"",
+				   " nppcu=\"%ld\"",
 				   nam_arch[param->arch],
-				   param->width, param->depth, param->nppn);
+				   param->width, param->depth, param->nppn,
+				   (long)param->nppcu);
 		else
 			_write_xml(fp, "  <ReserveParam architecture=\"%s\" "
 				   "width=\"%ld\" depth=\"%ld\" nppn=\"%ld\"",
diff --git a/src/plugins/select/cray/libalps/do_reserve.c b/src/plugins/select/cray/libalps/do_reserve.c
index 337f092a8267cd6901b86580aaee288154f26616..fc896ab38b008a5a1a1f4b597eb13a8388fd28ec 100644
--- a/src/plugins/select/cray/libalps/do_reserve.c
+++ b/src/plugins/select/cray/libalps/do_reserve.c
@@ -44,7 +44,7 @@ static int _rsvn_add_mem_param(struct basil_rsvn_param *rp, uint32_t mem_mb)
  */
 static int _rsvn_add_params(struct basil_reservation *resv,
 			    uint32_t width, uint32_t depth, uint32_t nppn,
-			    uint32_t mem_mb, char *mppnodes,
+			    uint32_t mem_mb, char *mppnodes, uint32_t nppcu,
 			    struct basil_accel_param *accel)
 {
 	struct basil_rsvn_param *rp = xmalloc(sizeof(*rp));
@@ -57,6 +57,7 @@ static int _rsvn_add_params(struct basil_reservation *resv,
 	rp->depth = depth;
 	rp->nppn  = nppn;
 	rp->nodes = mppnodes;
+	rp->nppcu = nppcu;
 	rp->accel = accel;
 
 	if (mem_mb && _rsvn_add_mem_param(rp, mem_mb) < 0) {
@@ -89,7 +90,7 @@ static struct basil_reservation *_rsvn_new(const char *user,
 					   const char *batch_id,
 					   uint32_t width, uint32_t depth,
 					   uint32_t nppn, uint32_t mem_mb,
-					   char *mppnodes,
+					   uint32_t nppcu, char *mppnodes,
 					   struct basil_accel_param *accel)
 {
 	struct basil_reservation *res;
@@ -105,7 +106,7 @@ static struct basil_reservation *_rsvn_new(const char *user,
 		strncpy(res->batch_id, batch_id, sizeof(res->batch_id));
 
 	if (_rsvn_add_params(res, width, depth, nppn,
-			     mem_mb, mppnodes, accel) < 0) {
+			     mem_mb, mppnodes, nppcu, accel) < 0) {
 		free_rsvn(res);
 		return NULL;
 	}
@@ -127,7 +128,7 @@ static struct basil_reservation *_rsvn_new(const char *user,
  */
 long basil_reserve(const char *user, const char *batch_id,
 		   uint32_t width, uint32_t depth, uint32_t nppn,
-		   uint32_t mem_mb, struct nodespec *ns_head,
+		   uint32_t mem_mb, uint32_t nppcu, struct nodespec *ns_head,
 		   struct basil_accel_param *accel_head)
 {
 	struct basil_reservation *rsvn;
@@ -138,7 +139,7 @@ long basil_reserve(const char *user, const char *batch_id,
 
 	free_nodespec(ns_head);
 	rsvn = _rsvn_new(user, batch_id, width, depth, nppn, mem_mb,
-			 mppnodes, accel_head);
+			 nppcu, mppnodes, accel_head);
 	if (rsvn == NULL)
 		return -BE_INTERNAL;
 
diff --git a/src/plugins/select/cray/libemulate/alps_emulate.c b/src/plugins/select/cray/libemulate/alps_emulate.c
index 9707e53d449d043c84de0f17f3fcb7e6646bbdf0..09d14ec65f5f32675eef8e56fbacdd25e1e41ca7 100644
--- a/src/plugins/select/cray/libemulate/alps_emulate.c
+++ b/src/plugins/select/cray/libemulate/alps_emulate.c
@@ -507,7 +507,7 @@ extern void   free_inv(struct basil_inventory *inv)
 
 extern long basil_reserve(const char *user, const char *batch_id,
 			  uint32_t width, uint32_t depth, uint32_t nppn,
-			  uint32_t mem_mb, struct nodespec *ns_head,
+			  uint32_t mem_mb, uint32_t nppcu, struct nodespec *ns_head,
 			  struct basil_accel_param *accel_head)
 {
 	int i;
@@ -516,8 +516,8 @@ extern long basil_reserve(const char *user, const char *batch_id,
 #if _DEBUG
 	struct nodespec *my_node_spec;
 	info("basil_reserve user:%s batch_id:%s width:%u depth:%u nppn:%u "
-	     "mem_mb:%u",
-	     user, batch_id, width, depth, nppn, mem_mb);
+	     "mem_mb:%u nppcu:%u",
+	     user, batch_id, width, depth, nppn, mem_mb, nppcu);
 	my_node_spec = ns_head;
 	while (my_node_spec) {
 		info("basil_reserve node_spec:start:%u,end:%u",
diff --git a/src/plugins/select/linear/select_linear.c b/src/plugins/select/linear/select_linear.c
index b228974373871c5d126c45ab45b1d0a38cc8f81b..be2967feb658c42fbf9198847bb4e61d7c5dd72d 100644
--- a/src/plugins/select/linear/select_linear.c
+++ b/src/plugins/select/linear/select_linear.c
@@ -503,12 +503,12 @@ static uint16_t _get_avail_cpus(struct job_record *job_ptr, int index)
 	debug2("host %s HW_ cpus %u boards %u sockets %u cores %u threads %u ",
 	       node_ptr->name, cpus, boards, sockets, cores, threads);
 #endif
 
 	avail_cpus = slurm_get_avail_procs(
 		min_sockets, min_cores, min_threads, cpus_per_task,
 		ntasks_per_node, ntasks_per_socket, ntasks_per_core,
 		&cpus, &sockets, &cores, &threads, NULL,
-		CR_CPU, job_ptr->job_id, node_ptr->name);
+		CR_CORE, job_ptr->job_id, node_ptr->name);
 
 #if SELECT_DEBUG
 	debug("avail_cpus index %d = %d (out of %d %d %d %d)",
@@ -532,6 +531,32 @@ static uint16_t _get_total_cpus(int index)
 		return node_ptr->cpus;
 }
 
+static uint16_t _get_total_threads(int index)
+{
+	struct node_record *node_ptr = &(select_node_ptr[index]);
+	if (select_fast_schedule)
+		return node_ptr->config_ptr->threads;
+	else
+		return node_ptr->threads;
+}
+
+/*
+ * _get_ntasks_per_core - Retrieve the value of ntasks_per_core from
+ *	the given job_details record.  If it wasn't set, return 0xffff.
+ *	Intended for use with the _adjust_cpus_nppcu function.
+ */
+
+static uint16_t _get_ntasks_per_core(struct job_details *details) {
+	uint16_t ntasks_per_core;
+
+	if ((details->mc_ptr))
+		ntasks_per_core = details->mc_ptr->ntasks_per_core;
+	else
+		ntasks_per_core = 0xffff;
+
+	return ntasks_per_core;
+}
+
 static job_resources_t *_create_job_resources(int node_cnt)
 {
 	job_resources_t *job_resrcs_ptr;
@@ -553,7 +578,7 @@ static void _build_select_struct(struct job_record *job_ptr, bitstr_t *bitmap)
 {
 	int i, j, k;
 	int first_bit, last_bit;
-	uint32_t node_cpus, total_cpus = 0, node_cnt;
+	uint32_t node_cpus, total_cpus = 0, node_cnt, node_threads;
 	struct node_record *node_ptr;
 	uint32_t job_memory_cpu = 0, job_memory_node = 0;
 	job_resources_t *job_resrcs_ptr;
@@ -586,10 +611,15 @@ static void _build_select_struct(struct job_record *job_ptr, bitstr_t *bitmap)
 		if (!bit_test(bitmap, i))
 			continue;
 		node_ptr = &(select_node_ptr[i]);
-		if (select_fast_schedule)
-			node_cpus = node_ptr->config_ptr->cpus;
-		else
-			node_cpus = node_ptr->cpus;
+		if (select_fast_schedule) {
+			node_cpus    = node_ptr->config_ptr->cpus;
+			node_threads = node_ptr->config_ptr->threads;
+		} else {
+			node_cpus    = node_ptr->cpus;
+			node_threads = node_ptr->threads;
+		}
+
+		node_cpus = _adjust_cpus_nppcu(_get_ntasks_per_core(job_ptr->details), node_threads, node_cpus);
 		job_resrcs_ptr->cpus[j] = node_cpus;
 		if ((k == -1) ||
 		    (job_resrcs_ptr->cpu_array_value[k] != node_cpus)) {
@@ -861,7 +891,7 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 				rem_nodes--;
 				max_nodes--;
 				rem_cpus   -= avail_cpus;
-				total_cpus += _get_total_cpus(index);
+				total_cpus += _adjust_cpus_nppcu(_get_ntasks_per_core(job_ptr->details), _get_total_threads(index), _get_total_cpus(index));
 			} else {	 /* node not required (yet) */
 				bit_clear(bitmap, index);
 				consec_cpus[consec_index] += avail_cpus;
@@ -988,7 +1018,7 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 				max_nodes--;
 				avail_cpus = _get_avail_cpus(job_ptr, i);
 				rem_cpus   -= avail_cpus;
-				total_cpus += _get_total_cpus(i);
+				total_cpus += _adjust_cpus_nppcu(_get_ntasks_per_core(job_ptr->details), _get_total_threads(i), _get_total_cpus(i));
 			}
 			for (i = (best_fit_req - 1);
 			     i >= consec_start[best_fit_location]; i--) {
@@ -1002,7 +1032,7 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 				max_nodes--;
 				avail_cpus = _get_avail_cpus(job_ptr, i);
 				rem_cpus   -= avail_cpus;
-				total_cpus += _get_total_cpus(i);
+				total_cpus += _adjust_cpus_nppcu(_get_ntasks_per_core(job_ptr->details), _get_total_threads(i), _get_total_cpus(i));
 			}
 		} else {
 			for (i = consec_start[best_fit_location];
@@ -1017,7 +1047,7 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 				max_nodes--;
 				avail_cpus = _get_avail_cpus(job_ptr, i);
 				rem_cpus   -= avail_cpus;
-				total_cpus += _get_total_cpus(i);
+				total_cpus += _adjust_cpus_nppcu(_get_ntasks_per_core(job_ptr->details), _get_total_threads(i), _get_total_cpus(i));
 			}
 		}
 		if (job_ptr->details->contiguous ||
@@ -1243,7 +1273,7 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 				bit_set(bitmap, i);
 				alloc_nodes++;
 				rem_cpus -= avail_cpus;
-				total_cpus += _get_total_cpus(i);
+				total_cpus += _adjust_cpus_nppcu(_get_ntasks_per_core(job_ptr->details), _get_total_threads(i), _get_total_cpus(i));
 			}
 		}
 		/* Accumulate additional resources from leafs that
@@ -1278,7 +1308,7 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 				bit_set(bitmap, i);
 				alloc_nodes++;
 				rem_cpus -= _get_avail_cpus(job_ptr, i);
-				total_cpus += _get_total_cpus(i);
+				total_cpus += _adjust_cpus_nppcu(_get_ntasks_per_core(job_ptr->details), _get_total_threads(i), _get_total_cpus(i));
 				if ((alloc_nodes > max_nodes) ||
 				    ((alloc_nodes >= want_nodes) &&
 				     (rem_cpus <= 0)))
@@ -1348,7 +1378,7 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 			bit_set(bitmap, i);
 			alloc_nodes++;
 			rem_cpus -= _get_avail_cpus(job_ptr, i);
-			total_cpus += _get_total_cpus(i);
+			total_cpus += _adjust_cpus_nppcu(_get_ntasks_per_core(job_ptr->details), _get_total_threads(i), _get_total_cpus(i));
 			if ((alloc_nodes > max_nodes) ||
 			    ((alloc_nodes >= want_nodes) && (rem_cpus <= 0)))
 				break;
diff --git a/src/slurmctld/node_scheduler.c b/src/slurmctld/node_scheduler.c
index 93fb420bec3d44846be1511eabf45653dab7f1c4..120b4235e8232ee9cdd0a77d6fd5f1ed3b8ee5b7 100644
--- a/src/slurmctld/node_scheduler.c
+++ b/src/slurmctld/node_scheduler.c
@@ -130,6 +130,22 @@ static bitstr_t *_valid_features(struct job_details *detail_ptr,
 
 static int _fill_in_gres_fields(struct job_record *job_ptr);
 
+/*
+ * _get_ntasks_per_core - Retrieve the value of ntasks_per_core from
+ *	the given job_details record.  If it wasn't set, return 0xffff.
+ *	Intended for use with the _adjust_cpus_nppcu function.
+ */
+static uint16_t _get_ntasks_per_core(struct job_details *details) {
+	uint16_t ntasks_per_core;
+
+	if ((details->mc_ptr))
+		ntasks_per_core = details->mc_ptr->ntasks_per_core;
+	else
+		ntasks_per_core = 0xffff;
+
+	return ntasks_per_core;
+}
+
 /*
  * _build_gres_alloc_string - Fill in the gres_alloc string field for a
  *      given job_record
@@ -2073,7 +2089,7 @@ static int _build_node_list(struct job_record *job_ptr,
 			list_next(config_iterator))) {
 
 		config_filter = 0;
-		if ((detail_ptr->pn_min_cpus     > config_ptr->cpus       ) ||
+		if ((detail_ptr->pn_min_cpus     >  _adjust_cpus_nppcu(_get_ntasks_per_core(detail_ptr), config_ptr->threads, config_ptr->cpus)) ||
 		    ((detail_ptr->pn_min_memory & (~MEM_PER_CPU)) >
 		      config_ptr->real_memory)                               ||
 		    (detail_ptr->pn_min_tmp_disk > config_ptr->tmp_disk))
@@ -2234,7 +2250,7 @@ static void _filter_nodes_in_set(struct node_set *node_set_ptr,
 				continue;
 
 			node_con = node_record_table_ptr[i].config_ptr;
-			if ((job_con->pn_min_cpus     <= node_con->cpus)    &&
+			if ((job_con->pn_min_cpus     <= _adjust_cpus_nppcu(_get_ntasks_per_core(job_con), node_con->threads, node_con->cpus))    &&
 			    ((job_con->pn_min_memory & (~MEM_PER_CPU)) <=
 			      node_con->real_memory)                         &&
 			    (job_con->pn_min_tmp_disk <= node_con->tmp_disk))
@@ -2263,7 +2279,7 @@ static void _filter_nodes_in_set(struct node_set *node_set_ptr,
 				continue;
 
 			node_ptr = &node_record_table_ptr[i];
-			if ((job_con->pn_min_cpus     <= node_ptr->cpus)    &&
+			if ((job_con->pn_min_cpus     <= _adjust_cpus_nppcu(_get_ntasks_per_core(job_con), node_ptr->threads, node_ptr->cpus))    &&
 			    ((job_con->pn_min_memory & (~MEM_PER_CPU)) <=
 			      node_ptr->real_memory)                         &&
 			    (job_con->pn_min_tmp_disk <= node_ptr->tmp_disk))