diff --git a/NEWS b/NEWS
index 5c761dd99588056dbb88c65f97d89e7fdf1cff98..6180b26906b1dd79d4c4d4fda0e2007b73916f8f 100644
--- a/NEWS
+++ b/NEWS
@@ -104,6 +104,7 @@ documents those changes that are of interest to users and admins.
     Fenoy, BSC.
  -- Avoid orphan job step if slurmctld is down when a job step completes.
  -- Fix Lua link order, patch from Pär Andersson, NSC.
+ -- Set SLURM_CPUS_PER_TASK=1 when user specifies --cpus-per-task=1.
 
 * Changes in SLURM 2.3.1
 ========================
diff --git a/src/common/env.c b/src/common/env.c
index c0aa4426d3d215b6867805850314cc281b41f7bd..7691d1b850fd3a0e29aff30fd2060ca7ca8a9ff0 100644
--- a/src/common/env.c
+++ b/src/common/env.c
@@ -1121,15 +1121,11 @@ env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch,
 	env_array_overwrite_fmt(dest, "SLURM_NNODES", "%u", num_nodes);
 	env_array_overwrite_fmt(dest, "SLURM_NODELIST", "%s", batch->nodes);
 
-	if((batch->cpus_per_task != 0) &&
-	   (batch->cpus_per_task != (uint16_t) NO_VAL))
+	if ((batch->cpus_per_task != 0) &&
+	    (batch->cpus_per_task != (uint16_t) NO_VAL))
 		cpus_per_task = batch->cpus_per_task;
 	else
 		cpus_per_task = 1;	/* default value */
-	if (cpus_per_task > 1) {
-		env_array_overwrite_fmt(dest, "SLURM_CPUS_PER_TASK", "%u",
-					cpus_per_task);
-	}
 
 	if(num_tasks) {
 		env_array_overwrite_fmt(dest, "SLURM_NTASKS", "%u",
diff --git a/src/salloc/opt.c b/src/salloc/opt.c
index e0a99ed124e55099205b6717bd4717f1435f8791..a546c4619dd3be85e8357abf4f81f72928e63c20 100644
--- a/src/salloc/opt.c
+++ b/src/salloc/opt.c
@@ -283,7 +283,7 @@ static void _opt_default()
 
 	opt.ntasks = 1;
 	opt.ntasks_set = false;
-	opt.cpus_per_task = 1;
+	opt.cpus_per_task = 0;
 	opt.cpus_set = false;
 	opt.min_nodes = 1;
 	opt.max_nodes = 0;
@@ -718,8 +718,7 @@ void set_options(const int argc, char **argv)
 			break;
 		case 'c':
 			opt.cpus_set = true;
-			opt.cpus_per_task =
-				_get_int(optarg, "cpus-per-task");
+			opt.cpus_per_task = _get_int(optarg, "cpus-per-task");
 			break;
 		case 'C':
 			xfree(opt.constraints);
@@ -1261,7 +1260,7 @@ static bool _opt_verify(void)
 		opt.ntasks_set = 1;
 	}
 
-	if (opt.mincpus < opt.cpus_per_task)
+	if (opt.cpus_set && (opt.mincpus < opt.cpus_per_task))
 		opt.mincpus = opt.cpus_per_task;
 
 	if ((opt.euid != (uid_t) -1) && (opt.euid != opt.uid))
@@ -1283,7 +1282,7 @@ static bool _opt_verify(void)
 		verified = false;
 	}
 
-	if (opt.cpus_per_task <= 0) {
+	if (opt.cpus_set && (opt.cpus_per_task <= 0)) {
 		error("invalid number of cpus per task (-c %d)",
 		      opt.cpus_per_task);
 		verified = false;
diff --git a/src/salloc/salloc.c b/src/salloc/salloc.c
index ff800f5b81b683ac8a569d432cefefc1453eab5b..fc02fc03f968557ad202060b1d471c8e858dc3cd 100644
--- a/src/salloc/salloc.c
+++ b/src/salloc/salloc.c
@@ -394,7 +394,7 @@ int main(int argc, char *argv[])
 		/* keep around for old scripts */
 		env_array_append_fmt(&env, "SLURM_NPROCS", "%d", opt.ntasks);
 	}
-	if (opt.cpus_per_task > 1) {
+	if (opt.cpus_set) {
 		env_array_append_fmt(&env, "SLURM_CPUS_PER_TASK", "%d",
 				     opt.cpus_per_task);
 	}
@@ -700,8 +700,10 @@ static int _fill_job_desc_from_opts(job_desc_msg_t *desc)
 	if (opt.overcommit) {
 		desc->min_cpus = opt.min_nodes;
 		desc->overcommit = opt.overcommit;
-	} else
+	} else if (opt.cpus_set)
 		desc->min_cpus = opt.ntasks * opt.cpus_per_task;
+	else
+		desc->min_cpus = opt.ntasks;
 	if (opt.ntasks_set)
 		desc->num_tasks = opt.ntasks;
 	if (opt.cpus_set)
diff --git a/src/sbatch/opt.c b/src/sbatch/opt.c
index 02775047a4afdd3950c9ece75ce5493362c7348e..d60ebe449b499ae8ed4c93babc9d83f282aae1a9 100644
--- a/src/sbatch/opt.c
+++ b/src/sbatch/opt.c
@@ -292,7 +292,7 @@ static void _opt_default()
 
 	opt.ntasks = 1;
 	opt.ntasks_set = false;
-	opt.cpus_per_task = 1;
+	opt.cpus_per_task = 0;
 	opt.cpus_set = false;
 	opt.min_nodes = 1;
 	opt.max_nodes = 0;
@@ -1160,8 +1160,7 @@ static void _set_options(int argc, char **argv)
 			break;
 		case 'c':
 			opt.cpus_set = true;
-			opt.cpus_per_task =
-				_get_int(optarg, "cpus-per-task");
+			opt.cpus_per_task = _get_int(optarg, "cpus-per-task");
 			break;
 		case 'C':
 			xfree(opt.constraints);
@@ -2135,7 +2134,7 @@ static bool _opt_verify(void)
 		opt.ntasks_set = 1;
 	}
 
-	if (opt.mincpus < opt.cpus_per_task)
+	if (opt.cpus_set && (opt.mincpus < opt.cpus_per_task))
 		opt.mincpus = opt.cpus_per_task;
 
 	if ((opt.job_name == NULL) && (opt.script_argc > 0))
@@ -2149,7 +2148,7 @@ static bool _opt_verify(void)
 		verified = false;
 	}
 
-	if (opt.cpus_per_task <= 0) {
+	if (opt.cpus_set && (opt.cpus_per_task <= 0)) {
 		error("invalid number of cpus per task (-c %d)",
 		      opt.cpus_per_task);
 		verified = false;
@@ -2226,22 +2225,25 @@
 		}
 	}
 
+	if (opt.cpus_set &&
+	    setenvf(NULL, "SLURM_CPUS_PER_TASK", "%d", opt.cpus_per_task)) {
+		error("Can't set SLURM_CPUS_PER_TASK env variable");
+	}
+
 	_set_distribution(opt.distribution, &dist, &lllp_dist);
-	if(dist)
-		if (setenvf(NULL, "SLURM_DISTRIBUTION", "%s", dist)) {
-			error("Can't set SLURM_DISTRIBUTION env variable");
-		}
+	if (dist &&
+	    setenvf(NULL, "SLURM_DISTRIBUTION", "%s", dist)) {
+		error("Can't set SLURM_DISTRIBUTION env variable");
+	}
 
-	if(opt.distribution == SLURM_DIST_PLANE)
-		if (setenvf(NULL, "SLURM_DIST_PLANESIZE", "%d",
-			    opt.plane_size)) {
-			error("Can't set SLURM_DIST_PLANESIZE env variable");
-		}
+	if ((opt.distribution == SLURM_DIST_PLANE) &&
+	    setenvf(NULL, "SLURM_DIST_PLANESIZE", "%d", opt.plane_size)) {
+		error("Can't set SLURM_DIST_PLANESIZE env variable");
+	}
 
-	if(lllp_dist)
-		if (setenvf(NULL, "SLURM_DIST_LLLP", "%s", lllp_dist)) {
-			error("Can't set SLURM_DIST_LLLP env variable");
-		}
+	if (lllp_dist && setenvf(NULL, "SLURM_DIST_LLLP", "%s", lllp_dist)) {
+		error("Can't set SLURM_DIST_LLLP env variable");
+	}
 
 	/* bound threads/cores from ntasks_cores/sockets */
 	if (opt.ntasks_per_core > 0) {
@@ -2664,8 +2668,8 @@ static void _opt_list(void)
 	info("cwd               : %s", opt.cwd);
 	info("ntasks            : %d %s", opt.ntasks,
 		opt.ntasks_set ? "(set)" : "(default)");
-	info("cpus_per_task     : %d %s", opt.cpus_per_task,
-		opt.cpus_set ? "(set)" : "(default)");
+	if (opt.cpus_set)
+		info("cpus_per_task     : %d", opt.cpus_per_task);
 	if (opt.max_nodes) {
 		info("nodes             : %d-%d",
 		     opt.min_nodes, opt.max_nodes);
diff --git a/src/sbatch/sbatch.c b/src/sbatch/sbatch.c
index 78c3af720769d1c022bcb11a10bba82431415c04..e562f815eced66e3307768d162a43bc39eb8f555 100644
--- a/src/sbatch/sbatch.c
+++ b/src/sbatch/sbatch.c
@@ -351,8 +351,10 @@ static int _fill_job_desc_from_opts(job_desc_msg_t *desc)
 	if (opt.overcommit) {
 		desc->min_cpus = MAX(opt.min_nodes, 1);
 		desc->overcommit = opt.overcommit;
-	} else
+	} else if (opt.cpus_set)
 		desc->min_cpus = opt.ntasks * opt.cpus_per_task;
+	else
+		desc->min_cpus = opt.ntasks;
 	desc->max_cpus = desc->max_cpus;
 
 	if (opt.ntasks_set)
diff --git a/src/srun/allocate.c b/src/srun/allocate.c
index d3799155ada503da1e201491e9c6fad19a86008f..c5c153832e6d14728da5ce9ffbb4ca395e4e91e7 100644
--- a/src/srun/allocate.c
+++ b/src/srun/allocate.c
@@ -692,8 +692,10 @@ job_desc_msg_create_from_opts (void)
 	if (opt.overcommit) {
 		j->min_cpus    = opt.min_nodes;
 		j->overcommit  = opt.overcommit;
-	} else
+	} else if (opt.cpus_set)
 		j->min_cpus    = opt.ntasks * opt.cpus_per_task;
+	else
+		j->min_cpus    = opt.ntasks;
 	if (opt.ntasks_set)
 		j->num_tasks   = opt.ntasks;
 
@@ -797,8 +799,10 @@ create_job_step(srun_job_t *job, bool use_all_cpus)
 		job->ctx_params.cpu_count = job->cpu_count;
 	else if (opt.overcommit)
 		job->ctx_params.cpu_count = job->ctx_params.min_nodes;
+	else if (opt.cpus_set)
+		job->ctx_params.cpu_count = opt.ntasks * opt.cpus_per_task;
 	else
-		job->ctx_params.cpu_count = opt.ntasks*opt.cpus_per_task;
+		job->ctx_params.cpu_count = opt.ntasks;
 
 	job->ctx_params.relative = (uint16_t)opt.relative;
 	job->ctx_params.ckpt_interval = (uint16_t)opt.ckpt_interval;
diff --git a/src/srun/opt.c b/src/srun/opt.c
index d119dcad2b09b1e427746e687385cd497b7060a8..5a9485fdcbf8ac215a8fee2a556785786ac6648a 100644
--- a/src/srun/opt.c
+++ b/src/srun/opt.c
@@ -323,7 +323,7 @@ static void _opt_default()
 
 	opt.ntasks = 1;
 	opt.ntasks_set = false;
-	opt.cpus_per_task = 1;
+	opt.cpus_per_task = 0;
 	opt.cpus_set = false;
 	opt.min_nodes = 1;
 	opt.max_nodes = 0;
@@ -1847,13 +1847,13 @@ static bool _opt_verify(void)
 		verified = false;
 	}
 
-	if (opt.pn_min_cpus < opt.cpus_per_task)
+	if (opt.cpus_set && (opt.pn_min_cpus < opt.cpus_per_task))
 		opt.pn_min_cpus = opt.cpus_per_task;
 
 	if (opt.argc > 0)
 		opt.cmd_name = base_name(opt.argv[0]);
 
-	if(!opt.nodelist) {
+	if (!opt.nodelist) {
 		if((opt.nodelist = xstrdup(getenv("SLURM_HOSTFILE")))) {
 			/* make sure the file being read in has a / in
 			   it to make sure it is a file in the
@@ -1938,7 +1938,7 @@ static bool _opt_verify(void)
 		verified = false;
 	}
 
-	if (opt.cpus_per_task < 0) {
+	if (opt.cpus_set && (opt.cpus_per_task <= 0)) {
 		error("invalid number of cpus per task (-c %d)",
 		      opt.cpus_per_task);
 		verified = false;
@@ -2332,8 +2332,8 @@ static void _opt_list(void)
 	info("cwd            : %s", opt.cwd);
 	info("ntasks         : %d %s", opt.ntasks,
 	     opt.ntasks_set ? "(set)" : "(default)");
-	info("cpus_per_task  : %d %s", opt.cpus_per_task,
-	     opt.cpus_set ? "(set)" : "(default)");
+	if (opt.cpus_set)
+		info("cpus_per_task  : %d", opt.cpus_per_task);
 	if (opt.max_nodes)
 		info("nodes          : %d-%d", opt.min_nodes, opt.max_nodes);
 	else {
diff --git a/src/srun/srun.c b/src/srun/srun.c
index c18f4aaf9eeef2ff17d6951eb46ff4ba044d62d9..4acfe04c499e65f0f225fdee9bf75768306aafed 100644
--- a/src/srun/srun.c
+++ b/src/srun/srun.c
@@ -394,7 +394,8 @@ int srun(int ac, char **av)
 	/*
 	 *  Enhance environment for job
 	 */
-	env->cpus_per_task = opt.cpus_per_task;
+	if (opt.cpus_set)
+		env->cpus_per_task = opt.cpus_per_task;
 	if (opt.ntasks_per_node != NO_VAL)
 		env->ntasks_per_node = opt.ntasks_per_node;
 	if (opt.ntasks_per_socket != NO_VAL)
@@ -502,7 +503,10 @@ int srun(int ac, char **av)
 	if (opt.acctg_freq >= 0)
 		launch_params.acctg_freq = opt.acctg_freq;
 	launch_params.pty = opt.pty;
-	launch_params.cpus_per_task	= opt.cpus_per_task;
+	if (opt.cpus_set)
+		launch_params.cpus_per_task	= opt.cpus_per_task;
+	else
+		launch_params.cpus_per_task	= 1;
 	launch_params.task_dist         = opt.distribution;
 	launch_params.ckpt_dir		= opt.ckpt_dir;
 	launch_params.restart_dir       = opt.restart_dir;