diff --git a/NEWS b/NEWS
index 33ac2b275db6de19d15cf2fccd5cbe0ca8e041a1..21c4f2ef8665bae601043c9bd378f55590e47fe1 100644
--- a/NEWS
+++ b/NEWS
@@ -8,7 +8,9 @@ documents those changes that are of interest to users and admins.
  -- Honor ntasks-per-node option with exclusive node allocations.
  -- sched/backfill - Prevent invalid memory reference if bf_continue option is
     configured and slurm is reconfigured during one of the sleep cycles or if
-    there are any changes to the partition configuration.
+    there are any changes to the partition configuration or if the normal
+    scheduler runs and starts a job that the backfill scheduler is actively
+    working on.
  -- Update man pages information about acct-freq and JobAcctGatherFrequency
     to reflect only the latest supported format.
  -- Minor document update to include note about PrivateData=Usage for the
@@ -25,6 +27,15 @@ documents those changes that are of interest to users and admins.
  -- init scripts ignore quotes around Pid file name specifications.
  -- Fixed typo about command case in quickstart.html.
  -- task/cgroup - handle new cpuset files, similar to commit c4223940.
+ -- Replace the tempnam() function call with mkstemp().
+ -- Fix for --cpu_bind=map_cpu/mask_cpu/map_ldom/mask_ldom plus
+    --mem_bind=map_mem/mask_mem options, broken in 2.6.2.
+ -- Restore default behavior of allocating cores to jobs on a cyclic basis
+    across the sockets unless SelectTypeParameters=CR_CORE_DEFAULT_DIST_BLOCK
+    or user specifies other distribution options.
+ -- Enforce the JobRequeue configuration parameter on node failure.
+    Previously the job was always requeued.
+ -- acct_gather_energy/ipmi - Add delay before retry on read error.
  -- select/cons_res with GRES and multiple threads per core, fix possible
     infinite loop.
 
diff --git a/doc/man/man5/slurm.conf.5 b/doc/man/man5/slurm.conf.5
index f741a1d7c5ab18e446c795a5587c1e3e2cf5a01c..c3e564d5a1f5c2b50adf0b6b3f780b148340a721 100644
--- a/doc/man/man5/slurm.conf.5
+++ b/doc/man/man5/slurm.conf.5
@@ -1258,6 +1258,8 @@ Supported values are "YES" and "NO".  The default value is "NO".
 \fBPriorityFlags\fR
 Flags to modify priority behavior
 Applicable only if PriorityType=priority/multifactor.
+The keywords below have no associated value
+(e.g. "PriorityFlags=ACCRUE_ALWAYS,SMALL_RELATIVE_TO_TIME").
 .RS
 .TP 17
 \fBACCRUE_ALWAYS\fR
@@ -1746,10 +1748,17 @@ Multiple options may be comma separated.
 .TP
 \fBdefault_queue_depth=#\fR
 The default number of jobs to attempt scheduling (i.e. the queue depth) when a
-running job completes or other routine actions occur. The full queue will be
-tested on a less frequent basis. The default value is 100.
+running job completes or other routine actions occur.
+The full queue will be tested on a less frequent basis.
+The default value is 100.
 In the case of large clusters (more than 1000 nodes), configuring a relatively
 small value may be desirable.
+Specifying a large value (say 1000 or higher) can be expected to result in
+poor system responsiveness since this scheduling logic holds locks that
+block other events from being processed.
+It is generally better to let the backfill scheduler process a larger number
+of jobs (see \fBmax_job_bf\fR, \fBbf_continue\fR and other options here for
+more information).
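+For example, a large cluster might start from something like:
+.br
+SchedulerParameters=default_queue_depth=50,bf_continue,max_job_bf=100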
 .TP
 \fBdefer\fR
 Setting this option will avoid attempting to schedule each job
@@ -1804,6 +1813,12 @@ This option applies only to \fBSchedulerType=sched/backfill\fR.
 The number of minutes into the future to look when considering jobs to schedule.
 Higher values result in more overhead and less responsiveness.
 The default value is 1440 minutes (one day).
+A value at least as long as the highest allowed time limit is generally
+advisable to prevent job starvation.
+If the value of \fBbf_window\fR is increased, it is generally advisable to
+also increase \fBbf_resolution\fR in order to limit the amount of data
+managed by the backfill scheduler.
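+For example, with a maximum time limit of 30 days, \fBbf_window=43200\fR
+(30 days in minutes) paired with a coarser \fBbf_resolution\fR would be a
+reasonable starting point.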
 This option applies only to \fBSchedulerType=sched/backfill\fR.
 .TP
 \fBmax_job_bf=#\fR
@@ -1813,9 +1828,6 @@ Higher values result in more overhead and less responsiveness.
 Until an attempt is made to backfill schedule a job, its expected
 initiation time value will not be set.
 The default value is 50.
-In the case of large clusters (more than 1000 nodes) configured with
-\fBSelectType=select/cons_res\fR, configuring a relatively small value may be
-desirable.
 This option applies only to \fBSchedulerType=sched/backfill\fR.
 .TP
 \fBmax_depend_depth=#\fR
diff --git a/src/common/slurm_protocol_util.c b/src/common/slurm_protocol_util.c
index 92a42cb76e4a58e76322c407b227d86ca5c636eb..33c880b3ad3a2f487accab7ae8bdb9ba6bf6b35f 100644
--- a/src/common/slurm_protocol_util.c
+++ b/src/common/slurm_protocol_util.c
@@ -125,7 +125,7 @@ int check_header_version(header_t * header)
  */
 void init_header(header_t *header, slurm_msg_t *msg, uint16_t flags)
 {
-	memset(header, 0, sizeof(header));
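+	/* header is a pointer, so sizeof(header) is only the size of a
+	 * pointer; sizeof(header_t) clears the whole structure. */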
+	memset(header, 0, sizeof(header_t));
 	/* Since the slurmdbd could talk to a host of different
 	   versions of slurm this needs to be kept current when the
 	   protocol version changes. */
diff --git a/src/common/slurm_resource_info.c b/src/common/slurm_resource_info.c
index 17c7641d06187c0ac2fbfb7e3be5f35d943021aa..c7feeaf01ad72acf172f839fa247c53efbadfb7b 100644
--- a/src/common/slurm_resource_info.c
+++ b/src/common/slurm_resource_info.c
@@ -305,6 +305,7 @@ int slurm_verify_cpu_bind(const char *arg, char **cpu_bind,
 		           (strncasecmp(tok, "mapcpu", 6) == 0)) {
 			char *list;
 			list = strsep(&tok, ":=");
+			/* NOT redundant: the 1st strsep() got the keyword */
+			list = strsep(&tok, ":=");
 			_clear_then_set((int *)flags, bind_bits, CPU_BIND_MAP);
 			xfree(*cpu_bind);
 			if (list && *list) {
@@ -319,6 +320,7 @@ int slurm_verify_cpu_bind(const char *arg, char **cpu_bind,
 		           (strncasecmp(tok, "maskcpu", 7) == 0)) {
 			char *list;
 			list = strsep(&tok, ":=");
+			/* NOT redundant: the 1st strsep() got the keyword */
+			list = strsep(&tok, ":=");
 			_clear_then_set((int *)flags, bind_bits, CPU_BIND_MASK);
 			xfree(*cpu_bind);
 			if (list && *list) {
@@ -337,6 +339,7 @@ int slurm_verify_cpu_bind(const char *arg, char **cpu_bind,
 		           (strncasecmp(tok, "mapldom", 7) == 0)) {
 			char *list;
 			list = strsep(&tok, ":=");
+			/* NOT redundant: the 1st strsep() got the keyword */
+			list = strsep(&tok, ":=");
 			_clear_then_set((int *)flags, bind_bits,
 					CPU_BIND_LDMAP);
 			xfree(*cpu_bind);
@@ -352,6 +355,7 @@ int slurm_verify_cpu_bind(const char *arg, char **cpu_bind,
 		           (strncasecmp(tok, "maskldom", 8) == 0)) {
 			char *list;
 			list = strsep(&tok, ":=");
+			/* NOT redundant: the 1st strsep() got the keyword */
+			list = strsep(&tok, ":=");
 			_clear_then_set((int *)flags, bind_bits,
 					CPU_BIND_LDMASK);
 			xfree(*cpu_bind);
@@ -498,6 +502,7 @@ int slurm_verify_mem_bind(const char *arg, char **mem_bind,
 		           (strncasecmp(tok, "mapmem", 6) == 0)) {
 			char *list;
 			list = strsep(&tok, ":=");
+			/* NOT redundant: the 1st strsep() got the keyword */
+			list = strsep(&tok, ":=");
 			_clear_then_set((int *)flags, bind_bits, MEM_BIND_MAP);
 			xfree(*mem_bind);
 			if (list && *list) {
@@ -511,6 +516,7 @@ int slurm_verify_mem_bind(const char *arg, char **mem_bind,
 		           (strncasecmp(tok, "maskmem", 7) == 0)) {
 			char *list;
 			list = strsep(&tok, ":=");
+			/* NOT redundant: the 1st strsep() got the keyword */
+			list = strsep(&tok, ":=");
 			_clear_then_set((int *)flags, bind_bits, MEM_BIND_MASK);
 			xfree(*mem_bind);
 			if (list && *list) {
diff --git a/src/common/timers.c b/src/common/timers.c
index 04be43ff606a020c6a00d5a16f2e44eefac81367..01d69a37be13e81c283c92c23b813b28033424d4 100644
--- a/src/common/timers.c
+++ b/src/common/timers.c
@@ -63,13 +63,13 @@ extern void slurm_diff_tv_str(struct timeval *tv1, struct timeval *tv2,
 		if (!limit)
 			limit = 1000000;
 		if (*delta_t > limit) {
-			if (!localtime_r(&tv2->tv_sec, &tm))
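+			/* tv1 is the start of the interval; report when
+			 * the slow operation began, not when it ended. */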
+			if (!localtime_r(&tv1->tv_sec, &tm))
 				fprintf(stderr, "localtime_r() failed\n");
 			if (strftime(p, sizeof(p), "%T", &tm) == 0)
 				fprintf(stderr, "strftime() returned 0\n");
 			verbose("Warning: Note very large processing "
 				"time from %s: %s began=%s.%3.3d",
-				from, tv_str, p, (int)(tv2->tv_usec / 1000));
+				from, tv_str, p, (int)(tv1->tv_usec / 1000));
 		}
 	}
 }
diff --git a/src/plugins/acct_gather_energy/ipmi/acct_gather_energy_ipmi.c b/src/plugins/acct_gather_energy/ipmi/acct_gather_energy_ipmi.c
index 5a923c2a95b52078f9e6d7b44ed9126a288129db..0f744fc12160a880fd80534f628375de9aad0e2f 100644
--- a/src/plugins/acct_gather_energy/ipmi/acct_gather_energy_ipmi.c
+++ b/src/plugins/acct_gather_energy/ipmi/acct_gather_energy_ipmi.c
@@ -528,10 +528,11 @@ static int _thread_update_node_energy(void)
 static int _thread_init(void)
 {
 	static bool first = true;
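+	/* Cache the result of the first initialization so that later
+	 * calls report its real outcome instead of SLURM_SUCCESS. */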
+	static int first_init = SLURM_FAILURE;
 	int rc = SLURM_SUCCESS;
 
 	if (!first)
-		return rc;
+		return first_init;
 	first = false;
 
 	if (_init_ipmi_config() != SLURM_SUCCESS) {
@@ -561,6 +562,9 @@ static int _thread_init(void)
 
 	if (debug_flags & DEBUG_FLAG_ENERGY)
 		info("%s thread init", plugin_name);
+
+	first_init = SLURM_SUCCESS;
+
 	return rc;
 }
 
@@ -617,7 +621,10 @@ static void *_thread_ipmi_run(void *no_data)
 	//loop until slurm stop
 	while (!flag_energy_accounting_shutdown) {
 		time_lost = (int)(time(NULL) - last_update_time);
-		_task_sleep(slurm_ipmi_conf.freq - time_lost);
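+		/* If the last update is already overdue (e.g. after a read
+		 * error), retry after a short delay rather than passing a
+		 * negative interval to _task_sleep(). */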
+		if (time_lost <= slurm_ipmi_conf.freq)
+			_task_sleep(slurm_ipmi_conf.freq - time_lost);
+		else
+			_task_sleep(1);
 		slurm_mutex_lock(&ipmi_mutex);
 		_thread_update_node_energy();
 		slurm_mutex_unlock(&ipmi_mutex);
@@ -806,8 +813,8 @@ extern int acct_gather_energy_p_get_data(enum acct_energy_type data_type,
 	case ENERGY_DATA_JOULES_TASK:
 		slurm_mutex_lock(&ipmi_mutex);
 		if (_is_thread_launcher()) {
-			_thread_init();
-			_thread_update_node_energy();
+			if (_thread_init() == SLURM_SUCCESS)
+				_thread_update_node_energy();
 		} else
 			_get_joules_task(10); /* Since we don't have
 						 access to the
diff --git a/src/plugins/mpi/pmi2/spawn.c b/src/plugins/mpi/pmi2/spawn.c
index 98b1194c88f70535cce6da734d7561680d7391b0..7553b02c80f14b3535f818c60792abe46db2d3b4 100644
--- a/src/plugins/mpi/pmi2/spawn.c
+++ b/src/plugins/mpi/pmi2/spawn.c
@@ -525,17 +525,17 @@ static int
 _exec_srun_multiple(spawn_req_t *req, char **env)
 {
 	int argc, ntasks, i, j, spawn_cnt, fd;
-	char **argv = NULL, *multi_prog = NULL, *buf = NULL;
+	char **argv = NULL, *buf = NULL;
 	spawn_subcmd_t *subcmd = NULL;
+	char fbuf[128];
 
 	debug3("mpi/pmi2: in _exec_srun_multiple");
 	/* create a tmp multi_prog file */
 	/* TODO: how to delete the file? */
-	multi_prog = tempnam(NULL, NULL);
-	fd = open(multi_prog, O_WRONLY | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
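+	/* mkstemp() fills in the trailing XXXXXX and opens the file
+	 * exclusively, avoiding the race inherent in tempnam(). */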
+	snprintf(fbuf, sizeof(fbuf), "/tmp/%d.XXXXXX", getpid());
+	fd = mkstemp(fbuf);
 	if (fd < 0) {
-		error("mpi/pmi2: failed to open multi-prog file %s: %m",
-		      multi_prog);
+		error("mpi/pmi2: failed to open multi-prog file %s: %m", fbuf);
 		return SLURM_ERROR;
 	}
 	ntasks = 0;
@@ -576,7 +576,7 @@ _exec_srun_multiple(spawn_req_t *req, char **env)
 			   job_info.srun_opt->nodelist);
 	}
 	argv[j ++] = "--multi-prog";
-	argv[j ++] = multi_prog;
+	argv[j ++] = fbuf;
 	argv[j ++] = NULL;
 
 	debug3("mpi/mpi2: to execve");
diff --git a/src/plugins/sched/backfill/backfill.c b/src/plugins/sched/backfill/backfill.c
index cfa9f5d4d6aef0387e4cc3b075332770546730f9..62d1c9d6271d6f6f77e43d3fd0ab97095f41f4d7 100644
--- a/src/plugins/sched/backfill/backfill.c
+++ b/src/plugins/sched/backfill/backfill.c
@@ -843,6 +843,7 @@ static int _attempt_backfill(void)
 		later_start = now;
  TRY_LATER:
 		if ((time(NULL) - sched_start) >= sched_timeout) {
+			uint32_t save_job_id = job_ptr->job_id;
 			uint32_t save_time_limit = job_ptr->time_limit;
 			job_ptr->time_limit = orig_time_limit;
 			if (debug_flags & DEBUG_FLAG_BACKFILL) {
@@ -862,6 +863,18 @@ static int _attempt_backfill(void)
 				rc = 1;
 				break;
 			}
+
+			/* With bf_continue configured, the original job could
+			 * have been scheduled or cancelled and purged.
+			 * Revalidate the job record here. */
+			if ((job_ptr->magic  != JOB_MAGIC) ||
+			    (job_ptr->job_id != save_job_id))
+				continue;
+			if (!IS_JOB_PENDING(job_ptr))
+				continue;
+			if (!avail_front_end(job_ptr))
+				continue;	/* No available frontend */
+
 			job_ptr->time_limit = save_time_limit;
 			/* Reset backfill scheduling timers, resume testing */
 			sched_start = time(NULL);
diff --git a/src/plugins/select/cons_res/dist_tasks.c b/src/plugins/select/cons_res/dist_tasks.c
index d793099ea4f6a74907108baa1619680c809d630a..2c10e1f842ae37099b3b47bc3510157e0040cfe0 100644
--- a/src/plugins/select/cons_res/dist_tasks.c
+++ b/src/plugins/select/cons_res/dist_tasks.c
@@ -675,8 +675,7 @@ static int _cyclic_sync_core_bitmap(struct job_record *job_ptr,
 	bool *sock_used, *sock_avoid;
 	bool alloc_cores = false, alloc_sockets = false;
 	uint16_t ntasks_per_core = 0xffff, ntasks_per_socket = 0xffff;
-	int error_code = SLURM_SUCCESS, socket_best_fit;
-	uint32_t total_cpus, *cpus_cnt;
+	int error_code = SLURM_SUCCESS;
 
 	if ((job_res == NULL) || (job_res->core_bitmap == NULL))
 		return error_code;
@@ -749,48 +748,33 @@ static int _cyclic_sync_core_bitmap(struct job_record *job_ptr,
 		core_cnt = 0;
 		cpus  = job_res->cpus[i];
 
-		/* Pack job onto socket(s) with best fit */
-		socket_best_fit = -1;
-		total_cpus = 0;
-		cpus_cnt = xmalloc(sizeof(uint32_t)* sockets);
-		for (s = 0; s < sockets; s++) {
-			for (j = sock_start[s]; j < sock_end[s]; j++) {
-				if (bit_test(core_map, j))
-					cpus_cnt[s] += vpus;
-			}
-			total_cpus += cpus_cnt[s];
-		}
-		for (s = 0; s < sockets && total_cpus > cpus; s++) {
-			if ((ntasks_per_socket != 0xffff) &&
-			    (cpus_cnt[s] > ntasks_per_socket)) {
-				int x_cpus = cpus_cnt[s] - ntasks_per_socket;
-				x_cpus = MIN(x_cpus, (total_cpus - cpus));
-				cpus_cnt[s] -= x_cpus;
-				total_cpus  -= x_cpus;
-			}
-			if ((cpus_cnt[s] >= cpus) &&
-			    ((socket_best_fit == -1) ||
-			     (cpus_cnt[s] < cpus_cnt[socket_best_fit])))
-				socket_best_fit = s;
-		}
-		if (socket_best_fit != -1) {
-			/* Use one socket with best fit, avoid all others */
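+		/* Prune sockets only when --ntasks-per-socket was given;
+		 * otherwise cores are allocated cyclically across all of
+		 * the sockets, which is the default distribution. */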
+		if (ntasks_per_socket != 0xffff) {
+			int x_cpus;
+			uint32_t total_cpus = 0;
+			uint32_t *cpus_cnt = xmalloc(sizeof(uint32_t)* sockets);
 			for (s = 0; s < sockets; s++) {
-				if (s != socket_best_fit)
-					sock_avoid[s] = true;
+				for (j = sock_start[s]; j < sock_end[s]; j++) {
+					if (bit_test(core_map, j))
+						cpus_cnt[s] += vpus;
+				}
+				total_cpus += cpus_cnt[s];
+			}
+			for (s = 0; s < sockets && total_cpus > cpus; s++) {
+				if (cpus_cnt[s] > ntasks_per_socket) {
+					x_cpus = cpus_cnt[s] -
+						 ntasks_per_socket;
+					cpus_cnt[s] = ntasks_per_socket;
+					total_cpus -= x_cpus;
+				}
 			}
-			total_cpus = cpus;
-		} else if (ntasks_per_socket != 0xffff) {
-			/* Avoid sockets that can't start ntasks */
 			for (s = 0; s < sockets && total_cpus > cpus; s++) {
 				if ((cpus_cnt[s] <= ntasks_per_socket) &&
-				    ((total_cpus - cpus_cnt[s]) >= cpus)) {
+				    (total_cpus - cpus_cnt[s] >= cpus)) {
 					sock_avoid[s] = true;
 					total_cpus -= cpus_cnt[s];
 				}
 			}
+			xfree(cpus_cnt);
 		}
-		xfree(cpus_cnt);
 
 		while (cpus > 0) {
 			uint16_t prev_cpus = cpus;
diff --git a/src/plugins/task/cgroup/task_cgroup_cpuset.c b/src/plugins/task/cgroup/task_cgroup_cpuset.c
index e0f35c3baed71fd8b08422867d22b99ca7d965d9..28369fa0d0875986bbd7d6c66c119883e8b5d84f 100644
--- a/src/plugins/task/cgroup/task_cgroup_cpuset.c
+++ b/src/plugins/task/cgroup/task_cgroup_cpuset.c
@@ -61,9 +61,6 @@
 #include <hwloc.h>
 #include <hwloc/glibc-sched.h>
 
-static bool cpuset_prefix_set = false;
-static char *cpuset_prefix = "";
-
 # if HWLOC_API_VERSION <= 0x00010000
 /* After this version the cpuset structure and all its functions
  * changed to bitmaps.  So to work with old hwloc's we just do the
@@ -101,6 +98,9 @@ static inline int hwloc_bitmap_asprintf(char **str, hwloc_bitmap_t bitmap)
 #define PATH_MAX 256
 #endif
 
+static bool cpuset_prefix_set = false;
+static char *cpuset_prefix = "";
+
 static char user_cgroup_path[PATH_MAX];
 static char job_cgroup_path[PATH_MAX];
 static char jobstep_cgroup_path[PATH_MAX];
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index b27a497f02033da0921d2853675e653c6cf8e366..50939c224c665dd26331b504efb873a466429d81 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -2200,7 +2200,8 @@ extern int kill_job_by_front_end_name(char *node_name)
 		} else if (IS_JOB_RUNNING(job_ptr) || suspended) {
 			job_count++;
 			if (job_ptr->batch_flag && job_ptr->details &&
-				   (job_ptr->details->requeue > 0)) {
+			    slurmctld_conf.job_requeue &&
+			    (job_ptr->details->requeue > 0)) {
 				char requeue_msg[128];
 
 				srun_node_fail(job_ptr->job_id, node_name);
@@ -2434,6 +2435,7 @@ extern int kill_running_job_by_node_name(char *node_name)
 				excise_node_from_job(job_ptr, node_ptr);
 				job_post_resize_acctg(job_ptr);
 			} else if (job_ptr->batch_flag && job_ptr->details &&
+				   slurmctld_conf.job_requeue &&
 				   (job_ptr->details->requeue > 0)) {
 				char requeue_msg[128];
 
@@ -8798,7 +8800,8 @@ static void _purge_missing_jobs(int node_inx, time_t now)
 		    (job_ptr->start_time       < startup_time)	&&
 		    (node_inx == bit_ffs(job_ptr->node_bitmap))) {
 			bool requeue = false;
-			if (job_ptr->start_time < node_ptr->boot_time)
+			if (slurmctld_conf.job_requeue &&
+			    (job_ptr->start_time < node_ptr->boot_time))
 				requeue = true;
 			info("Batch JobId=%u missing from node 0",
 			     job_ptr->job_id);
diff --git a/src/slurmd/slurmd/slurmd.c b/src/slurmd/slurmd/slurmd.c
index 1b0895824598b366eb8ae597e0d1e3fc010fb11e..a3579771247bfb90f951391ef1d474ee21cd9c6c 100644
--- a/src/slurmd/slurmd/slurmd.c
+++ b/src/slurmd/slurmd/slurmd.c
@@ -618,13 +618,13 @@ _fill_registration_msg(slurm_node_registration_status_msg_t *msg)
 
 	if (first_msg) {
 		first_msg = false;
-		info("Procs=%u Boards=%u Sockets=%u Cores=%u Threads=%u "
+		info("CPUs=%u Boards=%u Sockets=%u Cores=%u Threads=%u "
 		     "Memory=%u TmpDisk=%u Uptime=%u",
 		     msg->cpus, msg->boards, msg->sockets, msg->cores,
 		     msg->threads, msg->real_memory, msg->tmp_disk,
 		     msg->up_time);
 	} else {
-		debug3("Procs=%u Boards=%u Sockets=%u Cores=%u Threads=%u "
+		debug3("CPUs=%u Boards=%u Sockets=%u Cores=%u Threads=%u "
 		       "Memory=%u TmpDisk=%u Uptime=%u",
 		       msg->cpus, msg->boards, msg->sockets, msg->cores,
 		       msg->threads, msg->real_memory, msg->tmp_disk,
@@ -840,7 +840,7 @@ _read_config(void)
 		if (cf->fast_schedule) {
 			info("Node configuration differs from hardware: "
 			     "CPUs=%u:%u(hw) Boards=%u:%u(hw) "
-			     "Sockets=%u:%u(hw) CoresPerSocket=%u:%u(hw) "
+			     "SocketsPerBoard=%u:%u(hw) CoresPerSocket=%u:%u(hw) "
 			     "ThreadsPerCore=%u:%u(hw)",
 			     conf->cpus,    conf->actual_cpus,
 			     conf->boards,  conf->actual_boards,
@@ -855,7 +855,7 @@ _read_config(void)
 			      "the bitmaps the slurmctld must create before "
 			      "the slurmd registers.\n"
 			      "   CPUs=%u:%u(hw) Boards=%u:%u(hw) "
-			      "Sockets=%u:%u(hw) CoresPerSocket=%u:%u(hw) "
+			      "SocketsPerBoard=%u:%u(hw) CoresPerSocket=%u:%u(hw) "
 			      "ThreadsPerCore=%u:%u(hw)",
 			      conf->cpus,    conf->actual_cpus,
 			      conf->boards,  conf->actual_boards,
@@ -1174,7 +1174,7 @@ _print_config(void)
 	            &conf->actual_threads,
 	            &conf->block_map_size,
 	            &conf->block_map, &conf->block_map_inv);
-	printf("CPUs=%u Boards=%u Sockets=%u CoresPerSocket=%u "
+	printf("CPUs=%u Boards=%u SocketsPerBoard=%u CoresPerSocket=%u "
 	       "ThreadsPerCore=%u ",
 	       conf->actual_cpus, conf->actual_boards, conf->actual_sockets,
 	       conf->actual_cores, conf->actual_threads);
diff --git a/testsuite/expect/test1.89 b/testsuite/expect/test1.89
index dc712e675dda0e9162f18398a32d1aee448a5793..13c6c97fdc0e23da356216dc33fd42b1ea1a64bb 100755
--- a/testsuite/expect/test1.89
+++ b/testsuite/expect/test1.89
@@ -127,12 +127,15 @@ expect {
 #
 # Reading a second prompt is required by some versions of Expect
 #
-if { 0 } {
-	expect {
-		-re $prompt {
-		}
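+# A short timeout prevents a stall when no second prompt appears.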
+set timeout 1
+expect {
+	-re $prompt {
+		exp_continue
+	}
+	timeout {
 	}
 }
+set timeout 30
 
 #
 # Run a job step to get allocated processor count and affinity
diff --git a/testsuite/expect/test1.90 b/testsuite/expect/test1.90
index e0622c92c40135ae2e31f2145d6088340bd3615b..bc10c07f57237f62319891142c75178b15c8fd16 100755
--- a/testsuite/expect/test1.90
+++ b/testsuite/expect/test1.90
@@ -136,6 +136,19 @@ expect {
 	}
 }
 
+#
+# Reading a second prompt is required by some versions of Expect
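+# A short timeout prevents a stall when no second prompt appears.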
+#
+set timeout 1
+expect {
+	-re $prompt {
+		exp_continue
+	}
+	timeout {
+	}
+}
+set timeout 30
+
 #
 # Run a job step to get allocated processor count and affinity
 #