diff --git a/META b/META
index fc86b09429b7ca5150568069af93c02d7b3fc200..3cd8885a1923f015639354b797364762f18de8c2 100644
--- a/META
+++ b/META
@@ -8,10 +8,10 @@
   Meta:		1
   Name:		slurm
   Major:	2
-  Minor:	5
+  Minor:	6
   Micro:	0
-  Version:	2.5.0
-  Release:	1
+  Version:	2.6.0
+  Release:	0-pre1
 
 ##
 #  When changing API_CURRENT update src/common/slurm_protocol_common.h
@@ -21,6 +21,6 @@
 #  and _get_slurm_version()
 #  need to be updated also when changes are added also.
 ##
-  API_CURRENT:	25
+  API_CURRENT:	26
   API_AGE:	0
   API_REVISION:	0
diff --git a/NEWS b/NEWS
index 88852010c4239de57146742de50d520a9e17a369..3a32a40eedea1a2739153a75c2e66b985d96664b 100644
--- a/NEWS
+++ b/NEWS
@@ -1,6 +1,13 @@
 This file describes changes in recent versions of SLURM. It primarily
 documents those changes that are of interest to users and admins.
 
+* Changes in SLURM 2.6.0-pre1
+=============================
+ -- Add "state" field to job step information reported by scontrol.
+ -- Notify srun to retry step creation upon completion of other job steps
+    rather than polling. This results in much faster throughput for job step
+    execution with --exclusive option.
+
 * Changes in SLURM 2.5.1
 ========================
  -- Correction to hostlist sorting for hostnames that contain two numeric
diff --git a/RELEASE_NOTES b/RELEASE_NOTES
index 38105e11b1568278a94bddc19e27e2fbb9667b89..5309e5c8d1a1b1d3c21066bfa3b95e9aa3c89597 100644
--- a/RELEASE_NOTES
+++ b/RELEASE_NOTES
@@ -1,10 +1,10 @@
-RELEASE NOTES FOR SLURM VERSION 2.5
-8 November 2012
+RELEASE NOTES FOR SLURM VERSION 2.6
+8 December 2012
 
 
 IMPORTANT NOTE:
 If using the slurmdbd (SLURM DataBase Daemon) you must update this first.
-The 2.5 slurmdbd will work with SLURM daemons of version 2.3 and above.
+The 2.6 slurmdbd will work with SLURM daemons of version 2.4 and above.
 You will not need to update all clusters at the same time, but it is very
 important to update slurmdbd first and having it running before updating
 any other clusters making use of it.  No real harm will come from updating
@@ -18,56 +18,23 @@ innodb_buffer_pool_size=64M
 under the [mysqld] reference in the my.cnf file and restarting the mysqld.
 This is needed when converting large tables over to the new database schema.
 
-SLURM can be upgraded from version 2.3 or 2.4 to version 2.5 without loss of
+SLURM can be upgraded from version 2.4 or 2.5 to version 2.6 without loss of
 jobs or other state information. Upgrading directly from an earlier version of
 SLURM will result in loss of state information.
 
 
 HIGHLIGHTS
 ==========
-- Major performance improvements for high-throughput computing.
-- Added "boards" count to node information and "boards_per_node" to job request
-  and job information. Optimize resource allocation to minimize number of
-  boards used by a job.
-- Added support for IBM Parallel Environment (PE) including the launching of
-  jobs using either the srun or poe command.
-- Add support for advanced reservation for specific cores rather than whole
-  nodes.
-- Added srun option "--cpu-freq" to enable user control over the job's CPU
-  frequency and thus it's power consumption.
-- Added priority/multifactor2 plugin supporting ticket based shares.
-- Added gres/mic plugin supporting Intel Many Integrated Core (MIC) processors.
-- Added launch plugin to support srun interface to launch tasks using different
-  methods like IBM's poe or Cray's aprun.
 
 CONFIGURATION FILE CHANGES (see "man slurm.conf" for details)
 =============================================================
-- Added node configuration parameter of "Boards".
-- Added DebugFlag option of "Switch" to log switch plugin details.
-- Added "AcctGatherEnergy" configuration parameter to identify the plugin
-  to be used to gather energy consumption data for jobs.
-- When running with multiple slurmd daemons per node, enable specifying a
-  range of ports on a single line of the node configuration in slurm.conf.
-- New SelectType plugin of "serial" provides highly optimized throughput for
-  serial (single CPU) jobs.
-- New SwitchType plugin of "nrt" provides support for IBM Network Resource
-  Table API.
-- Added configuration option of "LaunchType" to control the mechanism used for
-  launching application tasks. Available plugins include "slurm" (native SLURM
-  mode), "runjob" (for use with IBM BlueGene/Q systems) and "poe" (for use with
-  IBM Parallel Environment).
+- Much faster throughput for job step execution with --exclusive option. The
+  srun process is notified when resources become available rather than periodic
+  polling.
 
 COMMAND CHANGES (see man pages for details)
 ===========================================
-- Added sinfo option of "-T" to print reservation information.
-- Added LicensesUsed field to output of "scontrol show configuration" command.
-  Output is of the form "name:used/total".
-- Add reservation flag of "Part_Nodes" to allocate all nodes in a partition to
-  a reservation and automatically change the reservation when nodes are
-  added to or removed from the reservation.
-- sinfo partition field size will be set the the length of the longest
-  partition name by default.
-- Deprecation of sacct --dump --fdump.  This will go away in 2.6 completely.
+- Added step "State" field to scontrol output.
 
 OTHER CHANGES
 =============
@@ -77,57 +44,17 @@ API CHANGES
 
 Changed members of the following structs
 ========================================
-Added boards_per_node to job_info and job_desc_msg_t.
-Added acct_gather_energy_t, boards and cpu_load to node_info_t.
-Added step_signal to slurm_step_launch_callbacks_t - for signaling steps that
-      are perhaps not running as srun.
-Added acct_gather_energy_type, acct_gather_node_freq launch_type, licenses,
-      and licenses_used to slurm_ctl_conf_t
-Added ntasks_per_board, boards_per_node, sockets_per_board to slurm_job_info_t
-Added ntasks_per_board, boards_per_node job_desc_msg_t
-Added cpu_freq to slurm_step_ctx_params_t
-Added cpu_freq to slurm_step_launch_params_t
-Added cpu_freq to job_step_info_t
-Added (*step_signal) to slurm_step_launch_callbacks_t
-Added core_cnt to reserve_info_t
-Added core_cnt to resv_desc_msg_t
-Added actual_boards to slurmd_status_t
-Added act_cpufreq, consumed_energy, and req_cpufreq to slurmdb_stats_t
+Added "state" field to job_step_info_t.
 
 Added the following struct definitions
 ======================================
-acct_gather_energy_t
-acct_gather_node_resp_msg_t
-Changed job_info_t to slurm_job_info_t since IBM PE machines have a job_info_t
-	structure already defined. job_info_t is defined as slurm_job_info_t on
-	will still work in a non IBM PE environment, but shouldn't be used in
-	future code.
 
 Changed the following enums and #defines
 ========================================
-added #define DEBUG_FLAG_SWITCH
-added #define DEBUG_FLAG_ENERGY
-added #define CPU_FREQ_RANGE_FLAG
-added #define CPU_FREQ_LOW
-added #define CPU_FREQ_MEDIUM
-added #define CPU_FREQ_HIGH
-added #define CR_BOARD
-added #define RESERVE_FLAG_PART_NODES
-added #define RESERVE_FLAG_NO_PART_NODES
-added #define RECONFIG_KEEP_PART_STAT
-added enum acct_energy_type
-added SELECT_JOBDATA_CONFIRMED to enum select_jobdata_type
-added JOBACCT_DATA_ACT_CPUFREQ and JOBACCT_DATA_CONSUMED_ENERGY
-      to enum jobacct_data_type
-Added CPU_BIND_TO_BOARDS to enum cpu_bind_type
 
 Added the following API's
 =========================
-slurm_step_launch_add - added for adding tasks to steps that were
-		       	previously started.  (Note: it currently has only been
-			tested with user managed io jobs.)
-slurm_init_trigger_msg - added to initialize trigger clear/update message
 
 Changed the following API's
 ===========================
-slurm_step_ctx_daemon_per_node_hack - ported to newer poe interface
+
diff --git a/slurm/slurm.h.in b/slurm/slurm.h.in
index 43fab380c659d612ece2f0fdee3e4132c2cf94c9..a4f9b50197c917af2425aafdf5a595aef857512e 100644
--- a/slurm/slurm.h.in
+++ b/slurm/slurm.h.in
@@ -1407,6 +1407,7 @@ typedef struct {
 						* slurm_get_select_jobinfo()
 						*/
 	time_t start_time;	/* step start time */
+	uint16_t state;		/* state of the step, see enum job_states */
 	uint32_t step_id;	/* step ID */
 	uint32_t time_limit;	/* step time limit */
 	uint32_t user_id;	/* user the job runs as */
@@ -2457,6 +2458,17 @@ extern void slurm_step_ctx_params_t_init PARAMS((slurm_step_ctx_params_t *ptr));
 extern slurm_step_ctx_t *slurm_step_ctx_create PARAMS(
 	(const slurm_step_ctx_params_t *step_params));
 
+/*
+ * slurm_step_ctx_create_timeout - Create a job step and its context.
+ * IN step_params - job step parameters
+ * IN timeout - in microseconds
+ * RET the step context or NULL on failure with slurm errno set
+ * NOTE: Free allocated memory using slurm_step_ctx_destroy.
+ */
+extern slurm_step_ctx_t *
+slurm_step_ctx_create_timeout PARAMS(
+	(const slurm_step_ctx_params_t *step_params, int timeout));
+
 /*
  * slurm_step_ctx_create_no_alloc - Create a job step and its context without
  *                                  getting an allocation.
diff --git a/src/api/allocate.c b/src/api/allocate.c
index 4d3164ddccc36623bfad01de874842f9cf293a25..2f8cb0bde9d654b9613cd4c659bb2aba28412ebc 100644
--- a/src/api/allocate.c
+++ b/src/api/allocate.c
@@ -324,7 +324,7 @@ int slurm_job_will_run (job_desc_msg_t *req)
 			return SLURM_PROTOCOL_ERROR;
 		break;
 	case RESPONSE_JOB_WILL_RUN:
-		if(cluster_flags & CLUSTER_FLAG_BG)
+		if (cluster_flags & CLUSTER_FLAG_BG)
 			type = "cnodes";
 		will_run_resp = (will_run_response_msg_t *) resp_msg.data;
 		slurm_make_time_str(&will_run_resp->start_time,
diff --git a/src/api/job_step_info.c b/src/api/job_step_info.c
index 8ddcc6c5f95a2484a059c41ad9937b4d263cf0ba..1295ed7198174001ca1a9792de2984d58a8948d2 100644
--- a/src/api/job_step_info.c
+++ b/src/api/job_step_info.c
@@ -158,9 +158,9 @@ slurm_sprint_job_step_info ( job_step_info_t * job_step_ptr,
 		secs2time_str ((time_t)job_step_ptr->time_limit * 60,
 				limit_str, sizeof(limit_str));
 	snprintf(tmp_line, sizeof(tmp_line),
-		"StepId=%u.%u UserId=%u StartTime=%s TimeLimit=%s",
-		job_step_ptr->job_id, job_step_ptr->step_id,
-		job_step_ptr->user_id, time_str, limit_str);
+		 "StepId=%u.%u UserId=%u StartTime=%s TimeLimit=%s",
+		 job_step_ptr->job_id, job_step_ptr->step_id,
+		 job_step_ptr->user_id, time_str, limit_str);
 	out = xstrdup(tmp_line);
 	if (one_liner)
 		xstrcat(out, " ");
@@ -168,6 +168,10 @@ slurm_sprint_job_step_info ( job_step_info_t * job_step_ptr,
 		xstrcat(out, "\n   ");
 
 	/****** Line 2 ******/
+	snprintf(tmp_line, sizeof(tmp_line),
+		 "State=%s ",
+		 job_state_string(job_step_ptr->state));
+	xstrcat(out, tmp_line);
 	if (cluster_flags & CLUSTER_FLAG_BG) {
 		char *io_nodes;
 		select_g_select_jobinfo_get(job_step_ptr->select_jobinfo,
@@ -199,7 +203,6 @@ slurm_sprint_job_step_info ( job_step_info_t * job_step_ptr,
 		xstrcat(out, "\n   ");
 
 	/****** Line 3 ******/
-
 	if (cluster_flags & CLUSTER_FLAG_BGQ) {
 		uint32_t nodes = 0;
 		select_g_select_jobinfo_get(job_step_ptr->select_jobinfo,
@@ -235,10 +238,10 @@ slurm_sprint_job_step_info ( job_step_info_t * job_step_ptr,
 		xstrcat(out, "\n   ");
 
 	/****** Line 5 ******/
-	if (job_step_ptr->cpu_freq == NO_VAL)
+	if (job_step_ptr->cpu_freq == NO_VAL) {
 		snprintf(tmp_line, sizeof(tmp_line), 
 			 "CPUFreqReq=Default\n\n");
-	else if (job_step_ptr->cpu_freq & CPU_FREQ_RANGE_FLAG) {
+	} else if (job_step_ptr->cpu_freq & CPU_FREQ_RANGE_FLAG) {
 		switch (job_step_ptr->cpu_freq) 
 		{
 		case CPU_FREQ_LOW :
@@ -257,10 +260,10 @@ slurm_sprint_job_step_info ( job_step_info_t * job_step_ptr,
 			snprintf(tmp_line, sizeof(tmp_line),
 				 "CPUFreqReq=Unknown\n\n");
 		}
-	}
-	else 
+	} else {
 		snprintf(tmp_line, sizeof(tmp_line),
 			 "CPUFreqReq=%u\n\n", job_step_ptr->cpu_freq);
+	}
 	xstrcat(out, tmp_line);
 
 	return out;
diff --git a/src/api/step_ctx.c b/src/api/step_ctx.c
index f7e038a3f290b50bbf77c7d5da7b58a11c6eea66..74f6468e1c5094fd79e7cddea8f131177a286314 100644
--- a/src/api/step_ctx.c
+++ b/src/api/step_ctx.c
@@ -42,6 +42,7 @@
 #endif
 
 #include <errno.h>
+#include <poll.h>
 #include <pthread.h>
 #include <stdarg.h>
 #include <stdlib.h>
@@ -148,8 +149,7 @@ slurm_step_ctx_create (const slurm_step_ctx_params_t *step_params)
 	short port = 0;
 	int errnum = 0;
 
-	/* First copy the user's step_params into a step request
-	 * struct */
+	/* First copy the user's step_params into a step request struct */
 	step_req = _create_step_request(step_params);
 
 	/* We will handle the messages in the step_launch.c mesage handler,
@@ -189,6 +189,80 @@ fail:
 	return (slurm_step_ctx_t *)ctx;
 }
 
+/*
+ * slurm_step_ctx_create_timeout - Create a job step and its context.
+ * IN step_params - job step parameters
+ * IN timeout - in microseconds
+ * RET the step context or NULL on failure with slurm errno set
+ * NOTE: Free allocated memory using slurm_step_ctx_destroy.
+ */
+extern slurm_step_ctx_t *
+slurm_step_ctx_create_timeout (const slurm_step_ctx_params_t *step_params,
+			       int timeout)
+{
+	struct slurm_step_ctx_struct *ctx = NULL;
+	job_step_create_request_msg_t *step_req = NULL;
+	job_step_create_response_msg_t *step_resp = NULL;
+	int rc, time_left = timeout / 1000;
+	int sock = -1;
+	short port = 0;
+	int errnum = 0;
+
+	/* First copy the user's step_params into a step request struct */
+	step_req = _create_step_request(step_params);
+
+	/* We will handle the messages in the step_launch.c message handler,
+	 * but we need to open the socket right now so we can tell the
+	 * controller which port to use.
+	 */
+	if (net_stream_listen(&sock, &port) < 0) {
+		errnum = errno;
+		error("unable to initialize step context socket: %m");
+		slurm_free_job_step_create_request_msg(step_req);
+		goto fail;
+	}
+	step_req->port = port;
+	step_req->host = xshort_hostname();
+
+	rc = slurm_job_step_create(step_req, &step_resp);
+	if ((rc < 0) &&
+	    ((errno == ESLURM_NODES_BUSY) ||
+	     (errno == ESLURM_PORTS_BUSY) ||
+	     (errno == ESLURM_INTERCONNECT_BUSY))) {
+		struct pollfd fds;
+		fds.fd = sock;
+		fds.events = POLLIN;
+		while ((rc = poll(&fds, 1, time_left)) <= 0) {
+			if ((errno == EINTR) || (errno == EAGAIN))
+				continue;
+			break;
+		}
+		rc = slurm_job_step_create(step_req, &step_resp);
+	}
+
+	if ((rc < 0) || (step_resp == NULL)) {
+		errnum = errno;
+		slurm_free_job_step_create_request_msg(step_req);
+		close(sock);
+		goto fail;
+	}
+
+	ctx = xmalloc(sizeof(struct slurm_step_ctx_struct));
+	ctx->launch_state = NULL;
+	ctx->magic	= STEP_CTX_MAGIC;
+	ctx->job_id	= step_req->job_id;
+	ctx->user_id	= step_req->user_id;
+	ctx->step_req   = step_req;
+	ctx->step_resp	= step_resp;
+	ctx->verbose_level = step_params->verbose_level;
+
+	ctx->launch_state = step_launch_state_create(ctx);
+	ctx->launch_state->slurmctld_socket_fd = sock;
+fail:
+	errno = errnum;
+	return (slurm_step_ctx_t *)ctx;
+}
+
 /*
  * slurm_step_ctx_create_no_alloc - Create a job step and its context without
  *                                  getting an allocation.
diff --git a/src/common/slurm_protocol_common.h b/src/common/slurm_protocol_common.h
index bc1f2f7329f69e984a72138e0807ab27471af46c..751cf1cbc2888f61c54192bd5c4282cfcddb4a4f 100644
--- a/src/common/slurm_protocol_common.h
+++ b/src/common/slurm_protocol_common.h
@@ -70,7 +70,8 @@
  * In slurm_protocol_util.c check_header_version(), and init_header()
  * need to be updated also when changes are added */
 #define SLURM_PROTOCOL_VERSION ((SLURM_API_MAJOR << 8) | SLURM_API_AGE)
-#define SLURM_2_5_PROTOCOL_VERSION SLURM_PROTOCOL_VERSION
+#define SLURM_2_6_PROTOCOL_VERSION SLURM_PROTOCOL_VERSION
+#define SLURM_2_5_PROTOCOL_VERSION ((25 << 8) | 0)
 #define SLURM_2_4_PROTOCOL_VERSION ((24 << 8) | 0)
 #define SLURM_2_3_PROTOCOL_VERSION ((23 << 8) | 0)
 #if 0
diff --git a/src/common/slurm_protocol_pack.c b/src/common/slurm_protocol_pack.c
index 5db053f67ab81e146b4a7e337c6159c3ed45a3b5..c683eeaf70eeba9116290ffebcabed315494d92b 100644
--- a/src/common/slurm_protocol_pack.c
+++ b/src/common/slurm_protocol_pack.c
@@ -4054,7 +4054,7 @@ _unpack_job_step_info_members(job_step_info_t * step, Buf buffer,
 	uint32_t uint32_tmp = 0;
 	char *node_inx_str;
 
-	if (protocol_version >= SLURM_2_5_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
 		safe_unpack32(&step->job_id, buffer);
 		safe_unpack32(&step->step_id, buffer);
 		safe_unpack16(&step->ckpt_interval, buffer);
@@ -4063,6 +4063,7 @@ _unpack_job_step_info_members(job_step_info_t * step, Buf buffer,
 		safe_unpack32(&step->cpu_freq, buffer);
 		safe_unpack32(&step->num_tasks, buffer);
 		safe_unpack32(&step->time_limit, buffer);
+		safe_unpack16(&step->state, buffer);
 
 		safe_unpack_time(&step->start_time, buffer);
 		safe_unpack_time(&step->run_time, buffer);
@@ -4084,7 +4085,37 @@ _unpack_job_step_info_members(job_step_info_t * step, Buf buffer,
 		if (select_g_select_jobinfo_unpack(&step->select_jobinfo,
 						   buffer, protocol_version))
 			goto unpack_error;
-	} else if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_2_5_PROTOCOL_VERSION) {
+		safe_unpack32(&step->job_id, buffer);
+		safe_unpack32(&step->step_id, buffer);
+		safe_unpack16(&step->ckpt_interval, buffer);
+		safe_unpack32(&step->user_id, buffer);
+		safe_unpack32(&step->num_cpus, buffer);
+		safe_unpack32(&step->cpu_freq, buffer);
+		safe_unpack32(&step->num_tasks, buffer);
+		safe_unpack32(&step->time_limit, buffer);
+
+		safe_unpack_time(&step->start_time, buffer);
+		safe_unpack_time(&step->run_time, buffer);
+
+		safe_unpackstr_xmalloc(&step->partition, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&step->resv_ports, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&step->nodes, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&step->name, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&step->network, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&node_inx_str, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&step->ckpt_dir, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&step->gres, &uint32_tmp, buffer);
+		if (node_inx_str == NULL)
+			step->node_inx = bitfmt2int("");
+		else {
+			step->node_inx = bitfmt2int(node_inx_str);
+			xfree(node_inx_str);
+		}
+		if (select_g_select_jobinfo_unpack(&step->select_jobinfo,
+						   buffer, protocol_version))
+			goto unpack_error;
+	} else if (protocol_version >= SLURM_2_4_PROTOCOL_VERSION) {
 		safe_unpack32(&step->job_id, buffer);
 		safe_unpack32(&step->step_id, buffer);
 		safe_unpack16(&step->ckpt_interval, buffer);
diff --git a/src/plugins/launch/aprun/launch_aprun.c b/src/plugins/launch/aprun/launch_aprun.c
index e5565bc19d85a42f95cb04b062c6123ed3820c5c..c83a76847f19a6bc4f81c4b0d2f354a20e8f61e5 100644
--- a/src/plugins/launch/aprun/launch_aprun.c
+++ b/src/plugins/launch/aprun/launch_aprun.c
@@ -326,7 +326,8 @@ static void _handle_msg(slurm_msg_t *msg)
 	case SRUN_STEP_SIGNAL:
 		ss = msg->data;
 		debug("received step signal %u RPC", ss->signal);
-		launch_p_fwd_signal(ss->signal);
+		if (ss->signal)
+			launch_p_fwd_signal(ss->signal);
 		slurm_free_job_step_kill_msg(msg->data);
 		break;
 	default:
diff --git a/src/plugins/launch/runjob/launch_runjob.c b/src/plugins/launch/runjob/launch_runjob.c
index 27a307a4e89ecb0fd7f3bf00d71bc88daae0beae..41ef79ab3d02ed634a3ed7bcccd8830fa36dd279 100644
--- a/src/plugins/launch/runjob/launch_runjob.c
+++ b/src/plugins/launch/runjob/launch_runjob.c
@@ -174,7 +174,8 @@ _handle_msg(slurm_msg_t *msg)
 	case SRUN_STEP_SIGNAL:
 		ss = msg->data;
 		debug("received step signal %u RPC", ss->signal);
-		runjob_signal(ss->signal);
+		if (ss->signal)
+			runjob_signal(ss->signal);
 		slurm_free_job_step_kill_msg(msg->data);
 		break;
 	default:
diff --git a/src/slurmctld/proc_req.c b/src/slurmctld/proc_req.c
index 1d3b711177aa66a32381fda477e069756428c11b..51e17fb4ee6cde8c105ad7851e73bae6c1d3e479 100644
--- a/src/slurmctld/proc_req.c
+++ b/src/slurmctld/proc_req.c
@@ -1727,7 +1727,7 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg)
 #endif
 		job_step_resp.cred           = slurm_cred;
 		job_step_resp.select_jobinfo = step_rec->select_jobinfo;
-		job_step_resp.switch_job     =  step_rec->switch_job;
+		job_step_resp.switch_job     = step_rec->switch_job;
 
 		unlock_slurmctld(job_write_lock);
 		slurm_msg_t_init(&resp);
diff --git a/src/slurmctld/slurmctld.h b/src/slurmctld/slurmctld.h
index 75465489215b2a6376e40e29ca7eedd620ebc8b4..712632844b30d584d2511cb59669796b2258804c 100644
--- a/src/slurmctld/slurmctld.h
+++ b/src/slurmctld/slurmctld.h
@@ -668,6 +668,7 @@ struct 	step_record {
 	time_t start_time;      	/* step allocation start time */
 	uint32_t time_limit;      	/* step allocation time limit */
 	dynamic_plugin_data_t *select_jobinfo;/* opaque data, BlueGene */
+	uint16_t state;			/* state of the step. See job_states */
 	uint32_t step_id;		/* step number */
 	slurm_step_layout_t *step_layout;/* info about how tasks are laid out
 					  * in the step */
diff --git a/src/slurmctld/step_mgr.c b/src/slurmctld/step_mgr.c
index 939c4eff05daf278f03fe89aa45263e25affc050..40c4add82b2e21f3e3b6e5f98eaee345486e66be 100644
--- a/src/slurmctld/step_mgr.c
+++ b/src/slurmctld/step_mgr.c
@@ -3,6 +3,7 @@
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
+ *  Copyright (C) 2012 SchedMD LLC.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>, et. al.
  *  CODE-OCEC-09-009. All rights reserved.
@@ -75,6 +76,8 @@
 
 #define MAX_RETRIES 10
 
+static void _build_pending_step(struct job_record  *job_ptr,
+				job_step_create_request_msg_t *step_specs);
 static int  _count_cpus(struct job_record *job_ptr, bitstr_t *bitmap,
 			uint32_t *usable_cpu_cnt);
 static struct step_record * _create_step_record(struct job_record *job_ptr);
@@ -171,17 +174,36 @@ static struct step_record * _create_step_record(struct job_record *job_ptr)
 	step_ptr = (struct step_record *) xmalloc(sizeof(struct step_record));
 
 	last_job_update = time(NULL);
-	step_ptr->job_ptr = job_ptr;
-	step_ptr->start_time = time(NULL);
+	step_ptr->job_ptr    = job_ptr;
+	step_ptr->exit_code  = NO_VAL;
 	step_ptr->time_limit = INFINITE;
-	step_ptr->jobacct = jobacctinfo_create(NULL);
-	step_ptr->requid = -1;
+	step_ptr->jobacct    = jobacctinfo_create(NULL);
+	step_ptr->requid     = -1;
 	if (list_append (job_ptr->step_list, step_ptr) == NULL)
 		fatal ("_create_step_record: unable to allocate memory");
 
 	return step_ptr;
 }
 
+/* The step with a state of PENDING is used as a placeholder for a host and
+ * port that can be used to wake a pending srun as soon as another step ends */
+static void _build_pending_step(struct job_record *job_ptr,
+				job_step_create_request_msg_t *step_specs)
+{
+	struct step_record *step_ptr;
+
+	if ((step_specs->host == NULL) || (step_specs->port == 0))
+		return;
+
+	step_ptr = _create_step_record(job_ptr);
+	if (step_ptr == NULL)
+		return;
+
+	step_ptr->port    = step_specs->port;
+	step_ptr->host    = xstrdup(step_specs->host);
+	step_ptr->state   = JOB_PENDING;
+	step_ptr->step_id = NO_VAL;
+}
 
 /*
  * delete_step_records - delete step record for specified job_ptr
@@ -506,6 +528,30 @@ void signal_step_tasks_on_node(char* node_name, struct step_record *step_ptr,
 	return;
 }
 
+/* A step just completed, signal srun processes with pending steps to retry */
+static void _wake_pending_steps(struct job_record *job_ptr)
+{
+	int max_wake = 5;
+	ListIterator step_iterator;
+	struct step_record *step_ptr;
+
+	if (!job_ptr->step_list)
+		return;
+	step_iterator = list_iterator_create(job_ptr->step_list);
+	if (!step_iterator)
+		fatal("list_iterator_create: malloc failure");
+	while ((step_ptr = (struct step_record *) list_next (step_iterator))) {
+		if (step_ptr->state == JOB_PENDING) {
+			srun_step_signal(step_ptr, 0);
+			list_remove (step_iterator);
+			_free_step_rec(step_ptr);
+			if (max_wake-- <= 0)
+				break;
+		}
+	}
+	list_iterator_destroy (step_iterator);
+}
+
 /*
  * job_step_complete - note normal completion the specified job step
  * IN job_id - id of the job to be completed
@@ -556,6 +602,7 @@ int job_step_complete(uint32_t job_id, uint32_t step_id, uid_t uid,
 		     step_id);
 		return ESLURM_ALREADY_DONE;
 	}
+	_wake_pending_steps(job_ptr);
 	return SLURM_SUCCESS;
 }
 
@@ -1838,10 +1885,8 @@ step_create(job_step_create_request_msg_t *step_specs,
 	if (cpus_per_mp == (uint16_t)NO_VAL)
 		select_g_alter_node_cnt(SELECT_GET_NODE_CPU_CNT,
 					&cpus_per_mp);
-	/* Below is done to get the correct cpu_count and then we need
-	   to set the cpu_count to 0 later so just pretend we are
-	   overcommitting.
-	*/
+	/* Below is done to get the correct cpu_count and then we need to set
+	 * the cpu_count to 0 later so just pretend we are overcommitting. */
 	step_specs->cpu_count = node_count * cpus_per_mp;
 	step_specs->overcommit = 1;
 	step_specs->exclusive = 0;
@@ -1866,7 +1911,7 @@ step_create(job_step_create_request_msg_t *step_specs,
 		return ESLURM_BAD_TASK_COUNT;
 
 	/* we set cpus_per_task to 0 if we can't spread them evenly
-	   over the nodes (hetergeneous systems) */
+	 * over the nodes (heterogeneous systems) */
 	if (!step_specs->cpu_count
 	    || (step_specs->cpu_count % step_specs->num_tasks))
 		cpus_per_task = 0;
@@ -1904,6 +1949,10 @@ step_create(job_step_create_request_msg_t *step_specs,
 		if (step_gres_list)
 			list_destroy(step_gres_list);
 		select_g_select_jobinfo_free(select_jobinfo);
+		if ((ret_code == ESLURM_NODES_BUSY) ||
+		    (ret_code == ESLURM_PORTS_BUSY) ||
+		    (ret_code == ESLURM_INTERCONNECT_BUSY))
+			_build_pending_step(job_ptr, step_specs);
 		return ret_code;
 	}
 #ifdef HAVE_CRAY
@@ -1912,8 +1961,7 @@ step_create(job_step_create_request_msg_t *step_specs,
 #endif
 #ifdef HAVE_BGQ
 	/* Things might of changed here since sometimes users ask for
-	   the wrong size in cnodes to make a block.
-	*/
+	 * the wrong size in cnodes to make a block. */
 	select_g_select_jobinfo_get(select_jobinfo,
 				    SELECT_JOBDATA_NODE_CNT,
 				    &node_count);
@@ -1949,7 +1997,9 @@ step_create(job_step_create_request_msg_t *step_specs,
 		select_g_select_jobinfo_free(select_jobinfo);
 		return ESLURMD_TOOMANYSTEPS;
 	}
-	step_ptr->step_id = job_ptr->next_step_id++;
+	step_ptr->start_time = time(NULL);
+	step_ptr->state      = JOB_RUNNING;
+	step_ptr->step_id    = job_ptr->next_step_id++;
 
 	/* Here is where the node list is set for the step */
 	if (step_specs->node_list &&
@@ -2275,7 +2325,7 @@ static void _pack_ctld_job_step_info(struct step_record *step_ptr, Buf buffer,
 	cpu_cnt = step_ptr->cpu_count;
 #endif
 
-	if (protocol_version >= SLURM_2_4_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
 		pack32(step_ptr->job_ptr->job_id, buffer);
 		pack32(step_ptr->step_id, buffer);
 		pack16(step_ptr->ckpt_interval, buffer);
@@ -2284,6 +2334,7 @@ static void _pack_ctld_job_step_info(struct step_record *step_ptr, Buf buffer,
 		pack32(step_ptr->cpu_freq, buffer);
 		pack32(task_cnt, buffer);
 		pack32(step_ptr->time_limit, buffer);
+		pack16(step_ptr->state, buffer);
 
 		pack_time(step_ptr->start_time, buffer);
 		if (IS_JOB_SUSPENDED(step_ptr->job_ptr)) {
@@ -2306,7 +2357,38 @@ static void _pack_ctld_job_step_info(struct step_record *step_ptr, Buf buffer,
 		packstr(step_ptr->gres, buffer);
 		select_g_select_jobinfo_pack(step_ptr->select_jobinfo, buffer,
 					     protocol_version);
-	} else if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_2_5_PROTOCOL_VERSION) {
+		pack32(step_ptr->job_ptr->job_id, buffer);
+		pack32(step_ptr->step_id, buffer);
+		pack16(step_ptr->ckpt_interval, buffer);
+		pack32(step_ptr->job_ptr->user_id, buffer);
+		pack32(cpu_cnt, buffer);
+		pack32(step_ptr->cpu_freq, buffer);
+		pack32(task_cnt, buffer);
+		pack32(step_ptr->time_limit, buffer);
+
+		pack_time(step_ptr->start_time, buffer);
+		if (IS_JOB_SUSPENDED(step_ptr->job_ptr)) {
+			run_time = step_ptr->pre_sus_time;
+		} else {
+			begin_time = MAX(step_ptr->start_time,
+					 step_ptr->job_ptr->suspend_time);
+			run_time = step_ptr->pre_sus_time +
+				difftime(time(NULL), begin_time);
+		}
+		pack_time(run_time, buffer);
+
+		packstr(step_ptr->job_ptr->partition, buffer);
+		packstr(step_ptr->resv_ports, buffer);
+		packstr(node_list, buffer);
+		packstr(step_ptr->name, buffer);
+		packstr(step_ptr->network, buffer);
+		pack_bit_fmt(pack_bitstr, buffer);
+		packstr(step_ptr->ckpt_dir, buffer);
+		packstr(step_ptr->gres, buffer);
+		select_g_select_jobinfo_pack(step_ptr->select_jobinfo, buffer,
+					     protocol_version);
+	} else if (protocol_version >= SLURM_2_4_PROTOCOL_VERSION) {
 		pack32(step_ptr->job_ptr->job_id, buffer);
 		pack32(step_ptr->step_id, buffer);
 		pack16(step_ptr->ckpt_interval, buffer);
@@ -2954,6 +3036,7 @@ extern void dump_job_step_state(struct job_record *job_ptr,
 	pack16(step_ptr->ckpt_interval, buffer);
 	pack16(step_ptr->cpus_per_task, buffer);
 	pack16(step_ptr->resv_port_cnt, buffer);
+	pack16(step_ptr->state, buffer);
 
 	pack8(step_ptr->no_kill, buffer);
 
@@ -2971,8 +3054,9 @@ extern void dump_job_step_state(struct job_record *job_ptr,
 		pack_bit_fmt(step_ptr->core_bitmap_job, buffer);
 	} else
 		pack32((uint32_t) 0, buffer);
-
 	pack32(step_ptr->time_limit, buffer);
+	pack32(step_ptr->cpu_freq, buffer);
+
 	pack_time(step_ptr->start_time, buffer);
 	pack_time(step_ptr->pre_sus_time, buffer);
 	pack_time(step_ptr->tot_sus_time, buffer);
@@ -3014,9 +3098,9 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer,
 	struct step_record *step_ptr = NULL;
 	uint8_t no_kill;
 	uint16_t cyclic_alloc, port, batch_step, bit_cnt;
-	uint16_t ckpt_interval, cpus_per_task, resv_port_cnt;
+	uint16_t ckpt_interval, cpus_per_task, resv_port_cnt, state;
 	uint32_t core_size, cpu_count, exit_code, mem_per_cpu, name_len;
-	uint32_t step_id, time_limit;
+	uint32_t step_id, time_limit, cpu_freq;
 	time_t start_time, pre_sus_time, tot_sus_time, ckpt_time;
 	char *host = NULL, *ckpt_dir = NULL, *core_job = NULL;
 	char *resv_ports = NULL, *name = NULL, *network = NULL;
@@ -3027,13 +3111,14 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer,
 	List gres_list = NULL;
 	dynamic_plugin_data_t *select_jobinfo = NULL;
 
-	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
 		safe_unpack32(&step_id, buffer);
 		safe_unpack16(&cyclic_alloc, buffer);
 		safe_unpack16(&port, buffer);
 		safe_unpack16(&ckpt_interval, buffer);
 		safe_unpack16(&cpus_per_task, buffer);
 		safe_unpack16(&resv_port_cnt, buffer);
+		safe_unpack16(&state, buffer);
 
 		safe_unpack8(&no_kill, buffer);
 
@@ -3047,8 +3132,9 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer,
 		safe_unpack32(&core_size, buffer);
 		if (core_size)
 			safe_unpackstr_xmalloc(&core_job, &name_len, buffer);
-
 		safe_unpack32(&time_limit, buffer);
+		safe_unpack32(&cpu_freq, buffer);
+
 		safe_unpack_time(&start_time, buffer);
 		safe_unpack_time(&pre_sus_time, buffer);
 		safe_unpack_time(&tot_sus_time, buffer);
@@ -3084,23 +3170,84 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer,
 		if (select_g_select_jobinfo_unpack(&select_jobinfo, buffer,
 						   protocol_version))
 			goto unpack_error;
-		/* validity test as possible */
-		if (cyclic_alloc > 1) {
-			error("Invalid data for job %u.%u: cyclic_alloc=%u",
-			      job_ptr->job_id, step_id, cyclic_alloc);
-			goto unpack_error;
+	} else if (protocol_version >= SLURM_2_4_PROTOCOL_VERSION) {
+		safe_unpack32(&step_id, buffer);
+		safe_unpack16(&cyclic_alloc, buffer);
+		safe_unpack16(&port, buffer);
+		safe_unpack16(&ckpt_interval, buffer);
+		safe_unpack16(&cpus_per_task, buffer);
+		safe_unpack16(&resv_port_cnt, buffer);
+
+		safe_unpack8(&no_kill, buffer);
+
+		safe_unpack32(&cpu_count, buffer);
+		safe_unpack32(&mem_per_cpu, buffer);
+		safe_unpack32(&exit_code, buffer);
+		if (exit_code != NO_VAL) {
+			safe_unpackstr_xmalloc(&bit_fmt, &name_len, buffer);
+			safe_unpack16(&bit_cnt, buffer);
 		}
-		if (no_kill > 1) {
-			error("Invalid data for job %u.%u: no_kill=%u",
-			      job_ptr->job_id, step_id, no_kill);
+		safe_unpack32(&core_size, buffer);
+		if (core_size)
+			safe_unpackstr_xmalloc(&core_job, &name_len, buffer);
+
+		safe_unpack32(&time_limit, buffer);
+		safe_unpack_time(&start_time, buffer);
+		safe_unpack_time(&pre_sus_time, buffer);
+		safe_unpack_time(&tot_sus_time, buffer);
+		safe_unpack_time(&ckpt_time, buffer);
+
+		safe_unpackstr_xmalloc(&host, &name_len, buffer);
+		safe_unpackstr_xmalloc(&resv_ports, &name_len, buffer);
+		safe_unpackstr_xmalloc(&name, &name_len, buffer);
+		safe_unpackstr_xmalloc(&network, &name_len, buffer);
+		safe_unpackstr_xmalloc(&ckpt_dir, &name_len, buffer);
+
+		safe_unpackstr_xmalloc(&gres, &name_len, buffer);
+		if (gres_plugin_step_state_unpack(&gres_list, buffer,
+						  job_ptr->job_id, step_id,
+						  protocol_version)
+		    != SLURM_SUCCESS)
 			goto unpack_error;
+
+		safe_unpack16(&batch_step, buffer);
+		if (!batch_step) {
+			if (unpack_slurm_step_layout(&step_layout, buffer,
+						     protocol_version))
+				goto unpack_error;
+			switch_alloc_jobinfo(&switch_tmp);
+			if (switch_unpack_jobinfo(switch_tmp, buffer))
+				goto unpack_error;
 		}
+		checkpoint_alloc_jobinfo(&check_tmp);
+		if (checkpoint_unpack_jobinfo(check_tmp, buffer,
+					      protocol_version))
+			goto unpack_error;
+
+		if (select_g_select_jobinfo_unpack(&select_jobinfo, buffer,
+						   protocol_version))
+			goto unpack_error;
+		/* Variables added since version 2.4 */
+		cpu_freq = NO_VAL;
+		state = JOB_RUNNING;
 	} else {
 		error("load_step_state: protocol_version "
 		      "%hu not supported", protocol_version);
 		goto unpack_error;
 	}
 
+	/* validity test as possible */
+	if (cyclic_alloc > 1) {
+		error("Invalid data for job %u.%u: cyclic_alloc=%u",
+		      job_ptr->job_id, step_id, cyclic_alloc);
+		goto unpack_error;
+	}
+	if (no_kill > 1) {
+		error("Invalid data for job %u.%u: no_kill=%u",
+		      job_ptr->job_id, step_id, no_kill);
+		goto unpack_error;
+	}
+
 	step_ptr = find_step_record(job_ptr, step_id);
 	if (step_ptr == NULL)
 		step_ptr = _create_step_record(job_ptr);
@@ -3142,6 +3289,8 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer,
 
 	step_ptr->switch_job   = switch_tmp;
 	step_ptr->check_job    = check_tmp;
+	step_ptr->cpu_freq     = cpu_freq;
+	step_ptr->state        = state;
 
 	step_ptr->exit_code    = exit_code;
 	if (bit_fmt) {
diff --git a/src/squeue/print.c b/src/squeue/print.c
index ce123d85725663738b264b11a2f26bc5dbbd7eb4..dbcdab5a823577ec05189db42b8e68899448fa7e 100644
--- a/src/squeue/print.c
+++ b/src/squeue/print.c
@@ -1523,6 +1523,9 @@ static int _filter_step(job_step_info_t * step)
 	char *part;
 	squeue_job_step_t *job_step_id;
 
+	if (step->state == JOB_PENDING)
+		return 1;
+
 	if (params.job_list) {
 		filter = 1;
 		iterator = list_iterator_create(params.job_list);
diff --git a/src/srun/libsrun/launch.c b/src/srun/libsrun/launch.c
index 92852b48bfa66ada4f0c5fc57854aafa176d3ea3..c4feb8a8bf0458c37744b3dce56946ec713b6e01 100644
--- a/src/srun/libsrun/launch.c
+++ b/src/srun/libsrun/launch.c
@@ -159,7 +159,8 @@ extern int launch_common_create_job_step(srun_job_t *job, bool use_all_cpus,
 					 sig_atomic_t *destroy_job)
 {
 	int i, rc;
-	unsigned long my_sleep = 0;
+	unsigned long my_sleep  = 0;
+	unsigned long max_sleep = 29000000;
 	time_t begin_time;
 
 	if (!job) {
@@ -272,9 +273,13 @@ extern int launch_common_create_job_step(srun_job_t *job, bool use_all_cpus,
 		if (opt.no_alloc) {
 			job->step_ctx = slurm_step_ctx_create_no_alloc(
 				&job->ctx_params, job->stepid);
-		} else
+		} else if (opt.immediate) {
 			job->step_ctx = slurm_step_ctx_create(
 				&job->ctx_params);
+		} else {
+			job->step_ctx = slurm_step_ctx_create_timeout(
+				&job->ctx_params, max_sleep);
+		}
 		if (job->step_ctx != NULL) {
 			if (i > 0)
 				info("Job step created");
@@ -311,7 +316,7 @@ extern int launch_common_create_job_step(srun_job_t *job, bool use_all_cpus,
 			my_sleep = (getpid() % 1000) * 100 + 100000;
 		} else {
 			verbose("Job step creation still disabled, retrying");
-			my_sleep = MIN((my_sleep * 2), 29000000);
+			my_sleep = MIN((my_sleep * 2), max_sleep);
 		}
 		/* sleep 0.1 to 29 secs with exponential back-off */
 		usleep(my_sleep);
diff --git a/src/sview/job_info.c b/src/sview/job_info.c
index a9489ac8d58a360a5b759de3def36793dd282478..86b96bf7576862cfed78e84f701ce706b1ec9897 100644
--- a/src/sview/job_info.c
+++ b/src/sview/job_info.c
@@ -2630,7 +2630,7 @@ static List _create_job_info_list(job_info_msg_t *job_info_ptr,
 		return NULL;
 	}
 
-	for(i=0; i<job_info_ptr->record_count; i++) {
+	for (i=0; i<job_info_ptr->record_count; i++) {
 		job_ptr = &(job_info_ptr->job_array[i]);
 
 		sview_job_info_ptr = xmalloc(sizeof(sview_job_info_t));
@@ -2656,9 +2656,10 @@ static List _create_job_info_list(job_info_msg_t *job_info_ptr,
 				   just keep tacking on ionodes to a
 				   node list */
 				sview_job_info_ptr->nodes = xstrdup(tmp_char);
-			} else
+			} else {
 				sview_job_info_ptr->nodes =
 					xstrdup(job_ptr->nodes);
+			}
 			xfree(ionodes);
 		} else
 			sview_job_info_ptr->nodes = xstrdup(job_ptr->nodes);
@@ -2666,9 +2667,10 @@ static List _create_job_info_list(job_info_msg_t *job_info_ptr,
 		if (!sview_job_info_ptr->node_cnt)
 			sview_job_info_ptr->node_cnt = _get_node_cnt(job_ptr);
 
-		for(j = 0; j < step_info_ptr->job_step_count; j++) {
+		for (j = 0; j < step_info_ptr->job_step_count; j++) {
 			step_ptr = &(step_info_ptr->job_steps[j]);
-			if (step_ptr->job_id == job_ptr->job_id) {
+			if ((step_ptr->job_id == job_ptr->job_id) &&
+			    (step_ptr->state == JOB_RUNNING)) {
 				list_append(sview_job_info_ptr->step_list,
 					    step_ptr);
 			}
@@ -3326,7 +3328,7 @@ display_it:
 	if (!display_widget) {
 		tree_view = create_treeview(local_display_data,
 					    &grid_button_list);
-		/*set multiple capability here*/
+		/* set multiple capability here */
 		gtk_tree_selection_set_mode(
 			gtk_tree_view_get_selection(tree_view),
 			GTK_SELECTION_MULTIPLE);
@@ -3334,9 +3336,8 @@ display_it:
 		gtk_table_attach_defaults(GTK_TABLE(table),
 					  GTK_WIDGET(tree_view),
 					  0, 1, 0, 1);
-		/* since this function sets the model of the tree_view
-		   to the treestore we don't really care about
-		   the return value */
+		/* since this function sets the model of the tree_view to the
+		 * treestore we don't really care about the return value */
 		create_treestore(tree_view, display_data_job,
 				 SORTID_CNT, SORTID_TIME_SUBMIT, SORTID_COLOR);
 	}
@@ -3458,9 +3459,8 @@ display_it:
 		gtk_table_attach_defaults(popup_win->table,
 					  GTK_WIDGET(tree_view),
 					  0, 1, 0, 1);
-		/* since this function sets the model of the tree_view
-		   to the treestore we don't really care about
-		   the return value */
+		/* since this function sets the model of the tree_view to the
+		 * treestore we don't really care about the return value */
 		create_treestore(tree_view, popup_win->display_data,
 				 SORTID_CNT, SORTID_TIME_SUBMIT, SORTID_COLOR);
 	}
@@ -3475,7 +3475,7 @@ display_it:
 
 
 	/* just linking to another list, don't free the inside, just
-	   the list */
+	 * the list */
 	send_info_list = list_create(NULL);
 	itr = list_iterator_create(info_list);
 	i = -1;
@@ -3498,19 +3498,21 @@ display_it:
 				if (job_ptr->job_id != search_info->int_data) {
 					continue;
 				}
+#if 0
 				/* if we ever want to display just the step
-				   this is where we would do it */
-/* 				if (spec_info->search_info->int_data2 */
-/* 				   == NO_VAL) */
-/* 				break; */
-/* 			step_itr = list_iterator_create( */
-/* 				sview_job_info->step_list); */
-/* 			while ((step_ptr = list_next(itr))) { */
-/* 				if (step_ptr->step_id  */
-/* 				   == spec_info->search_info->int_data2) { */
-/* 					break; */
-/* 				} */
-/* 			} */
+				 * this is where we would do it */
+				if (spec_info->search_info->int_data2
+				    == NO_VAL)
+					break;
+				step_itr = list_iterator_create(
+					sview_job_info->step_list);
+				while ((step_ptr = list_next(itr))) {
+					if (step_ptr->step_id ==
+					    spec_info->search_info->int_data2) {
+						break;
+					}
+				}
+#endif
 				break;
 			case SEARCH_JOB_USER:
 				if (!search_info->gchar_data)