diff --git a/src/srun/allocate.c b/src/srun/allocate.c
index d8c79133d8ad0ea8c68951455626480206f42941..f27c137cfd80e6d5055a71e42f749aec4dee6740 100644
--- a/src/srun/allocate.c
+++ b/src/srun/allocate.c
@@ -98,27 +98,17 @@ allocate_nodes(void)
 	return resp;
 }
 
-/* Return jobid from environment
- *
+/*
  * Returns jobid if SLURM_JOBID was set in the user's environment
- * else returns 0
+ *  or if the --jobid option was given, else returns 0
  */
 uint32_t
 jobid_from_env(void)
 {
-	char *p, *q;
-	uint32_t jobid;
-
-	if (!(p = getenv("SLURM_JOBID")))
-		return 0;
-
-	jobid = (uint32_t) strtoul(p, &q, 10);
-	if (*q != '\0') {
-		error ("Invalid value for SLURM_JOBID: `%s'", p);
-		return 0;
-	} 
-
-	return jobid;
+	if (opt.jobid != NO_VAL)
+		return ((uint32_t) opt.jobid);
+	else
+		return (0);
 }
 
 resource_allocation_response_msg_t *
diff --git a/src/srun/job.c b/src/srun/job.c
index 57d8ae95cfbd276ac6a5483bd262a02d03fe3bf5..c4082d7f0ca6324fd542b3e8188e5bc18950be4c 100644
--- a/src/srun/job.c
+++ b/src/srun/job.c
@@ -648,8 +648,8 @@ _relative_hosts(hostlist_t hl)
 	 */
 	n = hostlist_count(rlist);
 	if (n < opt.min_nodes) {
-		info("Warning: Only %d node(s) beyond relative location "
-			"specified, resetting nnodes to %d", n, n);
+		info("Warning: Only %d node%s available in relative set, "
+		     "resetting nnodes to %d", n, (n == 1 ? "" : "s"), n);
 		opt.min_nodes = n;
 	}
 
diff --git a/src/srun/launch.c b/src/srun/launch.c
index ac337f33d77ce8472768aabd1a540470e6afa232..d70d555fcdd9a060312fc93cf5bc66bfd6c726d9 100644
--- a/src/srun/launch.c
+++ b/src/srun/launch.c
@@ -370,7 +370,7 @@ _send_msg_rc(slurm_msg_t *msg)
 	int rc     = 0;
 	int errnum = 0;
 
-       	if ((rc = slurm_send_recv_rc_msg(msg, &errnum, 0)) < 0) 
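+	/* opt.msg_timeout is settable via the hidden --msg-timeout option */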
+       	if ((rc = slurm_send_recv_rc_msg(msg, &errnum, opt.msg_timeout)) < 0) 
 		return SLURM_ERROR;
 
 	if (errnum != 0)
diff --git a/src/srun/msg.c b/src/srun/msg.c
index 09beab07f69612246cfd931ae57f81363921e753..355b1e0b864ca4d815f0c012b4e8eb0cab403233 100644
--- a/src/srun/msg.c
+++ b/src/srun/msg.c
@@ -100,59 +100,29 @@ static void     _print_pid_list(const char *host, int ntasks,
 
 
 #ifdef HAVE_TOTALVIEW
-/* Convert node name to address string, eg. "123.45.67.8", 
- *	also return the index in the job table (-1 if not found) */
-static char *
-_node_name_to_addr(const char *name, job_t *job, int *inx)
-{
-	int i;
-	char *buf = xmalloc(28);
-	char *colon;
-
-	for (i=0; i<job->nhosts; i++) {
-		if (strcmp(name, job->host[i]))
-			continue;
-		slurm_print_slurm_addr(&job->slurmd_addr[i], buf, 128);
-		/* This returns address:port, we need to remove ":port" */
-		colon = strchr(buf, (int)':');
-		if (colon)
-			colon[0] = '\0'; 
-		*inx = i;
-		return buf;
-	}
-
-	error("_node_name_to_addr error on %s", name);
-	*inx = -1;
-	return NULL;
-}
-
+/*
+ * Install entries in MPIR_proctable for the `ntasks' tasks running
+ *  on the node with id `nodeid', whose node-local pids are in `pid'
+ */
 static void
-_build_tv_list(job_t *job, char *host, int ntasks, uint32_t *pid)
+_build_tv_list(job_t *job, char *host, int nodeid, int ntasks, uint32_t *pid)
 {
-	MPIR_PROCDESC * tv_tasks;
-	int i, node_inx, task_id;
-	char *node_addr;
+	int i;
 	static int tasks_recorded = 0;
 
-	node_addr = _node_name_to_addr(host, job, &node_inx);
-	if ((node_addr == NULL) || (node_inx < 0))
-		return;
-
 	if (MPIR_proctable_size == 0) {
 		MPIR_proctable_size = opt.nprocs;
 		MPIR_proctable = xmalloc(sizeof(MPIR_PROCDESC) * opt.nprocs);
 	}
 
 	for (i = 0; i < ntasks; i++) {
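+		/* MPIR_proctable is indexed by the task's global id */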
+		int taskid          = job->tids[nodeid][i];
+		MPIR_PROCDESC *tv   = &MPIR_proctable[taskid];
+		tv->host_name       = job->host[nodeid];
+		tv->executable_name = remote_argv[0];
+		tv->pid             = pid[i];
+
 		tasks_recorded++;
-		task_id = job->tids[node_inx][i];
-		tv_tasks = &MPIR_proctable[task_id];
-		tv_tasks->host_name = node_addr;
-		tv_tasks->executable_name = remote_argv[0];
-		tv_tasks->pid = pid[i];
-		debug("task=%d host=%s executable=%s pid=%d", task_id,
-		      tv_tasks->host_name, tv_tasks->executable_name, 
-		      tv_tasks->pid);
 	}
 
 	if (tasks_recorded == opt.nprocs) {
@@ -185,26 +155,21 @@ static bool _job_msg_done(job_t *job)
 static void
 _process_launch_resp(job_t *job, launch_tasks_response_msg_t *msg)
 {
-	if ((msg->srun_node_id >= 0) 
-	    && (msg->srun_node_id < job->nhosts)) {
+	if ((msg->srun_node_id < 0) || (msg->srun_node_id >= job->nhosts)) {
+		error ("Bad node id %d in launch response from %s",
+		       msg->srun_node_id, msg->node_name);
+		return;
+	}
 
-		pthread_mutex_lock(&job->task_mutex);
-		job->host_state[msg->srun_node_id] = SRUN_HOST_REPLIED;
-		pthread_mutex_unlock(&job->task_mutex);
+	pthread_mutex_lock(&job->task_mutex);
+	job->host_state[msg->srun_node_id] = SRUN_HOST_REPLIED;
+	pthread_mutex_unlock(&job->task_mutex);
 #ifdef HAVE_TOTALVIEW
-		_build_tv_list( job, msg->node_name, msg->count_of_pids,
-				msg->local_pids );
+	_build_tv_list( job, msg->node_name, msg->srun_node_id, 
+			msg->count_of_pids,  msg->local_pids   );
 #endif
-		_print_pid_list( msg->node_name, msg->count_of_pids, 
-				 msg->local_pids, remote_argv[0] );
+	_print_pid_list( msg->node_name, msg->count_of_pids, 
+			msg->local_pids, remote_argv[0]     );
 
-	} else {
-		error("launch resp from %s has bad task id %d",
-				msg->node_name, msg->srun_node_id);
-#ifdef HAVE_TOTALVIEW
-		tv_launch_failure();
-#endif
-	}
 }
 
 static void
@@ -349,7 +314,8 @@ _reattach_handler(job_t *job, slurm_msg_t *msg)
 		resp->executable_name = NULL; /* nothing left to free */
 		remote_argv[1] = NULL;
 	}
-	_build_tv_list(job, resp->node_name, resp->ntasks, resp->local_pids);
+	_build_tv_list(job, resp->node_name, resp->srun_node_id,
+	                    resp->ntasks, resp->local_pids      );
 #endif
 	_print_pid_list(resp->node_name, resp->ntasks, resp->local_pids, 
 			resp->executable_name);
diff --git a/src/srun/opt.c b/src/srun/opt.c
index 2ab38ac2a398032d36e7733c19a377a29e227a63..a9f165bc3ddc76fbf132a8e2b34a4ae18db1e8f6 100644
--- a/src/srun/opt.c
+++ b/src/srun/opt.c
@@ -106,6 +106,7 @@
 #define OPT_OVERCOMMIT	0x1a
 #define OPT_HOLD	0x1b
 #define OPT_RELATIVE    0x1c
+#define OPT_JOBID       0x1d
 
 /* constraint type options */
 #define OPT_MINCPUS     0x50
@@ -223,6 +224,8 @@ struct poptOption runTable[] = {
 	{"job-name", 'J', POPT_ARG_STRING, &opt.job_name, 0,
 	 "name of job",
 	 "jobname"},
+	{"jobid", '\0', POPT_ARG_INT, NULL, OPT_JOBID,
+	 "run under an already allocated job", "id"},
 	{"output", 'o', POPT_ARG_STRING, 0, OPT_OUTPUT,
 	 "location of stdout redirection",
 	 "out"},
@@ -245,10 +248,12 @@ struct poptOption runTable[] = {
 	{"wait", 'W', POPT_ARG_INT, &opt.max_wait, OPT_WAIT,
 	 "seconds to wait after first task ends before killing job",
 	 "sec"},
-	{"max-launch-time", '\0', POPT_ARG_INT | POPT_ARGFLAG_DOC_HIDDEN, 
-	  &opt.max_launch_time, 0, NULL, NULL },
+	{"max-launch-time",  '\0', POPT_ARG_INT | POPT_ARGFLAG_DOC_HIDDEN, 
+	  &opt.max_launch_time,  0, NULL, NULL },
 	{"max-exit-timeout", '\0', POPT_ARG_INT | POPT_ARGFLAG_DOC_HIDDEN, 
 	  &opt.max_exit_timeout, 0, NULL, NULL },
+	{"msg-timeout",      '\0', POPT_ARG_INT | POPT_ARGFLAG_DOC_HIDDEN, 
+	  &opt.msg_timeout,      0, NULL, NULL },
 	POPT_TABLEEND
 };
 
@@ -565,6 +570,7 @@ static void _opt_default()
 	opt.max_threads = MAX_THREADS;
 
 	opt.job_name = NULL;
+	opt.jobid    = NO_VAL;
 
 	opt.distribution = SRUN_DIST_UNKNOWN;
 
@@ -580,9 +586,6 @@ static void _opt_default()
 	opt.batch = false;
 	opt.share = false;
 	opt.no_kill = false;
-#ifdef HAVE_TOTALVIEW
-	opt.totalview = _under_totalview();
-#endif
 
 	opt.immediate	= false;
 
@@ -606,9 +609,22 @@ static void _opt_default()
 	opt.exc_nodes	    = NULL;
 	opt.max_launch_time = 60; /* 60 seconds to launch job             */
 	opt.max_exit_timeout= 60; /* Warn user 60 seconds after task exit */
+	opt.msg_timeout     = 2;  /* Default launch msg timeout           */
 
 	mode	= MODE_NORMAL;
 
+#ifdef HAVE_TOTALVIEW
+	/*
+	 * Reset some default values if running under TotalView:
+	 */
+	if ((opt.totalview = _under_totalview())) {
+		opt.max_launch_time = 120;
+		opt.max_threads     = 1;
+		opt.msg_timeout     = 15;
+	}
+
+#endif
+
 }
 
 /*---[ env var processing ]-----------------------------------------------*/
@@ -630,6 +646,7 @@ struct env_vars {
 };
 
 env_vars_t env_vars[] = {
+	{"SLURM_JOBID",         OPT_INT, &opt.jobid,         NULL           },
 	{"SLURM_NPROCS",        OPT_INT, &opt.nprocs,        &opt.nprocs_set},
 	{"SLURM_CPUS_PER_TASK", OPT_INT, &opt.cpus_per_task, &opt.cpus_set  },
 	{"SLURM_PARTITION",     OPT_STRING,  &opt.partition, NULL           },
@@ -735,7 +752,7 @@ _get_int(const char *arg, const char *what)
 	char *p;
 	long int result = strtol(arg, &p, 10);
 
-	if (*p != '\0') {
+	if ((*p != '\0') || (result < 0L)) {
 		error ("Invalid numeric value \"%s\" for %s.", arg, what);
 		exit(1);
 	}
@@ -866,6 +883,10 @@ static void _opt_args(int ac, char **av)
 			}
 			break;
 
+		case OPT_JOBID:
+			opt.jobid = _get_int(arg, "jobid");
+			break;
+
 		case OPT_REALMEM:
 			opt.realmem = (int) _to_bytes(arg);
 			if (opt.realmem < 0) {
diff --git a/src/srun/opt.h b/src/srun/opt.h
index 84c54567f0ebcab29dc4dc296770ea9c8198740d..f3f7521a8672560bbf2030c0c604e6c8f3cef2fd 100644
--- a/src/srun/opt.h
+++ b/src/srun/opt.h
@@ -110,6 +110,7 @@ typedef struct srun_options {
 	enum distribution_t
 		distribution;	/* --distribution=, -m dist	*/
 	char *job_name;		/* --job-name=,     -J name	*/
+	unsigned int jobid;     /* --jobid=jobid                */
 
 	char *ofname;		/* --output -o filename         */
 	char *ifname;		/* --input  -i filename         */
@@ -151,6 +152,7 @@ typedef struct srun_options {
 	bool no_alloc;		/* --no-allocate, -Z		*/
 	int  max_launch_time;   /* Undocumented                 */
 	int  max_exit_timeout;  /* Undocumented                 */
+	int  msg_timeout;       /* Undocumented                 */
 
 } opt_t;
 
diff --git a/src/srun/srun.c b/src/srun/srun.c
index 2b6fefe1884e81a67af4b8401ea230b8b31254c9..a72c94d23e91bb5b63fb3f0c0de6a54b01c3385c 100644
--- a/src/srun/srun.c
+++ b/src/srun/srun.c
@@ -190,9 +190,12 @@ int main(int ac, char **av)
 	/* job structure should now be filled in */
 
 	/*
-	 *  Set nodelist environment variable
+	 *  Set job-related environment variables
 	 */
 	setenvf("SLURM_NODELIST=%s", job->nodelist);
+	setenvf("SLURM_JOBID=%u",    job->jobid);
+	setenvf("SLURM_NPROCS=%d",   opt.nprocs);
+	setenvf("SLURM_NNODES=%d",   job->nhosts);
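+	/* SLURM_JOBID lets a subsequent srun run within this allocation
+	 * (see jobid_from_env() and the --jobid option). */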
 
 	if (msg_thr_create(job) < 0)
 		job_fatal(job, "Unable to create msg thread");