diff --git a/doc/html/job_container_plugins.shtml b/doc/html/job_container_plugins.shtml
index 7404a4ad052dff23cad5cf36668e8c3ca22ae096..20cb9cf60ff1c84982063de406dc1040cf950898 100644
--- a/doc/html/job_container_plugins.shtml
+++ b/doc/html/job_container_plugins.shtml
@@ -82,7 +82,7 @@ Process tracking container value as set by the proctrack plugin.</p>
 the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
-<p class="commandline">int container_p_add_pid (uint32_t job_id, pid_t pid);</p>
+<p class="commandline">int container_p_add_pid (uint32_t job_id, pid_t pid, uid_t uid);</p>
 <p style="margin-left:.2in"><b>Description</b>: Add a specific process ID
 to a given job's container. The process is first placed into a process tracking
 container (PAGG).</p>
@@ -90,7 +90,9 @@ container (PAGG).</p>
 <span class="commandline"> job_id</span>&nbsp; &nbsp;&nbsp;(input)
 Job ID.<br>
 <span class="commandline"> pid</span>&nbsp; &nbsp;&nbsp;(input)
-Process ID.</p>
+Process ID.<br>
+<span class="commandline"> uid</span>&nbsp; &nbsp;&nbsp;(input)
+Owning user ID.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
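
For third-party plugin authors tracking this API change, a minimal sketch
of the revised entry point follows. It is a hypothetical no-op
implementation; plugin_type is the identifier string every Slurm plugin
defines, and debug() is the standard Slurm logging call.

    extern int container_p_add_pid(uint32_t job_id, pid_t pid, uid_t uid)
    {
        /* A real plugin would use uid when creating or entering the
         * job's container on behalf of the owning user; this stub
         * only logs the request. */
        debug("%s: add pid %d (uid %u) to container of job %u",
              plugin_type, (int) pid, (unsigned int) uid, job_id);
        return SLURM_SUCCESS;
    }
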
diff --git a/src/plugins/job_container/cncu/job_container_cncu.c b/src/plugins/job_container/cncu/job_container_cncu.c
index 71703c5b6bffe15bf0c201755f533730e9061b2e..7caa30a6702b1ae9f7878efa337e81ebe81dc733 100644
--- a/src/plugins/job_container/cncu/job_container_cncu.c
+++ b/src/plugins/job_container/cncu/job_container_cncu.c
@@ -323,7 +323,7 @@ extern int container_p_add_cont(uint32_t job_id, uint64_t cont_id)
 }
 
 /* Add a process to a job container, create the proctrack container to add */
-extern int container_p_add_pid(uint32_t job_id, pid_t pid)
+extern int container_p_add_pid(uint32_t job_id, pid_t pid, uid_t uid)
 {
 	slurmd_job_t job;
 
@@ -332,6 +332,7 @@ extern int container_p_add_pid(uint32_t job_id, pid_t pid)
 #endif
 	memset(&job, 0, sizeof(slurmd_job_t));
 	job.jmgr_pid = pid;
+	job.uid = uid;
 	if (slurm_container_create(&job) != SLURM_SUCCESS) {
 		error("%s: slurm_container_create job(%u)", plugin_type,job_id);
 		return SLURM_ERROR;
diff --git a/src/plugins/job_container/none/job_container_none.c b/src/plugins/job_container/none/job_container_none.c
index 2f18037841d2e7b52fd68c8e6d8e9fcaaa815487..cb7e7e1a5ee521245a8f30d5e483e678cadb10ed 100644
--- a/src/plugins/job_container/none/job_container_none.c
+++ b/src/plugins/job_container/none/job_container_none.c
@@ -277,7 +277,7 @@ extern int container_p_add_cont(uint32_t job_id, uint64_t cont_id)
 }
 
 /* Add a process to a job container, create the proctrack container to add */
-extern int container_p_add_pid(uint32_t job_id, pid_t pid)
+extern int container_p_add_pid(uint32_t job_id, pid_t pid, uid_t uid)
 {
 #if _DEBUG
 	slurmd_job_t job;
@@ -286,6 +286,7 @@ extern int container_p_add_pid(uint32_t job_id, pid_t pid)
 
 	memset(&job, 0, sizeof(slurmd_job_t));
 	job.jmgr_pid = pid;
+	job.uid = uid;
 	if (slurm_container_create(&job) != SLURM_SUCCESS) {
 		error("%s: slurm_container_create job(%u)", plugin_type,job_id);
 		return SLURM_ERROR;
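
Both the cncu and none plugins thread the new uid through a
stack-allocated slurmd_job_t before handing it to the proctrack layer.
Condensed, the shared pattern is:

    slurmd_job_t job;

    memset(&job, 0, sizeof(slurmd_job_t));
    job.jmgr_pid = pid;    /* process being placed in the container */
    job.uid      = uid;    /* job owner, newly threaded through */
    if (slurm_container_create(&job) != SLURM_SUCCESS) {
        error("%s: slurm_container_create job(%u)", plugin_type, job_id);
        return SLURM_ERROR;
    }
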
diff --git a/src/slurmd/common/job_container_plugin.c b/src/slurmd/common/job_container_plugin.c
index 8b6106b8475bf2847211d9009ef1345b1db8f9c9..10b0b6dd08df1522c4c55646c97a1e8f584fa4d9 100644
--- a/src/slurmd/common/job_container_plugin.c
+++ b/src/slurmd/common/job_container_plugin.c
@@ -49,7 +49,7 @@
 typedef struct job_container_ops {
 	int	(*container_p_create)	(uint32_t job_id);
 	int	(*container_p_add_cont)	(uint32_t job_id, uint64_t cont_id);
-	int	(*container_p_add_pid)	(uint32_t job_id, pid_t pid);
+	int	(*container_p_add_pid)	(uint32_t job_id, pid_t pid, uid_t uid);
 	int	(*container_p_delete)	(uint32_t job_id);
 	int	(*container_p_restore)	(char *dir_name, bool recover);
 
@@ -193,7 +193,7 @@ extern int container_g_create(uint32_t job_id)
 /* Add a process to the specified job's container.
 * A proctrack container will be generated containing the process
  * before container_g_add_cont() is called (see below). */
-extern int container_g_add_pid(uint32_t job_id, pid_t pid)
+extern int container_g_add_pid(uint32_t job_id, pid_t pid, uid_t uid)
 {
 	int i, rc = SLURM_SUCCESS;
 
@@ -203,7 +203,7 @@ extern int container_g_add_pid(uint32_t job_id, pid_t pid)
 	slurm_mutex_lock(&g_container_context_lock);
 	for (i = 0; ((i < g_container_context_num) && (rc == SLURM_SUCCESS));
 	     i++) {
-		rc = (*(ops[i].container_p_add_pid))(job_id, pid);
+		rc = (*(ops[i].container_p_add_pid))(job_id, pid, uid);
 	}
 	slurm_mutex_unlock(&g_container_context_lock);
 
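
Callers never invoke a plugin directly; container_g_add_pid() fans the
call out to every loaded job_container plugin under a mutex. A usage
sketch with hypothetical variable names (job_id, job_uid), mirroring the
fork-then-register pattern this patch applies throughout slurmd:

    pid_t cpid = fork();
    if (cpid == 0) {
        /* child: exec the helper program here */
        _exit(127);
    } else if (cpid > 0) {
        /* parent: register the child under the job owner's uid so
         * the container plugin can account it correctly */
        if (container_g_add_pid(job_id, cpid, job_uid) != SLURM_SUCCESS)
            error("container_g_add_pid(%u): %m", job_id);
    }
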
diff --git a/src/slurmd/common/job_container_plugin.h b/src/slurmd/common/job_container_plugin.h
index 6e7e624bb6f10ca3017cf86258ae0ffd338331bf..19e29c878dd0c11029c790f3069feec0cafd0fed 100644
--- a/src/slurmd/common/job_container_plugin.h
+++ b/src/slurmd/common/job_container_plugin.h
@@ -65,7 +65,7 @@ extern int container_g_create(uint32_t job_id);
 /* Add a process to the specified job's container.
 * A proctrack container will be generated containing the process
  * before container_g_add_cont() is called (see below). */
-extern int container_g_add_pid(uint32_t job_id, pid_t pid);
+extern int container_g_add_pid(uint32_t job_id, pid_t pid, uid_t uid);
 
 /* Add a proctrack container (PAGG) to the specified job's container
  * The PAGG will be the job's cont_id returned by proctrack/sgi_job */
diff --git a/src/slurmd/common/run_script.c b/src/slurmd/common/run_script.c
index bf7c28ad34f1fe8e1d7fc4fcf3019eef66733fd8..5db528ebf10209a7bdd6858b2c9389c4b9a5d8aa 100644
--- a/src/slurmd/common/run_script.c
+++ b/src/slurmd/common/run_script.c
@@ -54,11 +54,13 @@
 #include <string.h>
 #include <glob.h>
 
+#include "slurm/slurm_errno.h"
+#include "src/common/list.h"
+#include "src/common/xassert.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
-#include "src/common/xassert.h"
-#include "src/common/list.h"
 
+#include "src/slurmd/common/job_container_plugin.h"
 #include "src/slurmd/common/run_script.h"
 
 /*
@@ -106,15 +108,16 @@ int waitpid_timeout (const char *name, pid_t pid, int *pstatus, int timeout)
  * Run a prolog or epilog script (does NOT drop privileges)
  * name IN: class of program (prolog, epilog, etc.),
  * path IN: pathname of program to run
- * jobid IN: info on associated job
+ * job_id IN: ID of the associated job
  * max_wait IN: maximum time to wait in seconds, -1 for no limit
  * env IN: environment variables to use on exec, sets minimal environment
  *	if NULL
+ * uid IN: user ID of job owner
  * RET 0 on success, -1 on failure.
  */
 static int
-run_one_script(const char *name, const char *path, uint32_t jobid,
-	   int max_wait, char **env)
+_run_one_script(const char *name, const char *path, uint32_t job_id,
+		int max_wait, char **env, uid_t uid)
 {
 	int status;
 	pid_t cpid;
@@ -123,9 +126,9 @@ run_one_script(const char *name, const char *path, uint32_t jobid,
 	if (path == NULL || path[0] == '\0')
 		return 0;
 
-	if (jobid) {
+	if (job_id) {
 		debug("[job %u] attempting to run %s [%s]",
-			jobid, name, path);
+			job_id, name, path);
 	} else
 		debug("attempting to run %s [%s]", name, path);
 
@@ -154,6 +157,9 @@ run_one_script(const char *name, const char *path, uint32_t jobid,
 		exit(127);
 	}
 
+	if (container_g_add_pid(job_id, cpid, uid) != SLURM_SUCCESS)
+		error("container_g_add_pid(%u): %m", job_id);
+
 	if (waitpid_timeout(name, cpid, &status, max_wait) < 0)
 		return (-1);
 	return status;
@@ -204,8 +210,8 @@ static List _script_list_create (const char *pattern)
 	return l;
 }
 
-int run_script(const char *name, const char *pattern, uint32_t jobid,
-	   int max_wait, char **env)
+int run_script(const char *name, const char *pattern, uint32_t job_id,
+	       int max_wait, char **env, uid_t uid)
 {
 	int rc = 0;
 	List l;
@@ -221,7 +227,7 @@ int run_script(const char *name, const char *pattern, uint32_t jobid,
 
 	i = list_iterator_create (l);
 	while ((s = list_next (i))) {
-		rc = run_one_script (name, s, jobid, max_wait, env);
+		rc = _run_one_script (name, s, job_id, max_wait, env, uid);
 		if (rc) {
 			error ("%s: exited with status 0x%04x\n", s, rc);
 			break;
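
Every run_script() caller must now supply the owning uid as the final
argument. A hedged usage sketch (the glob pattern and timeout are
hypothetical; uid 0 is passed because a health check is not tied to any
job owner, as in the req.c hunk below):

    char *env[1] = { NULL };
    int rc = run_script("health_check", "/etc/slurm/health_check*",
                        0 /* no job */, 60, env, 0 /* root */);
    if (rc)
        error("health_check exited with status 0x%04x", rc);
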
diff --git a/src/slurmd/common/run_script.h b/src/slurmd/common/run_script.h
index 0ba175ad319d1a550d34cbbb0d609de0c88a49a9..9beb815ba9582c297f333ebd2bdd14a7cb8872aa 100644
--- a/src/slurmd/common/run_script.h
+++ b/src/slurmd/common/run_script.h
@@ -62,9 +62,10 @@ int waitpid_timeout (const char *name, pid_t pid, int *pstatus, int timeout);
  * max_wait IN: maximum time to wait in seconds, -1 for no limit
  * env IN: environment variables to use on exec, sets minimal environment 
  *	if NULL
+ * uid IN: user ID of job owner
  * RET 0 on success, -1 on failure.
  */
 int run_script(const char *name, const char *path, uint32_t jobid, 
-	       int max_wait, char **env);
+	       int max_wait, char **env, uid_t uid);
 
 #endif /* _RUN_SCRIPT_H */
diff --git a/src/slurmd/slurmd/req.c b/src/slurmd/slurmd/req.c
index 8ed4df59e578bb7f1c2f906c3f977f295581f79c..e99fc1c83844633207c8279c1d9250c24bcc3bbf 100644
--- a/src/slurmd/slurmd/req.c
+++ b/src/slurmd/slurmd/req.c
@@ -1070,11 +1070,11 @@ _rpc_launch_tasks(slurm_msg_t *msg)
 #ifndef HAVE_FRONT_END
 	if (first_job_run) {
 		int rc;
+		if (container_g_create(req->job_id))
+			error("container_g_create(%u): %m", req->job_id);
 		rc =  _run_prolog(req->job_id, req->uid, NULL,
 				  req->spank_job_env, req->spank_job_env_size,
 				  req->complete_nodelist);
-		if (container_g_create(req->job_id))
-			error("container_g_create(%u): %m", req->job_id);
 		if (rc) {
 			int term_sig, exit_status;
 			if (WIFSIGNALED(rc)) {
@@ -1405,11 +1405,11 @@ _rpc_batch_job(slurm_msg_t *msg, bool new_msg)
 		resv_id = select_g_select_jobinfo_xstrdup(req->select_jobinfo,
 							  SELECT_PRINT_RESV_ID);
 #endif
+		if (container_g_create(req->job_id))
+			error("container_g_create(%u): %m", req->job_id);
 		rc = _run_prolog(req->job_id, req->uid, resv_id,
 				 req->spank_job_env, req->spank_job_env_size,
 				 req->nodes);
-		if (container_g_create(req->job_id))
-			error("container_g_create(%u): %m", req->job_id);
 		xfree(resv_id);
 		if (rc) {
 			int term_sig, exit_status;
@@ -2001,7 +2001,7 @@ _rpc_health_check(slurm_msg_t *msg)
 	if ((rc == SLURM_SUCCESS) && (conf->health_check_program)) {
 		char *env[1] = { NULL };
 		rc = run_script("health_check", conf->health_check_program,
-				0, 60, env);
+				0, 60, env, 0);
 	}
 
 	/* Take this opportunity to enforce any job memory limits */
@@ -2779,26 +2779,14 @@ _rpc_file_bcast(slurm_msg_t *msg)
 		error("sbcast: fork failure");
 		return errno;
 	} else if (child > 0) {
+		if (container_g_add_pid(job_id, child, req_uid) !=
+		    SLURM_SUCCESS)
+			error("container_g_add_pid(%u): %m", job_id);
 		waitpid(child, &rc, 0);
 		xfree(groups);
 		return WEXITSTATUS(rc);
 	}
 
-#ifdef HAVE_REAL_CRAY
-	/* Cray systems require files be created within the job's container */
-	setpgrp();
-	job.cont_id = 0;
-	job.jmgr_pid = getpid();
-	job.pgid = getpgid(job.jmgr_pid);
-	job.uid = req_uid;
-	if ((rc = slurm_container_create(&job))) {
-		error("sbcast: slurm_container_create(%u): %m", job_id);
-		return rc;
-	}
-	slurm_container_add(&job, job.jmgr_pid);
-	container_g_add(job_id, job.cont_id);
-#endif
-
 	/* The child actually performs the I/O and exits with
 	 * a return code, do not return! */
 
@@ -4277,14 +4265,14 @@ _destroy_env(char **env)
 }
 
 static int
-run_spank_job_script (const char *mode, char **env)
+_run_spank_job_script (const char *mode, char **env, uint32_t job_id, uid_t uid)
 {
 	pid_t cpid;
 	int status = 0;
 	int pfds[2];
 
 	if (pipe (pfds) < 0) {
-		error ("run_spank_job_script: pipe: %m");
+		error ("_run_spank_job_script: pipe: %m");
 		return (-1);
 	}
 
@@ -4315,6 +4303,8 @@ run_spank_job_script (const char *mode, char **env)
 		exit (127);
 	}
 
+	if (container_g_add_pid(job_id, cpid, uid) != SLURM_SUCCESS)
+		error("container_g_add_pid(%u): %m", job_id);
 	close (pfds[0]);
 
 	if (_send_slurmd_conf_lite (pfds[1], conf) < 0)
@@ -4341,7 +4331,7 @@ run_spank_job_script (const char *mode, char **env)
 }
 
 static int _run_job_script(const char *name, const char *path,
-		uint32_t jobid, int timeout, char **env)
+			   uint32_t jobid, int timeout, char **env, uid_t uid)
 {
 	int status, rc;
 	/*
@@ -4350,8 +4340,8 @@ static int _run_job_script(const char *name, const char *path,
 	 *   If both "script" mechanisms fail, prefer to return the "real"
 	 *   prolog/epilog status.
 	 */
-	status = run_spank_job_script(name, env);
-	if ((rc = run_script(name, path, jobid, timeout, env)))
+	status = _run_spank_job_script(name, env, jobid, uid);
+	if ((rc = run_script(name, path, jobid, timeout, env, uid)))
 		status = rc;
 	return (status);
 }
@@ -4373,7 +4363,7 @@ _run_prolog(uint32_t jobid, uid_t uid, char *resv_id,
 	slurm_mutex_unlock(&conf->config_mutex);
 	_add_job_running_prolog(jobid);
 
-	rc = _run_job_script("prolog", my_prolog, jobid, -1, my_env);
+	rc = _run_job_script("prolog", my_prolog, jobid, -1, my_env, uid);
 	_remove_job_running_prolog(jobid);
 	xfree(my_prolog);
 	_destroy_env(my_env);
@@ -4449,7 +4439,7 @@ _run_prolog(uint32_t jobid, uid_t uid, char *resv_id,
 	timer_struct.timer_cond  = &timer_cond;
 	timer_struct.timer_mutex = &timer_mutex;
 	pthread_create(&timer_id, &timer_attr, &_prolog_timer, &timer_struct);
-	rc = _run_job_script("prolog", my_prolog, jobid, -1, my_env);
+	rc = _run_job_script("prolog", my_prolog, jobid, -1, my_env, uid);
 	slurm_mutex_lock(&timer_mutex);
 	prolog_fini = true;
 	pthread_cond_broadcast(&timer_cond);
@@ -4488,7 +4478,8 @@ _run_epilog(uint32_t jobid, uid_t uid, char *resv_id,
 	slurm_mutex_unlock(&conf->config_mutex);
 
 	_wait_for_job_running_prolog(jobid);
-	error_code = _run_job_script("epilog", my_epilog, jobid, -1, my_env);
+	error_code = _run_job_script("epilog", my_epilog, jobid, -1, my_env,
+				     uid);
 	xfree(my_epilog);
 	_destroy_env(my_env);
 
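
The reordering in _rpc_launch_tasks() and _rpc_batch_job() is deliberate:
the job container must exist before the prolog forks, because
run_script() now places each prolog child into the container via
container_g_add_pid(). Condensed:

    /* create the container first ... */
    if (container_g_create(req->job_id))
        error("container_g_create(%u): %m", req->job_id);
    /* ... then run the prolog, whose children are added to it */
    rc = _run_prolog(req->job_id, req->uid, resv_id,
                     req->spank_job_env, req->spank_job_env_size,
                     req->nodes);
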
diff --git a/src/slurmd/slurmstepd/mgr.c b/src/slurmd/slurmstepd/mgr.c
index 0bc00cebd4da65e7bccf476bb316d0478145afa0..d39bddaad25983b4fcf155f484375320d6611802 100644
--- a/src/slurmd/slurmstepd/mgr.c
+++ b/src/slurmd/slurmstepd/mgr.c
@@ -933,8 +933,6 @@ job_manager(stepd_step_rec_t *job)
 		rc = ESLURMD_SETUP_ENVIRONMENT_ERROR;
 		goto fail1;
 	}
-	if (container_g_add_cont(job->jobid, job->cont_id) != SLURM_SUCCESS)
-		error("container_g_add_cont: %m");
 
 #ifdef HAVE_ALPS_CRAY
 	/*
@@ -1551,6 +1549,8 @@ _fork_all_tasks(stepd_step_rec_t *job, bool *io_initialized)
 		}
 	}
 //	jobacct_gather_set_proctrack_container_id(job->cont_id);
+	if (container_g_add_cont(job->jobid, job->cont_id) != SLURM_SUCCESS)
+		error("container_g_add_cont(%u): %m", job->jobid);
 
 	/*
 	 * Now it's ok to unblock the tasks, so they may call exec.
@@ -2416,12 +2416,6 @@ _run_script_as_user(const char *name, const char *path, stepd_step_rec_t *job,
 		return -1;
 	}
 
-	if ((job->cont_id == 0) &&
-	    (slurm_container_create(job) != SLURM_SUCCESS))
-		error("slurm_container_create: %m");
-	if (container_g_add_cont(job->jobid, job->cont_id) != SLURM_SUCCESS)
-		error("container_g_add_cont: %m");
-
 	if ((ei = fork_child_with_wait_info(0)) == NULL) {
 		error ("executing %s: fork: %m", name);
 		return -1;
@@ -2463,8 +2457,9 @@ _run_script_as_user(const char *name, const char *path, stepd_step_rec_t *job,
 		exit(127);
 	}
 
-	if (slurm_container_add(job, cpid) != SLURM_SUCCESS)
-		error("slurm_container_add: %m");
+	if ((job->jobid != 0) &&	/* Ignore system processes */
+	    (container_g_add_pid(job->jobid, cpid, job->uid) != SLURM_SUCCESS))
+		error("container_g_add_cont: %m");
 
 	if (exec_wait_signal_child (ei) < 0)
 		error ("run_script_as_user: Failed to wakeup %s", name);
diff --git a/src/slurmd/slurmstepd/step_terminate_monitor.c b/src/slurmd/slurmstepd/step_terminate_monitor.c
index 4185ee6c3990d80f43d9b470ac0301fb83e7a924..93f5d56ab348094f0a69706cb49523050ac5b3b8 100644
--- a/src/slurmd/slurmstepd/step_terminate_monitor.c
+++ b/src/slurmd/slurmstepd/step_terminate_monitor.c
@@ -33,6 +33,7 @@
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
 #include "src/common/read_config.h"
+#include "src/slurmd/common/job_container_plugin.h"
 #include "src/slurmd/slurmstepd/step_terminate_monitor.h"
 
 #if defined(__NetBSD__)
@@ -58,8 +59,8 @@ static char *program_name;
 static uint32_t recorded_jobid = NO_VAL;
 static uint32_t recorded_stepid = NO_VAL;
 
-static void *monitor(void *);
-static int call_external_program(void);
+static void *_monitor(void *);
+static int _call_external_program(void);
 
 void step_terminate_monitor_start(uint32_t jobid, uint32_t stepid)
 {
@@ -85,7 +86,7 @@ void step_terminate_monitor_start(uint32_t jobid, uint32_t stepid)
 	slurm_conf_unlock();
 
 	slurm_attr_init(&attr);
-	pthread_create(&tid, &attr, monitor, NULL);
+	pthread_create(&tid, &attr, _monitor, NULL);
 	slurm_attr_destroy(&attr);
 	running_flag = 1;
 	recorded_jobid = jobid;
@@ -124,12 +125,12 @@ void step_terminate_monitor_stop(void)
 }
 
 
-static void *monitor(void *notused)
+static void *_monitor(void *notused)
 {
 	struct timespec ts = {0, 0};
 	int rc;
 
-	info("monitor is running");
+	info("_monitor is running");
 
 	ts.tv_sec = time(NULL) + 1 + timeout;
 
@@ -139,19 +140,19 @@ static void *monitor(void *notused)
 
 	rc = pthread_cond_timedwait(&cond, &lock, &ts);
 	if (rc == ETIMEDOUT) {
-		call_external_program();
+		_call_external_program();
 	} else if (rc != 0) {
-		error("Error waiting on condition in monitor: %m");
+		error("Error waiting on condition in _monitor: %m");
 	}
 done:
 	pthread_mutex_unlock(&lock);
 
-	info("monitor is stopping");
+	info("_monitor is stopping");
 	return NULL;
 }
 
 
-static int call_external_program(void)
+static int _call_external_program(void)
 {
 	int status, rc, opt;
 	pid_t cpid;
@@ -200,6 +201,10 @@ static int call_external_program(void)
 		exit(127);
 	}
 
+	if (container_g_add_pid(recorded_jobid, cpid, getuid()) !=
+	    SLURM_SUCCESS)
+		error("container_g_add_pid(%u): %m", recorded_jobid);
+
 	opt = WNOHANG;
 	time_remaining = max_wait;
 	while (1) {