From 15712b37d131113df134f487efa5ac1a32820d87 Mon Sep 17 00:00:00 2001
From: Nathan Yee <nyee32@schedmd.com>
Date: Tue, 23 Jul 2013 13:48:47 -0700
Subject: [PATCH] Rename various global plugin functions to follow the standard
 *_g_* naming scheme.

---
 doc/html/proctrack_plugins.shtml              |  14 +--
 doc/html/taskplugins.shtml                    |  36 +++---
 src/api/step_ctx.c                            |   8 +-
 src/common/slurm_errno.c                      |   2 +-
 src/common/slurm_protocol_defs.c              |   6 +-
 src/common/slurm_protocol_pack.c              |  34 +++---
 src/common/switch.c                           |  56 ++++-----
 src/common/switch.h                           | 112 +++++++++---------
 .../job_container/cncu/job_container_cncu.c   |   6 +-
 .../job_container/none/job_container_none.c   |   6 +-
 .../jobacct_gather/aix/jobacct_gather_aix.c   |   2 +-
 .../cgroup/jobacct_gather_cgroup.c            |   2 +-
 .../linux/jobacct_gather_linux.c              |   2 +-
 src/plugins/proctrack/aix/proctrack_aix.c     |  22 ++--
 .../proctrack/cgroup/proctrack_cgroup.c       |  44 +++----
 src/plugins/proctrack/cray/proctrack_cray.c   |  20 ++--
 .../proctrack/linuxproc/proctrack_linuxproc.c |  20 ++--
 src/plugins/proctrack/lua/proctrack_lua.c     |  64 +++++-----
 src/plugins/proctrack/pgid/Makefile.in        |  22 ++--
 src/plugins/proctrack/pgid/proctrack_pgid.c   |  20 ++--
 .../proctrack/sgi_job/proctrack_sgi_job.c     |  16 +--
 src/plugins/switch/nrt/nrt.c                  |   2 +-
 src/plugins/task/affinity/task_affinity.c     |  96 +++++++--------
 src/plugins/task/cgroup/task_cgroup.c         |  58 ++++-----
 src/plugins/task/cray/task_cray.c             |  76 ++++++------
 src/plugins/task/none/task_none.c             |  76 ++++++------
 src/slurmctld/backup.c                        |   2 +-
 src/slurmctld/controller.c                    |   4 +-
 src/slurmctld/job_mgr.c                       |   4 +-
 src/slurmctld/step_mgr.c                      |  22 ++--
 src/slurmd/common/proctrack.c                 |  54 ++++-----
 src/slurmd/common/proctrack.h                 |  30 ++---
 src/slurmd/common/task_plugin.c               |  45 +++----
 src/slurmd/common/task_plugin.h               |  24 ++--
 src/slurmd/slurmd/req.c                       |  22 ++--
 src/slurmd/slurmd/slurmd.c                    |   4 +-
 src/slurmd/slurmstepd/mgr.c                   |  48 ++++----
 src/slurmd/slurmstepd/req.c                   |  14 +--
 src/slurmd/slurmstepd/task.c                  |   4 +-
 39 files changed, 551 insertions(+), 548 deletions(-)

diff --git a/doc/html/proctrack_plugins.shtml b/doc/html/proctrack_plugins.shtml
index 04840d89d97..165a5c9cb20 100644
--- a/doc/html/proctrack_plugins.shtml
+++ b/doc/html/proctrack_plugins.shtml
@@ -79,10 +79,10 @@ Successful API calls are not required to reset errno to a known value.</p>
 <p>The following functions must appear. Functions which are not implemented should
 be stubbed.</p>
 
-<p class="commandline">int slurm_container_plugin_create (stepd_step_rec_t *job);</p>
+<p class="commandline">int proctrack_p_plugin_create (stepd_step_rec_t *job);</p>
 <p style="margin-left:.2in"><b>Description</b>: Create a container.
 The caller should ensure that
-<span class="commandline">slurm_container_plugin_destroy()</span> is called.
+<span class="commandline">proctrack_p_plugin_destroy()</span> is called.
 This function must put the container ID directly in the job structure's
 variable <i>cont_id</i>.</p>
 <p style="margin-left:.2in"><b>Argument</b>:
@@ -92,7 +92,7 @@ Pointer to a slurmd job structure.</p>
 the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
-<p class="commandline">int slurm_container_plugin_add (stepd_step_rec_t *job, pid_t pid);</p>
+<p class="commandline">int proctrack_p_plugin_add (stepd_step_rec_t *job, pid_t pid);</p>
 <p style="margin-left:.2in"><b>Description</b>: Add a specific process ID
 to a given job step's container.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
@@ -104,7 +104,7 @@ The ID of the process to add to this job's container.</p>
 the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
-<p class="commandline">int slurm_container_plugin_signal (uint64_t id, int signal);</p>
+<p class="commandline">int proctrack_p_plugin_signal (uint64_t id, int signal);</p>
 <p style="margin-left:.2in"><b>Description</b>: Signal all processes in a given
 job step container.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
@@ -120,7 +120,7 @@ its errno to an appropriate value to indicate the reason for failure.</p>
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p class="commandline">int slurm_container_plugin_destroy (uint64_t id);</p>
+<p class="commandline">int proctrack_p_plugin_destroy (uint64_t id);</p>
 <p style="margin-left:.2in"><b>Description</b>: Destroy or otherwise
 invalidate a job step container.
 This does not imply the container is empty, just that it is no longer
@@ -132,7 +132,7 @@ Job step container's ID.</p>
 the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
-<p class="commandline">uint64_t slurm_container_plugin_find (pid_t pid);</p>
+<p class="commandline">uint64_t proctrack_p_plugin_find (pid_t pid);</p>
 <p style="margin-left:.2in"><b>Description</b>:
 Given a process ID, return its job step container ID.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
@@ -141,7 +141,7 @@ A process ID.</p>
 <p style="margin-left:.2in"><b>Returns</b>: The job step container ID
 with this process or zero if none is found.</p>
 
-<p class="commandline">uint32_t slurm_container_plugin_get_pids (uint64_t cont_id, pid_t **pids, int *npids);</p>
+<p class="commandline">uint32_t proctrack_p_plugin_get_pids (uint64_t cont_id, pid_t **pids, int *npids);</p>
 <p style="margin-left:.2in"><b>Description</b>:
 Given a process container ID, fill in all the process IDs in the container.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
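
For orientation only, a minimal proctrack plugin skeleton using the renamed entry points documented above could look like the sketch below. It is not part of this patch: the include paths and plugin boilerplate are assumptions modeled on the proctrack/none and proctrack/linuxproc plugins further down, the usual init()/fini() entry points and the has_pid/wait hooks are omitted, and every hook is stubbed to succeed.

    #include <stdint.h>
    #include <sys/types.h>

    #include "slurm/slurm_errno.h"
    #include "src/slurmd/slurmstepd/slurmstepd_job.h"

    const char plugin_name[]      = "Process tracking example plugin";
    const char plugin_type[]      = "proctrack/example";
    const uint32_t plugin_version = 91;

    /* Use the slurmstepd job manager's pid as the container ID. */
    extern int proctrack_p_plugin_create(stepd_step_rec_t *job)
    {
        job->cont_id = (uint64_t) job->jmgr_pid;
        return SLURM_SUCCESS;
    }

    /* Nothing to do here: children of the step manager are tracked implicitly. */
    extern int proctrack_p_plugin_add(stepd_step_rec_t *job, pid_t pid)
    {
        return SLURM_SUCCESS;
    }

    extern int proctrack_p_plugin_signal(uint64_t id, int signal)
    {
        return SLURM_SUCCESS;   /* a real plugin signals every pid in container "id" */
    }

    extern int proctrack_p_plugin_destroy(uint64_t id)
    {
        return SLURM_SUCCESS;
    }

    extern uint64_t proctrack_p_plugin_find(pid_t pid)
    {
        return 0;               /* zero means "no container found" */
    }

    extern int proctrack_p_plugin_get_pids(uint64_t cont_id, pid_t **pids,
                                           int *npids)
    {
        *pids = NULL;
        *npids = 0;
        return SLURM_SUCCESS;
    }
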
diff --git a/doc/html/taskplugins.shtml b/doc/html/taskplugins.shtml
index 5d23b9ae87f..5ffacadcf16 100644
--- a/doc/html/taskplugins.shtml
+++ b/doc/html/taskplugins.shtml
@@ -47,7 +47,7 @@ SLURM_ERROR.</p>
 <p>The following functions must appear. Functions which are not implemented should
 be stubbed.</p>
 
-<p class="commandline">int task_slurmd_batch_request (uint32_t job_id,
+<p class="commandline">int task_p_slurmd_batch_request (uint32_t job_id,
 batch_job_launch_msg_t *req);</p>
 <p style="margin-left:.2in"><b>Description</b>: Prepare to launch a batch job.
 Establish node, socket, and core resource availability for it.
@@ -63,7 +63,7 @@ data structure definition.</p>
 On failure, the plugin should return SLURM_ERROR and set the errno to an
 appropriate value to indicate the reason for failure.</p>
 
-<p class="commandline">int task_slurmd_launch_request (uint32_t job_id,
+<p class="commandline">int task_p_slurmd_launch_request (uint32_t job_id,
 launch_tasks_request_msg_t *req, uint32_t node_id);</p>
 <p style="margin-left:.2in"><b>Description</b>: Prepare to launch a job.
 Establish node, socket, and core resource availability for it.
@@ -82,7 +82,7 @@ ID of the node on which resources are being acquired (zero origin).</p>
 On failure, the plugin should return SLURM_ERROR and set the errno to an
 appropriate value to indicate the reason for failure.</p>
 
-<p class="commandline">int task_slurmd_reserve_resources (uint32_t job_id,
+<p class="commandline">int task_p_slurmd_reserve_resources (uint32_t job_id,
 launch_tasks_request_msg_t *req, uint32_t node_id);</p>
 <p style="margin-left:.2in"><b>Description</b>: Reserve resources for
 the initiation of a job. Executed by the <b>slurmd</b> daemon as user root.</p>
@@ -101,7 +101,7 @@ ID of the node on which the resources are being acquired
 On failure, the plugin should return SLURM_ERROR and set the errno to an
 appropriate value to indicate the reason for failure.</p>
 
-<p class="commandline">int task_slurmd_suspend_job (uint32_t job_id);</p>
+<p class="commandline">int task_p_slurmd_suspend_job (uint32_t job_id);</p>
 <p style="margin-left:.2in"><b>Description</b>: Temporarily release resources
 previously reserved for a job.
 Executed by the <b>slurmd</b> daemon as user root.</p>
@@ -112,9 +112,9 @@ ID of the job which is being suspended.</p>
 On failure, the plugin should return SLURM_ERROR and set the errno to an
 appropriate value to indicate the reason for failure.</p>
 
-<p class="commandline">int task_slurmd_resume_job (uint32_t job_id);</p>
+<p class="commandline">int task_p_slurmd_resume_job (uint32_t job_id);</p>
 <p style="margin-left:.2in"><b>Description</b>: Reclaim resources which
-were previously released using the task_slurmd_suspend_job function.
+were previously released using the task_p_slurmd_suspend_job function.
 Executed by the <b>slurmd</b> daemon as user root.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
 <span class="commandline">job_id</span>&nbsp;&nbsp;&nbsp;(input)
@@ -123,7 +123,7 @@ ID of the job which is being resumed.</p>
 On failure, the plugin should return SLURM_ERROR and set the errno to an
 appropriate value to indicate the reason for failure.</p>
 
-<p class="commandline">int task_slurmd_release_resources (uint32_t job_id);</p>
+<p class="commandline">int task_p_slurmd_release_resources (uint32_t job_id);</p>
 <p style="margin-left:.2in"><b>Description</b>: Release resources previously
 reserved for a job. Executed by the <b>slurmd</b> daemon as user root.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
@@ -133,8 +133,8 @@ ID of the job which has completed.</p>
 On failure, the plugin should return SLURM_ERROR and set the errno to an
 appropriate value to indicate the reason for failure.</p>
 
-<p class="commandline">int task_pre_setuid (stepd_step_rec_t *job);</p>
-<p style="margin-left:.2in"><b>Description</b>: task_pre_setuid() is called
+<p class="commandline">int task_p_pre_setuid (stepd_step_rec_t *job);</p>
+<p style="margin-left:.2in"><b>Description</b>: task_p_pre_setuid() is called
 before setting the UID for the user to launch his jobs.
 Executed by the <b>slurmstepd</b> program as user root.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
@@ -146,8 +146,8 @@ data structure definition.</p>
 On failure, the plugin should return SLURM_ERROR and set the errno to an
 appropriate value to indicate the reason for failure.</p>
 
-<p class="commandline">int task_pre_launch_priv (stepd_step_rec_t *job);</p>
-<p style="margin-left:.2in"><b>Description</b>: task_pre_launch_priv() is called
+<p class="commandline">int task_p_pre_launch_priv (stepd_step_rec_t *job);</p>
+<p style="margin-left:.2in"><b>Description</b>: task_p_pre_launch_priv() is called
 by each forked task just after the fork. Note that no particular task related
 information is available in the job structure at that time.
 Executed by the <b>slurmstepd</b> program as user root.</p>
@@ -160,8 +160,8 @@ data structure definition.</p>
 On failure, the plugin should return SLURM_ERROR and set the errno to an
 appropriate value to indicate the reason for failure.</p>
 
-<p class="commandline">int task_pre_launch (stepd_step_rec_t *job);</p>
-<p style="margin-left:.2in"><b>Description</b>: task_pre_launch() is called
+<p class="commandline">int task_p_pre_launch (stepd_step_rec_t *job);</p>
+<p style="margin-left:.2in"><b>Description</b>: task_p_pre_launch() is called
 prior to exec of application task.
 Executed by the <b>slurmstepd</b> program as the job's owner.
 It is followed by <b>TaskProlog</b> program (as configured in <b>slurm.conf</b>)
@@ -175,9 +175,9 @@ data structure definition.</p>
 On failure, the plugin should return SLURM_ERROR and set the errno to an
 appropriate value to indicate the reason for failure.</p>
 
-<a name="get_errno"><p class="commandline">int task_post_term
-(stepd_step_rec_t *job, slurmd_task_info_t *task);</p></a>
-<p style="margin-left:.2in"><b>Description</b>: task_term() is called
+<a name="get_errno"><p class="commandline">int task_p_post_term
+(stepd_step_rec_t *job, slurmd_task_info_t *task);</p></a>
+<p style="margin-left:.2in"><b>Description</b>: task_p_post_term() is called
 after termination of job step.
 Executed by the <b>slurmstepd</b> program as the job's owner.
 It is preceded by <b>--task-epilog</b> (from <b>srun</b> command line)
@@ -195,8 +195,8 @@ data structure definition.</p>
 On failure, the plugin should return SLURM_ERROR and set the errno to an
 appropriate value to indicate the reason for failure.</p>
 
-<p class="commandline">int task_post_step (stepd_step_rec_t *job);</p>
-<p style="margin-left:.2in"><b>Description</b>: task_post_step() is called
+<p class="commandline">int task_p_post_step (stepd_step_rec_t *job);</p>
+<p style="margin-left:.2in"><b>Description</b>: task_p_post_step() is called
 after termination of all the tasks of the job step.
 Executed by the <b>slurmstepd</b> program as user root.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
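
Similarly, a do-nothing task plugin built against the renamed task_p_* hooks could start out like this sketch. It is not part of the patch; it is modeled on the task/none plugin changed later in this series, shows only a few of the hooks, omits the plugin_name/plugin_type boilerplate, and the include paths are assumptions.

    #include <stdint.h>

    #include "slurm/slurm_errno.h"
    #include "src/common/log.h"
    #include "src/common/slurm_protocol_defs.h"
    #include "src/slurmd/slurmstepd/slurmstepd_job.h"

    /* Called in slurmd (as root) when a batch job is about to launch. */
    extern int task_p_slurmd_batch_request(uint32_t job_id,
                                           batch_job_launch_msg_t *req)
    {
        info("task_p_slurmd_batch_request: %u", job_id);
        return SLURM_SUCCESS;
    }

    /* Called in slurmstepd (still root) before switching to the job's uid,
     * e.g. to bind the step to cgroups or set affinity masks. */
    extern int task_p_pre_setuid(stepd_step_rec_t *job)
    {
        return SLURM_SUCCESS;
    }

    /* Called as the job owner immediately before exec() of each task. */
    extern int task_p_pre_launch(stepd_step_rec_t *job)
    {
        return SLURM_SUCCESS;
    }

    /* Called as root once every task of the step has terminated. */
    extern int task_p_post_step(stepd_step_rec_t *job)
    {
        return SLURM_SUCCESS;
    }
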
diff --git a/src/api/step_ctx.c b/src/api/step_ctx.c
index cf1f77bfd26..da0d4ce2d57 100644
--- a/src/api/step_ctx.c
+++ b/src/api/step_ctx.c
@@ -332,12 +332,12 @@ slurm_step_ctx_create_no_alloc (const slurm_step_ctx_params_t *step_params,
 		step_req->min_nodes,
 		step_req->num_tasks);
 
-	if (switch_alloc_jobinfo(&step_resp->switch_job) < 0)
-		fatal("switch_alloc_jobinfo: %m");
-	if (switch_build_jobinfo(step_resp->switch_job,
+	if (switch_g_alloc_jobinfo(&step_resp->switch_job) < 0)
+		fatal("switch_g_alloc_jobinfo: %m");
+	if (switch_g_build_jobinfo(step_resp->switch_job,
 				 step_resp->step_layout,
 				 step_req->network) < 0)
-		fatal("switch_build_jobinfo: %m");
+		fatal("switch_g_build_jobinfo: %m");
 
 
 
diff --git a/src/common/slurm_errno.c b/src/common/slurm_errno.c
index 9215ed76d47..6af0fee6672 100644
--- a/src/common/slurm_errno.c
+++ b/src/common/slurm_errno.c
@@ -432,7 +432,7 @@ static char *_lookup_slurm_api_errtab(int errnum)
 	if ((res == NULL) &&
 	    (errnum >= ESLURM_SWITCH_MIN) &&
 	    (errnum <= ESLURM_SWITCH_MAX))
-		res = switch_strerror(errnum);
+		res = switch_g_strerror(errnum);
 
 	return res;
 }
diff --git a/src/common/slurm_protocol_defs.c b/src/common/slurm_protocol_defs.c
index c51aa73532f..8b7310ddb2c 100644
--- a/src/common/slurm_protocol_defs.c
+++ b/src/common/slurm_protocol_defs.c
@@ -754,7 +754,7 @@ extern void slurm_free_launch_tasks_request_msg(launch_tasks_request_msg_t * msg
 	xfree(msg->restart_dir);
 
 	if (msg->switch_job)
-		switch_free_jobinfo(msg->switch_job);
+		switch_g_free_jobinfo(msg->switch_job);
 
 	if (msg->options)
 		job_options_destroy(msg->options);
@@ -912,7 +912,7 @@ extern void slurm_free_suspend_msg(suspend_msg_t *msg)
 extern void slurm_free_suspend_int_msg(suspend_int_msg_t *msg)
 {
 	if (msg) {
-		interconnect_suspend_info_free(msg->switch_info);
+		switch_g_suspend_info_free(msg->switch_info);
 		xfree(msg);
 	}
 }
@@ -2081,7 +2081,7 @@ extern void slurm_free_job_step_create_response_msg(
 		if (msg->select_jobinfo)
 			select_g_select_jobinfo_free(msg->select_jobinfo);
 		if (msg->switch_job)
-			switch_free_jobinfo(msg->switch_job);
+			switch_g_free_jobinfo(msg->switch_job);
 
 		xfree(msg);
 	}
diff --git a/src/common/slurm_protocol_pack.c b/src/common/slurm_protocol_pack.c
index 1ca89d0589f..f85402be7bc 100644
--- a/src/common/slurm_protocol_pack.c
+++ b/src/common/slurm_protocol_pack.c
@@ -3723,7 +3723,7 @@ pack_job_step_create_response_msg(job_step_create_response_msg_t * msg,
 		slurm_cred_pack(msg->cred, buffer);
 		select_g_select_jobinfo_pack(
 			msg->select_jobinfo, buffer, protocol_version);
-		switch_pack_jobinfo(msg->switch_job, buffer);
+		switch_g_pack_jobinfo(msg->switch_job, buffer);
 	} else {
 		error("pack_job_step_create_response_msg: protocol_version "
 		      "%hu not supported", protocol_version);
@@ -3757,10 +3757,10 @@ unpack_job_step_create_response_msg(job_step_create_response_msg_t ** msg,
 		if (select_g_select_jobinfo_unpack(
 			    &tmp_ptr->select_jobinfo, buffer, protocol_version))
 			goto unpack_error;
-		switch_alloc_jobinfo(&tmp_ptr->switch_job);
-		if (switch_unpack_jobinfo(tmp_ptr->switch_job, buffer)) {
-			error("switch_unpack_jobinfo: %m");
-			switch_free_jobinfo(tmp_ptr->switch_job);
+		switch_g_alloc_jobinfo(&tmp_ptr->switch_job);
+		if (switch_g_unpack_jobinfo(tmp_ptr->switch_job, buffer)) {
+			error("switch_g_unpack_jobinfo: %m");
+			switch_g_free_jobinfo(tmp_ptr->switch_job);
 			goto unpack_error;
 		}
 	} else {
@@ -6983,7 +6983,7 @@ _pack_launch_tasks_request_msg(launch_tasks_request_msg_t * msg, Buf buffer,
 		packstr(msg->task_prolog, buffer);
 		packstr(msg->task_epilog, buffer);
 		pack16(msg->slurmd_debug, buffer);
-		switch_pack_jobinfo(msg->switch_job, buffer);
+		switch_g_pack_jobinfo(msg->switch_job, buffer);
 		job_options_pack(msg->options, buffer);
 		packstr(msg->alias_list, buffer);
 		packstr(msg->complete_nodelist, buffer);
@@ -7055,7 +7055,7 @@ _pack_launch_tasks_request_msg(launch_tasks_request_msg_t * msg, Buf buffer,
 		packstr(msg->task_prolog, buffer);
 		packstr(msg->task_epilog, buffer);
 		pack16(msg->slurmd_debug, buffer);
-		switch_pack_jobinfo(msg->switch_job, buffer);
+		switch_g_pack_jobinfo(msg->switch_job, buffer);
 		job_options_pack(msg->options, buffer);
 		packstr(msg->alias_list, buffer);
 		packstr(msg->complete_nodelist, buffer);
@@ -7171,10 +7171,10 @@ _unpack_launch_tasks_request_msg(launch_tasks_request_msg_t **
 		safe_unpackstr_xmalloc(&msg->task_epilog, &uint32_tmp, buffer);
 		safe_unpack16(&msg->slurmd_debug, buffer);
 
-		switch_alloc_jobinfo(&msg->switch_job);
-		if (switch_unpack_jobinfo(msg->switch_job, buffer) < 0) {
-			error("switch_unpack_jobinfo: %m");
-			switch_free_jobinfo(msg->switch_job);
+		switch_g_alloc_jobinfo(&msg->switch_job);
+		if (switch_g_unpack_jobinfo(msg->switch_job, buffer) < 0) {
+			error("switch_g_unpack_jobinfo: %m");
+			switch_g_free_jobinfo(msg->switch_job);
 			goto unpack_error;
 		}
 		msg->options = job_options_create();
@@ -7267,10 +7267,10 @@ _unpack_launch_tasks_request_msg(launch_tasks_request_msg_t **
 		safe_unpackstr_xmalloc(&msg->task_epilog, &uint32_tmp, buffer);
 		safe_unpack16(&msg->slurmd_debug, buffer);
 
-		switch_alloc_jobinfo(&msg->switch_job);
-		if (switch_unpack_jobinfo(msg->switch_job, buffer) < 0) {
-			error("switch_unpack_jobinfo: %m");
-			switch_free_jobinfo(msg->switch_job);
+		switch_g_alloc_jobinfo(&msg->switch_job);
+		if (switch_g_unpack_jobinfo(msg->switch_job, buffer) < 0) {
+			error("switch_g_unpack_jobinfo: %m");
+			switch_g_free_jobinfo(msg->switch_job);
 			goto unpack_error;
 		}
 		msg->options = job_options_create();
@@ -9129,7 +9129,7 @@ static void _pack_suspend_int_msg(suspend_int_msg_t *msg, Buf buffer,
 	pack16(msg->op, buffer);
 	pack32(msg->job_id,  buffer);
 	pack8(msg->indf_susp, buffer);
-	interconnect_suspend_info_pack(msg->switch_info, buffer);
+	switch_g_suspend_info_pack(msg->switch_info, buffer);
 }
 
 static int  _unpack_suspend_int_msg(suspend_int_msg_t **msg_ptr, Buf buffer,
@@ -9144,7 +9144,7 @@ static int  _unpack_suspend_int_msg(suspend_int_msg_t **msg_ptr, Buf buffer,
 	safe_unpack16(&msg->op,     buffer);
 	safe_unpack32(&msg->job_id, buffer);
 	safe_unpack8(&msg->indf_susp, buffer);
-	if (interconnect_suspend_info_unpack(&msg->switch_info, buffer))
+	if (switch_g_suspend_info_unpack(&msg->switch_info, buffer))
 		goto unpack_error;
 	return SLURM_SUCCESS;
 
diff --git a/src/common/switch.c b/src/common/switch.c
index bbc91f07a55..2aea21a795d 100644
--- a/src/common/switch.c
+++ b/src/common/switch.c
@@ -1,5 +1,5 @@
 /*****************************************************************************\
- *  src/common/switch.c - Generic switch (interconnect) for slurm
+ *  src/common/switch.c - Generic switch (switch_g) for slurm
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008 Lawrence Livermore National Security.
@@ -234,7 +234,7 @@ extern int  switch_g_reconfig(void)
 	return (*(ops.reconfig))( );
 }
 
-extern int  switch_save(char *dir_name)
+extern int  switch_g_save(char *dir_name)
 {
 	if ( switch_init() < 0 )
 		return SLURM_ERROR;
@@ -242,7 +242,7 @@ extern int  switch_save(char *dir_name)
 	return (*(ops.state_save))( dir_name );
 }
 
-extern int  switch_restore(char *dir_name, bool recover)
+extern int  switch_g_restore(char *dir_name, bool recover)
 {
 	if ( switch_init() < 0 )
 		return SLURM_ERROR;
@@ -250,7 +250,7 @@ extern int  switch_restore(char *dir_name, bool recover)
 	return (*(ops.state_restore))( dir_name, recover );
 }
 
-extern int  switch_clear(void)
+extern int  switch_g_clear(void)
 {
 	if ( switch_init() < 0 )
 		return SLURM_ERROR;
@@ -258,7 +258,7 @@ extern int  switch_clear(void)
 	return (*(ops.state_clear))( );
 }
 
-extern int  switch_alloc_jobinfo(switch_jobinfo_t **jobinfo)
+extern int  switch_g_alloc_jobinfo(switch_jobinfo_t **jobinfo)
 {
 	if ( switch_init() < 0 )
 		return SLURM_ERROR;
@@ -266,7 +266,7 @@ extern int  switch_alloc_jobinfo(switch_jobinfo_t **jobinfo)
 	return (*(ops.alloc_jobinfo))( jobinfo );
 }
 
-extern int  switch_build_jobinfo(switch_jobinfo_t *jobinfo,
+extern int  switch_g_build_jobinfo(switch_jobinfo_t *jobinfo,
 				 slurm_step_layout_t *step_layout,
 				 char *network)
 {
@@ -276,7 +276,7 @@ extern int  switch_build_jobinfo(switch_jobinfo_t *jobinfo,
 	return (*(ops.build_jobinfo))( jobinfo, step_layout, network );
 }
 
-extern switch_jobinfo_t *switch_copy_jobinfo(switch_jobinfo_t *jobinfo)
+extern switch_jobinfo_t *switch_g_copy_jobinfo(switch_jobinfo_t *jobinfo)
 {
 	if ( switch_init() < 0 )
 		return NULL;
@@ -284,7 +284,7 @@ extern switch_jobinfo_t *switch_copy_jobinfo(switch_jobinfo_t *jobinfo)
 	return (*(ops.copy_jobinfo))( jobinfo );
 }
 
-extern void switch_free_jobinfo(switch_jobinfo_t *jobinfo)
+extern void switch_g_free_jobinfo(switch_jobinfo_t *jobinfo)
 {
 	if ( switch_init() < 0 )
 		return;
@@ -292,7 +292,7 @@ extern void switch_free_jobinfo(switch_jobinfo_t *jobinfo)
 	(*(ops.free_jobinfo))( jobinfo );
 }
 
-extern int switch_pack_jobinfo(switch_jobinfo_t *jobinfo, Buf buffer)
+extern int switch_g_pack_jobinfo(switch_jobinfo_t *jobinfo, Buf buffer)
 {
 	if ( switch_init() < 0 )
 		return SLURM_ERROR;
@@ -300,7 +300,7 @@ extern int switch_pack_jobinfo(switch_jobinfo_t *jobinfo, Buf buffer)
 	return (*(ops.pack_jobinfo))( jobinfo, buffer );
 }
 
-extern int switch_unpack_jobinfo(switch_jobinfo_t *jobinfo, Buf buffer)
+extern int switch_g_unpack_jobinfo(switch_jobinfo_t *jobinfo, Buf buffer)
 {
 	if ( switch_init() < 0 )
 		return SLURM_ERROR;
@@ -317,7 +317,7 @@ extern int  switch_g_get_jobinfo(switch_jobinfo_t *jobinfo,
 	return (*(ops.get_jobinfo))( jobinfo, data_type, data);
 }
 
-extern void switch_print_jobinfo(FILE *fp, switch_jobinfo_t *jobinfo)
+extern void switch_g_print_jobinfo(FILE *fp, switch_jobinfo_t *jobinfo)
 {
 	if ( switch_init() < 0 )
 		return;
@@ -325,7 +325,7 @@ extern void switch_print_jobinfo(FILE *fp, switch_jobinfo_t *jobinfo)
 	(*(ops.print_jobinfo)) (fp, jobinfo);
 }
 
-extern char *switch_sprint_jobinfo( switch_jobinfo_t *jobinfo,
+extern char *switch_g_sprint_jobinfo( switch_jobinfo_t *jobinfo,
 				    char *buf, size_t size)
 {
 	if ( switch_init() < 0 )
@@ -334,7 +334,7 @@ extern char *switch_sprint_jobinfo( switch_jobinfo_t *jobinfo,
 	return (*(ops.string_jobinfo)) (jobinfo, buf, size);
 }
 
-extern int interconnect_node_init(void)
+extern int switch_g_node_init(void)
 {
 	if ( switch_init() < 0 )
 		return SLURM_ERROR;
@@ -342,7 +342,7 @@ extern int interconnect_node_init(void)
 	return (*(ops.node_init)) ();
 }
 
-extern int interconnect_node_fini(void)
+extern int switch_g_node_fini(void)
 {
 	if ( switch_init() < 0 )
 		return SLURM_ERROR;
@@ -350,7 +350,7 @@ extern int interconnect_node_fini(void)
 	return (*(ops.node_fini)) ();
 }
 
-extern int interconnect_preinit(switch_jobinfo_t *jobinfo)
+extern int switch_g_preinit(switch_jobinfo_t *jobinfo)
 {
 	if ( switch_init() < 0 )
 		return SLURM_ERROR;
@@ -358,7 +358,7 @@ extern int interconnect_preinit(switch_jobinfo_t *jobinfo)
 	return (*(ops.job_preinit)) (jobinfo);
 }
 
-extern int interconnect_init(switch_jobinfo_t *jobinfo, uid_t uid,
+extern int switch_g_init(switch_jobinfo_t *jobinfo, uid_t uid,
 			     char *job_name)
 {
 	if ( switch_init() < 0 )
@@ -367,7 +367,7 @@ extern int interconnect_init(switch_jobinfo_t *jobinfo, uid_t uid,
 	return (*(ops.job_init)) (jobinfo, uid, job_name);
 }
 
-extern int interconnect_suspend_test(switch_jobinfo_t *jobinfo)
+extern int switch_g_suspend_test(switch_jobinfo_t *jobinfo)
 {
 	if ( switch_init() < 0 )
 		return SLURM_ERROR;
@@ -375,7 +375,7 @@ extern int interconnect_suspend_test(switch_jobinfo_t *jobinfo)
 	return (*(ops.job_suspend_test)) (jobinfo);
 }
 
-extern void interconnect_suspend_info_get(switch_jobinfo_t *jobinfo,
+extern void switch_g_suspend_info_get(switch_jobinfo_t *jobinfo,
 					  void **suspend_info)
 {
 	if ( switch_init() < 0 )
@@ -384,7 +384,7 @@ extern void interconnect_suspend_info_get(switch_jobinfo_t *jobinfo,
 	(*(ops.job_suspend_info_get)) (jobinfo, suspend_info);
 }
 
-extern void interconnect_suspend_info_pack(void *suspend_info, Buf buffer)
+extern void switch_g_suspend_info_pack(void *suspend_info, Buf buffer)
 {
 	if ( switch_init() < 0 )
 		return;
@@ -392,7 +392,7 @@ extern void interconnect_suspend_info_pack(void *suspend_info, Buf buffer)
 	(*(ops.job_suspend_info_pack)) (suspend_info, buffer);
 }
 
-extern int interconnect_suspend_info_unpack(void **suspend_info, Buf buffer)
+extern int switch_g_suspend_info_unpack(void **suspend_info, Buf buffer)
 {
 	if ( switch_init() < 0 )
 		return SLURM_ERROR;
@@ -400,7 +400,7 @@ extern int interconnect_suspend_info_unpack(void **suspend_info, Buf buffer)
 	return (*(ops.job_suspend_info_unpack)) (suspend_info, buffer);
 }
 
-extern void interconnect_suspend_info_free(void *suspend_info)
+extern void switch_g_suspend_info_free(void *suspend_info)
 {
 	if ( switch_init() < 0 )
 		return;
@@ -408,7 +408,7 @@ extern void interconnect_suspend_info_free(void *suspend_info)
 	(*(ops.job_suspend_info_free)) (suspend_info);
 }
 
-extern int interconnect_suspend(void *suspend_info, int max_wait)
+extern int switch_g_suspend(void *suspend_info, int max_wait)
 {
 	if ( switch_init() < 0 )
 		return SLURM_ERROR;
@@ -416,7 +416,7 @@ extern int interconnect_suspend(void *suspend_info, int max_wait)
 	return (*(ops.job_suspend)) (suspend_info, max_wait);
 }
 
-extern int interconnect_resume(void *suspend_info, int max_wait)
+extern int switch_g_resume(void *suspend_info, int max_wait)
 {
 	if ( switch_init() < 0 )
 		return SLURM_ERROR;
@@ -424,7 +424,7 @@ extern int interconnect_resume(void *suspend_info, int max_wait)
 	return (*(ops.job_resume)) (suspend_info, max_wait);
 }
 
-extern int interconnect_fini(switch_jobinfo_t *jobinfo)
+extern int switch_g_fini(switch_jobinfo_t *jobinfo)
 {
 	if ( switch_init() < 0 )
 		return SLURM_ERROR;
@@ -432,7 +432,7 @@ extern int interconnect_fini(switch_jobinfo_t *jobinfo)
 	return (*(ops.job_fini)) (jobinfo);
 }
 
-extern int interconnect_postfini(switch_jobinfo_t *jobinfo, uid_t pgid,
+extern int switch_g_postfini(switch_jobinfo_t *jobinfo, uid_t pgid,
 				 uint32_t job_id, uint32_t step_id )
 {
 	if ( switch_init() < 0 )
@@ -442,7 +442,7 @@ extern int interconnect_postfini(switch_jobinfo_t *jobinfo, uid_t pgid,
 				      job_id, step_id);
 }
 
-extern int interconnect_attach(switch_jobinfo_t *jobinfo, char ***env,
+extern int switch_g_attach(switch_jobinfo_t *jobinfo, char ***env,
 			       uint32_t nodeid, uint32_t procid,
 			       uint32_t nnodes, uint32_t nprocs, uint32_t gid)
 {
@@ -453,7 +453,7 @@ extern int interconnect_attach(switch_jobinfo_t *jobinfo, char ***env,
 				    nodeid, procid, nnodes, nprocs, gid);
 }
 
-extern int switch_get_errno(void)
+extern int switch_g_get_errno(void)
 {
 	if ( switch_init() < 0 )
 		return SLURM_ERROR;
@@ -461,7 +461,7 @@ extern int switch_get_errno(void)
 	return (*(ops.switch_errno))( );
 }
 
-extern char *switch_strerror(int errnum)
+extern char *switch_g_strerror(int errnum)
 {
 	if ( switch_init() < 0 )
 		return NULL;
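
After the rename, call sites go through the switch_g_* wrappers above, each of which lazily loads the plugin via switch_init() before dispatching through the ops table. A typical unpack path on the receiving side, mirroring the slurm_protocol_pack.c hunks in this patch, looks roughly like the following sketch (the surrounding function, the buffer variable, and the unpack_error label are assumed to exist in the caller):

    switch_jobinfo_t *switch_job = NULL;

    switch_g_alloc_jobinfo(&switch_job);
    if (switch_g_unpack_jobinfo(switch_job, buffer) < 0) {
            error("switch_g_unpack_jobinfo: %m");
            switch_g_free_jobinfo(switch_job);
            goto unpack_error;          /* label assumed in the caller */
    }
    /* ... attach the credential to the step, then later ... */
    switch_g_free_jobinfo(switch_job);
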
diff --git a/src/common/switch.h b/src/common/switch.h
index dca89ea0fce..2983e32f632 100644
--- a/src/common/switch.h
+++ b/src/common/switch.h
@@ -1,5 +1,5 @@
 /*****************************************************************************\
- *  src/common/switch.h - Generic switch (interconnect) info for slurm
+ *  src/common/switch.h - Generic switch (switch_g) info for slurm
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008 Lawrence Livermore National Security.
@@ -81,7 +81,7 @@ extern int switch_fini (void);
  * IN dir_name - directory into which switch state is saved
  * RET         - slurm error code
  */
-extern int  switch_save   (char *dir_name);
+extern int  switch_g_save   (char *dir_name);
 
 /* restore any global switch state from a file within the specified directory
  * the actual file name used in plugin specific
@@ -90,21 +90,21 @@ extern int  switch_save   (char *dir_name);
  *               a clean slate.
  * RET         - slurm error code
  */
-extern int  switch_restore(char *dir_name, bool recover);
+extern int  switch_g_restore(char *dir_name, bool recover);
 
 /* clear all current switch window allocation information
  * RET         - slurm error code
  */
-extern int  switch_clear(void);
+extern int  switch_g_clear(void);
 
 /* return the number of a switch-specific error code */
-extern int switch_get_errno(void);
+extern int switch_g_get_errno(void);
 
 /* return a string description of a switch specific error code
  * IN errnum - switch specific error return code
  * RET       - string describing the nature of the error
  */
-extern char *switch_strerror(int errnum);
+extern char *switch_g_strerror(int errnum);
 
 /******************************************************\
  * JOB-SPECIFIC SWITCH CREDENTIAL MANAGEMENT FUNCTIONS *
@@ -113,47 +113,47 @@ extern char *switch_strerror(int errnum);
 /* allocate storage for a switch job credential
  * OUT jobinfo - storage for a switch job credential
  * RET         - slurm error code
- * NOTE: storage must be freed using g_switch_free_jobinfo
+ * NOTE: storage must be freed using switch_g_free_jobinfo
  */
-extern int  switch_alloc_jobinfo (switch_jobinfo_t **jobinfo);
+extern int  switch_g_alloc_jobinfo (switch_jobinfo_t **jobinfo);
 
 /* fill a job's switch credential
  * OUT jobinfo  - storage for a switch job credential
  * IN  step_layout - the layout of the step with at least the nodes,
  *                   tasks_per_node and tids set
  * IN  network  - plugin-specific network info (e.g. protocol)
- * NOTE: storage must be freed using g_switch_free_jobinfo
+ * NOTE: storage must be freed using switch_g_free_jobinfo
  */
-extern int  switch_build_jobinfo(switch_jobinfo_t *jobinfo,
+extern int  switch_g_build_jobinfo(switch_jobinfo_t *jobinfo,
 				 slurm_step_layout_t *step_layout,
 				 char *network);
 
 /* copy a switch job credential
  * IN jobinfo - the switch job credential to be copied
  * RET        - the copy
- * NOTE: returned value must be freed using g_switch_free_jobinfo
+ * NOTE: returned value must be freed using switch_g_free_jobinfo
  */
-extern switch_jobinfo_t *switch_copy_jobinfo(switch_jobinfo_t *jobinfo);
+extern switch_jobinfo_t *switch_g_copy_jobinfo(switch_jobinfo_t *jobinfo);
 
 /* free storage previously allocated for a switch job credential
  * IN jobinfo  - the switch job credential to be freed
  */
-extern void switch_free_jobinfo  (switch_jobinfo_t *jobinfo);
+extern void switch_g_free_jobinfo  (switch_jobinfo_t *jobinfo);
 
 /* pack a switch job credential into a buffer in machine independent form
  * IN jobinfo  - the switch job credential to be saved
  * OUT buffer  - buffer with switch credential appended
  * RET         - slurm error code
  */
-extern int  switch_pack_jobinfo  (switch_jobinfo_t *jobinfo, Buf buffer);
+extern int  switch_g_pack_jobinfo  (switch_jobinfo_t *jobinfo, Buf buffer);
 
 /* unpack a switch job credential from a buffer
  * OUT jobinfo - the switch job credential read
  * IN  buffer  - buffer with switch credential read from current pointer loc
  * RET         - slurm error code
- * NOTE: returned value must be freed using g_switch_free_jobinfo
+ * NOTE: returned value must be freed using switch_g_free_jobinfo
  */
-extern int  switch_unpack_jobinfo(switch_jobinfo_t *jobinfo, Buf buffer);
+extern int  switch_g_unpack_jobinfo(switch_jobinfo_t *jobinfo, Buf buffer);
 
 /* get some field from a switch job credential
  * IN jobinfo - the switch job credential
@@ -192,7 +192,7 @@ extern bool switch_g_part_comp(void);
 /*
  * Restore the switch allocation information "jobinfo" for an already
  * allocated job step, most likely to restore the switch information
- * after a call to switch_clear().
+ * after a call to switch_g_clear().
  */
 extern int switch_g_job_step_allocated(switch_jobinfo_t *jobinfo,
 	char *nodelist);
@@ -201,7 +201,7 @@ extern int switch_g_job_step_allocated(switch_jobinfo_t *jobinfo,
  * IN fp      - an open file pointer
  * IN jobinfo - a switch job credential
  */
-extern void switch_print_jobinfo(FILE *fp, switch_jobinfo_t *jobinfo);
+extern void switch_g_print_jobinfo(FILE *fp, switch_jobinfo_t *jobinfo);
 
 /* write job credential to a string
  * IN jobinfo - a switch job credential
@@ -209,7 +209,7 @@ extern void switch_print_jobinfo(FILE *fp, switch_jobinfo_t *jobinfo);
  * IN size    - byte size of buf
  * RET        - the string, same as buf
  */
-extern char *switch_sprint_jobinfo( switch_jobinfo_t *jobinfo,
+extern char *switch_g_sprint_jobinfo( switch_jobinfo_t *jobinfo,
 			char *buf, size_t size);
 
 /********************************************************************\
@@ -217,42 +217,42 @@ extern char *switch_sprint_jobinfo( switch_jobinfo_t *jobinfo,
 \********************************************************************/
 
 /*
- * Setup node for interconnect use.
+ * Setup node for switch_g use.
  *
  * This function is run from the top level slurmd only once per
  * slurmd run. It may be used, for instance, to perform some one-time
- * interconnect setup or spawn an error handling thread.
+ * switch_g setup or spawn an error handling thread.
  *
  */
-extern int interconnect_node_init(void);
+extern int switch_g_node_init(void);
 
 /*
- * Finalize interconnect on node.
+ * Finalize switch_g on node.
  *
  * This function is called once as slurmd exits (slurmd will wait for
  * this function to return before continuing the exit process)
  */
-extern int interconnect_node_fini(void);
+extern int switch_g_node_fini(void);
 
 
 /*
- * Notes on job related interconnect functions:
+ * Notes on job related switch_g functions:
  *
- * Interconnect functions are run within slurmd in the following way:
+ * switch_g functions are run within slurmd in the following way:
  * (Diagram courtesy of Jim Garlick [see qsw.c] )
  *
  *  Process 1 (root)        Process 2 (root, user)  |  Process 3 (user task)
  *                                                  |
- *  interconnect_preinit                            |
- *  fork ------------------ interconnect_init       |
+ *  switch_g_preinit                                |
+ *  fork ------------------ switch_g_init           |
  *  waitpid                 setuid, chdir, etc.     |
- *                          fork N procs -----------+--- interconnect_attach
+ *                          fork N procs -----------+--- switch_g_attach
  *                          wait all                |    exec mpi process
- *                          interconnect_fini*      |
- *  interconnect_postfini                           |
+ *                          switch_g_fini*          |
+ *  switch_g_postfini                               |
  *                                                  |
  *
- * [ *Note: interconnect_fini() is run as the uid of the job owner, not root ]
+ * [ *Note: switch_g_fini() is run as the uid of the job owner, not root ]
  */
 
 /*
@@ -260,19 +260,19 @@ extern int interconnect_node_fini(void);
  *
  * pre is run as root in the first slurmd process, the so called job
  * manager. This function can be used to perform any initialization
- * that needs to be performed in the same process as interconnect_fini()
+ * that needs to be performed in the same process as switch_g_fini()
  *
  */
-extern int interconnect_preinit(switch_jobinfo_t *jobinfo);
+extern int switch_g_preinit(switch_jobinfo_t *jobinfo);
 
 /*
- * initialize interconnect on node for job. This function is run from the
- * slurmstepd process (some interconnect implementations may require
- * interconnect init functions to be executed from a separate process
- * than the process executing interconnect_fini() [e.g. QsNet])
+ * initialize switch_g on node for job. This function is run from the
+ * slurmstepd process (some switch_g implementations may require
+ * switch_g init functions to be executed from a separate process
+ * than the process executing switch_g_fini() [e.g. QsNet])
  *
  */
-extern int interconnect_init(switch_jobinfo_t *jobinfo, uid_t uid,
+extern int switch_g_init(switch_jobinfo_t *jobinfo, uid_t uid,
 			     char *job_name);
 
 /*
@@ -281,7 +281,7 @@ extern int interconnect_init(switch_jobinfo_t *jobinfo, uid_t uid,
  * IN jobinfo - switch information for a job step
  * RET SLURM_SUCCESS or error code
  */
-extern int interconnect_suspend_test(switch_jobinfo_t *jobinfo);
+extern int switch_g_suspend_test(switch_jobinfo_t *jobinfo);
 
 /*
  * Build data structure containing information needed to suspend or resume
@@ -290,7 +290,7 @@ extern int interconnect_suspend_test(switch_jobinfo_t *jobinfo);
  * IN jobinfo - switch information for a job step
  * RET data to be sent with job suspend/resume RPC
  */
-extern void interconnect_suspend_info_get(switch_jobinfo_t *jobinfo,
+extern void switch_g_suspend_info_get(switch_jobinfo_t *jobinfo,
 					  void **suspend_info);
 /*
  * Pack data structure containing information needed to suspend or resume
@@ -299,7 +299,7 @@ extern void interconnect_suspend_info_get(switch_jobinfo_t *jobinfo,
  * IN suspend_info - data to be sent with job suspend/resume RPC
  * IN/OUT buffer to hold the data
  */
-extern void interconnect_suspend_info_pack(void *suspend_info, Buf buffer);
+extern void switch_g_suspend_info_pack(void *suspend_info, Buf buffer);
 /*
  * Unpack data structure containing information needed to suspend or resume
  * a job
@@ -308,57 +308,57 @@ extern void interconnect_suspend_info_pack(void *suspend_info, Buf buffer);
  * IN/OUT buffer that holds the data
  * RET SLURM_SUCCESS or error code
  */
-extern int interconnect_suspend_info_unpack(void **suspend_info, Buf buffer);
+extern int switch_g_suspend_info_unpack(void **suspend_info, Buf buffer);
 /*
  * Free data structure containing information needed to suspend or resume
  * a job
  *
  * IN suspend_info - data sent with job suspend/resume RPC
  */
-extern void interconnect_suspend_info_free(void *suspend_info);
+extern void switch_g_suspend_info_free(void *suspend_info);
 
 /*
  * Suspend a job's use of switch resources. This may reset MPI timeout values
- * and/or release switch resources. See also interconnect_resume().
+ * and/or release switch resources. See also switch_g_resume().
  *
  * IN max_wait - maximum number of seconds to wait for operation to complete
  * RET SLURM_SUCCESS or error code
  */
-extern int interconnect_suspend(void *suspend_info, int max_wait);
+extern int switch_g_suspend(void *suspend_info, int max_wait);
 
 /*
- * Resume a job's use of switch resources. See also interconnect_suspend().
+ * Resume a job's use of switch resources. See also switch_g_suspend().
  *
  * IN max_wait - maximum number of seconds to wait for operation to complete
  * RET SLURM_SUCCESS or error code
  */
-extern int interconnect_resume(void *suspend_infoo, int max_wait);
+extern int switch_g_resume(void *suspend_info, int max_wait);
 
 /*
- * This function is run from the same process as interconnect_init()
+ * This function is run from the same process as switch_g_init()
  * after all job tasks have exited. It is *not* run as root, because
  * the process in question has already setuid to the job owner.
  *
  */
-extern int interconnect_fini(switch_jobinfo_t *jobinfo);
+extern int switch_g_fini(switch_jobinfo_t *jobinfo);
 
 /*
- * Finalize interconnect on node.
+ * Finalize switch_g on node.
  *
  * This function is run from the initial slurmd process (same process
- * as interconnect_preinit()), and is run as root. Any cleanup routines
+ * as switch_g_preinit()), and is run as root. Any cleanup routines
  * that need to be run with root privileges should be run from this
  * function.
  */
-extern int interconnect_postfini(switch_jobinfo_t *jobinfo, uid_t pgid,
+extern int switch_g_postfini(switch_jobinfo_t *jobinfo, uid_t pgid,
 				uint32_t job_id, uint32_t step_id );
 
 /*
- * attach process to interconnect
+ * attach process to switch_g
  * (Called from within the process, so it is appropriate to set
- * interconnect specific environment variables here)
+ * switch_g specific environment variables here)
  */
-extern int interconnect_attach(switch_jobinfo_t *jobinfo, char ***env,
+extern int switch_g_attach(switch_jobinfo_t *jobinfo, char ***env,
 		uint32_t nodeid, uint32_t procid, uint32_t nnodes,
 		uint32_t nprocs, uint32_t rank);
 
diff --git a/src/plugins/job_container/cncu/job_container_cncu.c b/src/plugins/job_container/cncu/job_container_cncu.c
index 60f2a4e5a3a..64e4ae208b0 100644
--- a/src/plugins/job_container/cncu/job_container_cncu.c
+++ b/src/plugins/job_container/cncu/job_container_cncu.c
@@ -352,12 +352,12 @@ extern int container_p_add_pid(uint32_t job_id, pid_t pid, uid_t uid)
 	memset(&job, 0, sizeof(stepd_step_rec_t));
 	job.jmgr_pid = pid;
 	job.uid = uid;
-	if (slurm_container_create(&job) != SLURM_SUCCESS) {
-		error("%s: slurm_container_create job(%u)", plugin_type,job_id);
+	if (proctrack_g_create(&job) != SLURM_SUCCESS) {
+		error("%s: proctrack_g_create job(%u)", plugin_type,job_id);
 		return SLURM_ERROR;
 	}
 
-	slurm_container_add(&job, pid);
+	proctrack_g_add(&job, pid);
 
 	return container_p_add_cont(job_id, job.cont_id);
 }
diff --git a/src/plugins/job_container/none/job_container_none.c b/src/plugins/job_container/none/job_container_none.c
index 7362582ae57..d883b072720 100644
--- a/src/plugins/job_container/none/job_container_none.c
+++ b/src/plugins/job_container/none/job_container_none.c
@@ -312,13 +312,13 @@ extern int container_p_add_pid(uint32_t job_id, pid_t pid, uid_t uid)
 		memset(&job, 0, sizeof(stepd_step_rec_t));
 		job.jmgr_pid = pid;
 		job.uid = uid;
-		if (slurm_container_create(&job) != SLURM_SUCCESS) {
-			error("%s: slurm_container_create job(%u)",
+		if (proctrack_g_create(&job) != SLURM_SUCCESS) {
+			error("%s: proctrack_g_create job(%u)",
 			      plugin_type, job_id);
 			return SLURM_ERROR;
 		}
 
-		slurm_container_add(&job, pid);
+		proctrack_g_add(&job, pid);
 
 		return container_p_add_cont(job_id, job.cont_id);
 	}
diff --git a/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c b/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c
index 16ebc280db5..1cf5cdeaa2f 100644
--- a/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c
+++ b/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c
@@ -206,7 +206,7 @@ extern void jobacct_gather_p_poll_data(
 
 	if (!pgid_plugin) {
 		/* get only the processes in the proctrack container */
-		slurm_container_get_pids(cont_id, &pids, &npids);
+		proctrack_g_get_pids(cont_id, &pids, &npids);
 		if (!npids) {
 			debug4("no pids in this container %"PRIu64"", cont_id);
 			goto finished;
diff --git a/src/plugins/jobacct_gather/cgroup/jobacct_gather_cgroup.c b/src/plugins/jobacct_gather/cgroup/jobacct_gather_cgroup.c
index 16c48facb6d..981d72d9f2a 100644
--- a/src/plugins/jobacct_gather/cgroup/jobacct_gather_cgroup.c
+++ b/src/plugins/jobacct_gather/cgroup/jobacct_gather_cgroup.c
@@ -463,7 +463,7 @@ extern void jobacct_gather_p_poll_data(
 
 	if (!pgid_plugin) {
 		/* get only the processes in the proctrack container */
-		slurm_container_get_pids(cont_id, &pids, &npids);
+		proctrack_g_get_pids(cont_id, &pids, &npids);
 		if (!npids) {
 			/* update consumed energy even if pids do not exist
 			 * any more, by iterating thru jobacctinfo structs */
diff --git a/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c b/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c
index 2015a0d3f53..fad9f77b51a 100644
--- a/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c
+++ b/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c
@@ -550,7 +550,7 @@ extern void jobacct_gather_p_poll_data(
 
 	if (!pgid_plugin) {
 		/* get only the processes in the proctrack container */
-		slurm_container_get_pids(cont_id, &pids, &npids);
+		proctrack_g_get_pids(cont_id, &pids, &npids);
 		if (!npids) {
 			/* update consumed energy even if pids do not exist */
 			itr = list_iterator_create(task_list);
diff --git a/src/plugins/proctrack/aix/proctrack_aix.c b/src/plugins/proctrack/aix/proctrack_aix.c
index bb0bf08f3c4..b7aa253955e 100644
--- a/src/plugins/proctrack/aix/proctrack_aix.c
+++ b/src/plugins/proctrack/aix/proctrack_aix.c
@@ -119,7 +119,7 @@ extern int fini ( void )
 	return SLURM_SUCCESS;
 }
 
-extern int slurm_container_plugin_create ( stepd_step_rec_t *job )
+extern int proctrack_p_plugin_create ( stepd_step_rec_t *job )
 {
 	return SLURM_SUCCESS;
 }
@@ -128,7 +128,7 @@ extern int slurm_container_plugin_create ( stepd_step_rec_t *job )
  * Uses job step process group id as a unique identifier.  Job id
  * and step id are not unique by themselves.
  */
-extern int slurm_container_plugin_add ( stepd_step_rec_t *job, pid_t pid )
+extern int proctrack_p_plugin_add ( stepd_step_rec_t *job, pid_t pid )
 {
 	int pgid = (int) job->pgid;
 
@@ -144,7 +144,7 @@ extern int slurm_container_plugin_add ( stepd_step_rec_t *job, pid_t pid )
 	return SLURM_SUCCESS;
 }
 
-extern int slurm_container_plugin_signal  ( uint64_t id, int signal )
+extern int proctrack_p_plugin_signal  ( uint64_t id, int signal )
 {
 	int jobid = (int) id;
 	if (!id)	/* no container ID */
@@ -153,7 +153,7 @@ extern int slurm_container_plugin_signal  ( uint64_t id, int signal )
 	return proctrack_job_kill(&jobid, &signal);
 }
 
-extern int slurm_container_plugin_destroy ( uint64_t id )
+extern int proctrack_p_plugin_destroy ( uint64_t id )
 {
 	int jobid = (int) id;
 
@@ -167,7 +167,7 @@ extern int slurm_container_plugin_destroy ( uint64_t id )
 }
 
 extern uint64_t
-slurm_container_plugin_find(pid_t pid)
+proctrack_p_plugin_find(pid_t pid)
 {
 	int local_pid = (int) pid;
 	int cont_id = proctrack_get_job_id(&local_pid);
@@ -177,7 +177,7 @@ slurm_container_plugin_find(pid_t pid)
 }
 
 extern bool
-slurm_container_plugin_has_pid(uint64_t cont_id, pid_t pid)
+proctrack_p_plugin_has_pid(uint64_t cont_id, pid_t pid)
 {
 	int local_pid = (int) pid;
 	int found_cont_id = proctrack_get_job_id(&local_pid);
@@ -189,7 +189,7 @@ slurm_container_plugin_has_pid(uint64_t cont_id, pid_t pid)
 }
 
 extern int
-slurm_container_plugin_get_pids(uint64_t cont_id, pid_t **pids, int *npids)
+proctrack_p_plugin_get_pids(uint64_t cont_id, pid_t **pids, int *npids)
 {
 	int32_t *p;
 	int np;
@@ -212,7 +212,7 @@ slurm_container_plugin_get_pids(uint64_t cont_id, pid_t **pids, int *npids)
 	}
 
 	if (sizeof(int32_t) == sizeof(pid_t)) {
-		debug3("slurm_container_plugin_get_pids: No need to copy "
+		debug3("proctrack_p_plugin_get_pids: No need to copy "
 		       "pids array");
 		*npids = np;
 		*pids = (pid_t *)p;
@@ -221,7 +221,7 @@ slurm_container_plugin_get_pids(uint64_t cont_id, pid_t **pids, int *npids)
 		pid_t *p_copy;
 		int i;
 
-		debug3("slurm_container_plugin_get_pids: Must copy pids array");
+		debug3("proctrack_p_plugin_get_pids: Must copy pids array");
 		p_copy = (pid_t *)xmalloc(np * sizeof(pid_t));
 		for (i = 0; i < np; i++) {
 			p_copy[i] = (pid_t)p[i];
@@ -235,7 +235,7 @@ slurm_container_plugin_get_pids(uint64_t cont_id, pid_t **pids, int *npids)
 }
 
 extern int
-slurm_container_plugin_wait(uint64_t cont_id)
+proctrack_p_plugin_wait(uint64_t cont_id)
 {
 	int jobid = (int) cont_id;
 	int delay = 1;
@@ -256,7 +256,7 @@ slurm_container_plugin_wait(uint64_t cont_id)
 			int npids = 0;
 			error("Container %"PRIu64" is still not empty", cont_id);
 
-			slurm_container_plugin_get_pids(cont_id, &pids, &npids);
+			proctrack_p_plugin_get_pids(cont_id, &pids, &npids);
 			if (npids > 0) {
 				for (i = 0; i < npids; i++) {
 					verbose("Container %"PRIu64" has pid %d",
diff --git a/src/plugins/proctrack/cgroup/proctrack_cgroup.c b/src/plugins/proctrack/cgroup/proctrack_cgroup.c
index 62a3e73d6bf..6d3ab5b1c42 100644
--- a/src/plugins/proctrack/cgroup/proctrack_cgroup.c
+++ b/src/plugins/proctrack/cgroup/proctrack_cgroup.c
@@ -101,7 +101,7 @@
  * matures.
  */
 const char plugin_name[]      = "Process tracking via linux "
-				"cgroup freezer subsystem";
+	"cgroup freezer subsystem";
 const char plugin_type[]      = "proctrack/cgroup";
 const uint32_t plugin_version = 91;
 
@@ -131,7 +131,7 @@ int _slurm_cgroup_init(void)
 
 	/* initialize freezer cgroup namespace */
 	if (xcgroup_ns_create(&slurm_cgroup_conf, &freezer_ns, "", "freezer")
-	     != XCGROUP_SUCCESS) {
+	    != XCGROUP_SUCCESS) {
 		error("unable to create freezer cgroup namespace");
 		return SLURM_ERROR;
 	}
@@ -181,7 +181,7 @@ int _slurm_cgroup_create(stepd_step_rec_t *job, uint64_t id, uid_t uid, gid_t gi
 	/* build job cgroup relative path if no set (should not be) */
 	if (*job_cgroup_path == '\0') {
 		if (snprintf(job_cgroup_path, PATH_MAX, "%s/job_%u",
-			      user_cgroup_path, job->jobid) >= PATH_MAX) {
+			     user_cgroup_path, job->jobid) >= PATH_MAX) {
 			error("unable to build job %u cgroup relative "
 			      "path : %m", job->jobid);
 			return SLURM_ERROR;
@@ -212,8 +212,8 @@ int _slurm_cgroup_create(stepd_step_rec_t *job, uint64_t id, uid_t uid, gid_t gi
 
 	/* create user cgroup in the freezer ns (it could already exist) */
 	if (xcgroup_create(&freezer_ns, &user_freezer_cg,
-			    user_cgroup_path,
-			    getuid(), getgid()) != XCGROUP_SUCCESS) {
+			   user_cgroup_path,
+			   getuid(), getgid()) != XCGROUP_SUCCESS) {
 		return SLURM_ERROR;
 	}
 	if (xcgroup_instanciate(&user_freezer_cg) != XCGROUP_SUCCESS) {
@@ -224,8 +224,8 @@ int _slurm_cgroup_create(stepd_step_rec_t *job, uint64_t id, uid_t uid, gid_t gi
 
 	/* create job cgroup in the freezer ns (it could already exist) */
 	if (xcgroup_create(&freezer_ns, &job_freezer_cg,
-			    job_cgroup_path,
-			    getuid(), getgid()) != XCGROUP_SUCCESS) {
+			   job_cgroup_path,
+			   getuid(), getgid()) != XCGROUP_SUCCESS) {
 		xcgroup_destroy(&user_freezer_cg);
 		return SLURM_ERROR;
 	}
@@ -237,8 +237,8 @@ int _slurm_cgroup_create(stepd_step_rec_t *job, uint64_t id, uid_t uid, gid_t gi
 
 	/* create step cgroup in the freezer ns (it should not exists) */
 	if (xcgroup_create(&freezer_ns, &step_freezer_cg,
-			    jobstep_cgroup_path,
-			    getuid(), getgid()) != XCGROUP_SUCCESS) {
+			   jobstep_cgroup_path,
+			   getuid(), getgid()) != XCGROUP_SUCCESS) {
 		xcgroup_destroy(&user_freezer_cg);
 		xcgroup_destroy(&job_freezer_cg);
 		return SLURM_ERROR;
@@ -354,7 +354,7 @@ _slurm_cgroup_is_pid_a_slurm_task(uint64_t id, pid_t pid)
 	char file_path[PATH_MAX], buf[2048];
 
 	if (snprintf(file_path, PATH_MAX, "/proc/%ld/stat",
-		      (long)pid) >= PATH_MAX) {
+		     (long)pid) >= PATH_MAX) {
 		debug2("unable to build pid '%d' stat file: %m ", pid);
 		return fstatus;
 	}
@@ -425,7 +425,7 @@ extern int fini (void)
 /*
  * Uses slurmd job-step manager's pid as the unique container id.
  */
-extern int slurm_container_plugin_create (stepd_step_rec_t *job)
+extern int proctrack_p_plugin_create (stepd_step_rec_t *job)
 {
 	int fstatus;
 
@@ -456,12 +456,12 @@ extern int slurm_container_plugin_create (stepd_step_rec_t *job)
 	return SLURM_SUCCESS;
 }
 
-extern int slurm_container_plugin_add (stepd_step_rec_t *job, pid_t pid)
+extern int proctrack_p_plugin_add (stepd_step_rec_t *job, pid_t pid)
 {
 	return _slurm_cgroup_add_pids(job->cont_id, &pid, 1);
 }
 
-extern int slurm_container_plugin_signal (uint64_t id, int signal)
+extern int proctrack_p_plugin_signal (uint64_t id, int signal)
 {
 	pid_t* pids = NULL;
 	int npids;
@@ -470,7 +470,7 @@ extern int slurm_container_plugin_signal (uint64_t id, int signal)
 
 	/* get all the pids associated with the step */
 	if (_slurm_cgroup_get_pids(id, &pids, &npids) !=
-	     SLURM_SUCCESS) {
+	    SLURM_SUCCESS) {
 		debug3("unable to get pids list for cont_id=%"PRIu64"", id);
 		/* that could mean that all the processes already exit */
 		/* the container so return success */
@@ -516,12 +516,12 @@ extern int slurm_container_plugin_signal (uint64_t id, int signal)
 	return SLURM_SUCCESS;
 }
 
-extern int slurm_container_plugin_destroy (uint64_t id)
+extern int proctrack_p_plugin_destroy (uint64_t id)
 {
 	return _slurm_cgroup_destroy();
 }
 
-extern uint64_t slurm_container_plugin_find(pid_t pid)
+extern uint64_t proctrack_p_plugin_find(pid_t pid)
 {
 	uint64_t cont_id = -1;
 
@@ -531,12 +531,12 @@ extern uint64_t slurm_container_plugin_find(pid_t pid)
 	return cont_id;
 }
 
-extern bool slurm_container_plugin_has_pid(uint64_t cont_id, pid_t pid)
+extern bool proctrack_p_plugin_has_pid(uint64_t cont_id, pid_t pid)
 {
 	return _slurm_cgroup_has_pid(pid);
 }
 
-extern int slurm_container_plugin_wait(uint64_t cont_id)
+extern int proctrack_p_plugin_wait(uint64_t cont_id)
 {
 	int delay = 1;
 
@@ -547,8 +547,8 @@ extern int slurm_container_plugin_wait(uint64_t cont_id)
 
 	/* Spin until the container is successfully destroyed */
 	/* This indicates that all tasks have exited the container */
-	while (slurm_container_plugin_destroy(cont_id) != SLURM_SUCCESS) {
-		slurm_container_plugin_signal(cont_id, SIGKILL);
+	while (proctrack_p_plugin_destroy(cont_id) != SLURM_SUCCESS) {
+		proctrack_p_plugin_signal(cont_id, SIGKILL);
 		sleep(delay);
 		if (delay < 120) {
 			delay *= 2;
@@ -561,8 +561,8 @@ extern int slurm_container_plugin_wait(uint64_t cont_id)
 	return SLURM_SUCCESS;
 }
 
-extern int slurm_container_plugin_get_pids(uint64_t cont_id,
-					   pid_t **pids, int *npids)
+extern int proctrack_p_plugin_get_pids(uint64_t cont_id,
+				       pid_t **pids, int *npids)
 {
 	return _slurm_cgroup_get_pids(cont_id, pids, npids);
 }
diff --git a/src/plugins/proctrack/cray/proctrack_cray.c b/src/plugins/proctrack/cray/proctrack_cray.c
index 8eae8a15d22..e75730de44c 100644
--- a/src/plugins/proctrack/cray/proctrack_cray.c
+++ b/src/plugins/proctrack/cray/proctrack_cray.c
@@ -137,7 +137,7 @@ extern int fini(void)
 	return SLURM_SUCCESS;
 }
 
-extern int slurm_container_plugin_create(stepd_step_rec_t *job)
+extern int proctrack_p_plugin_create(stepd_step_rec_t *job)
 {
 	pthread_attr_t attr;
 
@@ -173,11 +173,11 @@ extern int slurm_container_plugin_create(stepd_step_rec_t *job)
 		slurm_mutex_unlock(&notify_mutex);
 		slurm_mutex_unlock(&thread_mutex);
 
-		debug("slurm_container_plugin_create: created jid "
+		debug("proctrack_p_plugin_create: created jid "
 		      "0x%08lx thread 0x%08lx",
 		      job->cont_id, threadid);
 	} else
-		error("slurm_container_plugin_create: already have a cont_id");
+		error("proctrack_p_plugin_create: already have a cont_id");
 
 	return SLURM_SUCCESS;
 }
@@ -187,7 +187,7 @@ extern int slurm_container_plugin_create(stepd_step_rec_t *job)
  * was created and all of its spawned tasks are placed into the container
  * when forked, all we need to do is remove the slurmstepd from the container
  * (once) at this time. */
-int slurm_container_plugin_add(stepd_step_rec_t *job, pid_t pid)
+int proctrack_p_plugin_add(stepd_step_rec_t *job, pid_t pid)
 {
 	if (job_attachpid(pid, job->cont_id) == (jid_t) -1)
 		error("Failed to attach pid %d to job container: %m", pid);
@@ -197,7 +197,7 @@ int slurm_container_plugin_add(stepd_step_rec_t *job, pid_t pid)
 	return SLURM_SUCCESS;
 }
 
-int slurm_container_plugin_signal(uint64_t id, int sig)
+int proctrack_p_plugin_signal(uint64_t id, int sig)
 {
 	if ((job_killjid((jid_t) id, sig) < 0)
 	   && (errno != ENODATA) && (errno != EBADF) )
@@ -205,7 +205,7 @@ int slurm_container_plugin_signal(uint64_t id, int sig)
 	return (SLURM_SUCCESS);
 }
 
-int slurm_container_plugin_destroy(uint64_t id)
+int proctrack_p_plugin_destroy(uint64_t id)
 {
 	int status;
 
@@ -218,7 +218,7 @@ int slurm_container_plugin_destroy(uint64_t id)
 	return SLURM_SUCCESS;
 }
 
-uint64_t slurm_container_plugin_find(pid_t pid)
+uint64_t proctrack_p_plugin_find(pid_t pid)
 {
 	jid_t jid;
 
@@ -228,7 +228,7 @@ uint64_t slurm_container_plugin_find(pid_t pid)
 	return ((uint64_t) jid);
 }
 
-bool slurm_container_plugin_has_pid (uint64_t cont_id, pid_t pid)
+bool proctrack_p_plugin_has_pid (uint64_t cont_id, pid_t pid)
 {
 	jid_t jid;
 
@@ -240,7 +240,7 @@ bool slurm_container_plugin_has_pid (uint64_t cont_id, pid_t pid)
 	return true;
 }
 
-int slurm_container_plugin_wait(uint64_t id)
+int proctrack_p_plugin_wait(uint64_t id)
 {
 	int status;
 	if (job_waitjid((jid_t) id, &status, 0) == (jid_t)-1)
@@ -249,7 +249,7 @@ int slurm_container_plugin_wait(uint64_t id)
 	return SLURM_SUCCESS;
 }
 
-int slurm_container_plugin_get_pids(uint64_t cont_id, pid_t **pids, int *npids)
+int proctrack_p_plugin_get_pids(uint64_t cont_id, pid_t **pids, int *npids)
 {
 	int pidcnt, bufsize;
 	pid_t *p;
diff --git a/src/plugins/proctrack/linuxproc/proctrack_linuxproc.c b/src/plugins/proctrack/linuxproc/proctrack_linuxproc.c
index 7610d3d05ff..891e40e6f02 100644
--- a/src/plugins/proctrack/linuxproc/proctrack_linuxproc.c
+++ b/src/plugins/proctrack/linuxproc/proctrack_linuxproc.c
@@ -113,33 +113,33 @@ extern int fini ( void )
 /*
  * Uses slurmd job-step manager's pid as the unique container id.
  */
-extern int slurm_container_plugin_create ( stepd_step_rec_t *job )
+extern int proctrack_p_plugin_create ( stepd_step_rec_t *job )
 {
 	job->cont_id = (uint64_t)job->jmgr_pid;
 	return SLURM_SUCCESS;
 }
 
-extern int slurm_container_plugin_add ( stepd_step_rec_t *job, pid_t pid )
+extern int proctrack_p_plugin_add ( stepd_step_rec_t *job, pid_t pid )
 {
 	return SLURM_SUCCESS;
 }
 
-extern int slurm_container_plugin_signal ( uint64_t id, int signal )
+extern int proctrack_p_plugin_signal ( uint64_t id, int signal )
 {
 	return kill_proc_tree((pid_t)id, signal);
 }
 
-extern int slurm_container_plugin_destroy ( uint64_t id )
+extern int proctrack_p_plugin_destroy ( uint64_t id )
 {
 	return SLURM_SUCCESS;
 }
 
-extern uint64_t slurm_container_plugin_find(pid_t pid)
+extern uint64_t proctrack_p_plugin_find(pid_t pid)
 {
 	return (uint64_t) find_ancestor(pid, "slurmstepd");
 }
 
-extern bool slurm_container_plugin_has_pid(uint64_t cont_id, pid_t pid)
+extern bool proctrack_p_plugin_has_pid(uint64_t cont_id, pid_t pid)
 {
 	uint64_t cont;
 
@@ -151,7 +151,7 @@ extern bool slurm_container_plugin_has_pid(uint64_t cont_id, pid_t pid)
 }
 
 extern int
-slurm_container_plugin_wait(uint64_t cont_id)
+proctrack_p_plugin_wait(uint64_t cont_id)
 {
 	int delay = 1;
 
@@ -161,8 +161,8 @@ slurm_container_plugin_wait(uint64_t cont_id)
 	}
 
 	/* Spin until the container is successfully destroyed */
-	while (slurm_container_plugin_destroy(cont_id) != SLURM_SUCCESS) {
-		slurm_container_plugin_signal(cont_id, SIGKILL);
+	while (proctrack_p_plugin_destroy(cont_id) != SLURM_SUCCESS) {
+		proctrack_p_plugin_signal(cont_id, SIGKILL);
 		sleep(delay);
 		if (delay < 120) {
 			delay *= 2;
@@ -175,7 +175,7 @@ slurm_container_plugin_wait(uint64_t cont_id)
 }
 
 extern int
-slurm_container_plugin_get_pids(uint64_t cont_id, pid_t **pids, int *npids)
+proctrack_p_plugin_get_pids(uint64_t cont_id, pid_t **pids, int *npids)
 {
 	return proctrack_linuxproc_get_pids((pid_t)cont_id, pids, npids);
 }
diff --git a/src/plugins/proctrack/lua/proctrack_lua.c b/src/plugins/proctrack/lua/proctrack_lua.c
index e2736929618..18e7b3ffdfd 100644
--- a/src/plugins/proctrack/lua/proctrack_lua.c
+++ b/src/plugins/proctrack/lua/proctrack_lua.c
@@ -193,14 +193,14 @@ static int check_lua_script_functions (void)
 	int rc = 0;
 	int i;
 	const char *fns[] = {
-		"slurm_container_create",
-		"slurm_container_add",
-		"slurm_container_signal",
-		"slurm_container_destroy",
-		"slurm_container_find",
-		"slurm_container_has_pid",
-		"slurm_container_wait",
-		"slurm_container_get_pids",
+		"proctrack_g_create",
+		"proctrack_g_add",
+		"proctrack_g_signal",
+		"proctrack_g_destroy",
+		"proctrack_g_find",
+		"proctrack_g_has_pid",
+		"proctrack_g_wait",
+		"proctrack_g_get_pids",
 		NULL
 	};
 
@@ -320,7 +320,7 @@ static int lua_job_table_create (stepd_step_rec_t *job)
 	return (0);
 }
 
-int slurm_container_plugin_create (stepd_step_rec_t *job)
+int proctrack_p_plugin_create (stepd_step_rec_t *job)
 {
 	int rc = SLURM_ERROR;
 	double id;
@@ -331,13 +331,13 @@ int slurm_container_plugin_create (stepd_step_rec_t *job)
 	 *  All lua script functions should have been verified during
 	 *   initialization:
 	 */
-	lua_getglobal (L, "slurm_container_create");
+	lua_getglobal (L, "proctrack_g_create");
 	if (lua_isnil (L, -1))
 		goto out;
 
 	lua_job_table_create (job);
 	if (lua_pcall (L, 1, 1, 0) != 0) {
-		error ("proctrack/lua: %s: slurm_container_plugin_create: %s",
+		error ("proctrack/lua: %s: proctrack_p_plugin_create: %s",
 		       lua_script_path, lua_tostring (L, -1));
 		goto out;
 	}
@@ -347,7 +347,7 @@ int slurm_container_plugin_create (stepd_step_rec_t *job)
 	 */
 	if (lua_isnil (L, -1)) {
 		error ("proctrack/lua: "
-		       "slurm_container_plugin_create did not return id");
+		       "proctrack_p_plugin_create did not return id");
 		lua_pop (L, -1);
 		goto out;
 	}
@@ -363,13 +363,13 @@ out:
 	return rc;
 }
 
-int slurm_container_plugin_add (stepd_step_rec_t *job, pid_t pid)
+int proctrack_p_plugin_add (stepd_step_rec_t *job, pid_t pid)
 {
 	int rc = SLURM_ERROR;
 
 	slurm_mutex_lock (&lua_lock);
 
-	lua_getglobal (L, "slurm_container_add");
+	lua_getglobal (L, "proctrack_g_add");
 	if (lua_isnil (L, -1))
 		goto out;
 
@@ -379,7 +379,7 @@ int slurm_container_plugin_add (stepd_step_rec_t *job, pid_t pid)
 
 	if (lua_pcall (L, 3, 1, 0) != 0) {
 		error ("running lua function "
-		       "'slurm_container_plugin_add': %s",
+		       "'proctrack_p_plugin_add': %s",
 		       lua_tostring (L, -1));
 		goto out;
 	}
@@ -391,13 +391,13 @@ out:
 	return (rc);
 }
 
-int slurm_container_plugin_signal (uint64_t id, int sig)
+int proctrack_p_plugin_signal (uint64_t id, int sig)
 {
 	int rc = SLURM_ERROR;
 
 	slurm_mutex_lock (&lua_lock);
 
-	lua_getglobal (L, "slurm_container_signal");
+	lua_getglobal (L, "proctrack_g_signal");
 	if (lua_isnil (L, -1))
 		goto out;
 
@@ -406,7 +406,7 @@ int slurm_container_plugin_signal (uint64_t id, int sig)
 
 	if (lua_pcall (L, 2, 1, 0) != 0) {
 		error ("running lua function "
-		       "'slurm_container_plugin_signal': %s",
+		       "'proctrack_p_plugin_signal': %s",
 		       lua_tostring (L, -1));
 		goto out;
 	}
@@ -418,13 +418,13 @@ out:
 	return (rc);
 }
 
-int slurm_container_plugin_destroy (uint64_t id)
+int proctrack_p_plugin_destroy (uint64_t id)
 {
 	int rc = SLURM_ERROR;
 
 	slurm_mutex_lock (&lua_lock);
 
-	lua_getglobal (L, "slurm_container_destroy");
+	lua_getglobal (L, "proctrack_g_destroy");
 	if (lua_isnil (L, -1))
 		goto out;
 
@@ -432,7 +432,7 @@ int slurm_container_plugin_destroy (uint64_t id)
 
 	if (lua_pcall (L, 1, 1, 0) != 0) {
 		error ("running lua function "
-		       "'slurm_container_plugin_destroy': %s",
+		       "'proctrack_p_plugin_destroy': %s",
 		       lua_tostring (L, -1));
 		goto out;
 	}
@@ -445,20 +445,20 @@ out:
 	return (rc);
 }
 
-uint64_t slurm_container_plugin_find (pid_t pid)
+uint64_t proctrack_p_plugin_find (pid_t pid)
 {
 	uint64_t id = (uint64_t) SLURM_ERROR;
 
 	slurm_mutex_lock (&lua_lock);
 
-	lua_getglobal (L, "slurm_container_find");
+	lua_getglobal (L, "proctrack_g_find");
 	if (lua_isnil (L, -1))
 		goto out;
 
 	lua_pushnumber (L, pid);
 
 	if (lua_pcall (L, 1, 1, 0) != 0) {
-		error ("running lua function 'slurm_container_plugin_find': %s",
+		error ("running lua function 'proctrack_p_plugin_find': %s",
 		       lua_tostring (L, -1));
 		goto out;
 	}
@@ -471,13 +471,13 @@ out:
 	return (id);
 }
 
-bool slurm_container_plugin_has_pid (uint64_t id, pid_t pid)
+bool proctrack_p_plugin_has_pid (uint64_t id, pid_t pid)
 {
 	int rc = 0;
 
 	slurm_mutex_lock (&lua_lock);
 
-	lua_getglobal (L, "slurm_container_has_pid");
+	lua_getglobal (L, "proctrack_g_has_pid");
 	if (lua_isnil (L, -1))
 		goto out;
 
@@ -486,7 +486,7 @@ bool slurm_container_plugin_has_pid (uint64_t id, pid_t pid)
 
 	if (lua_pcall (L, 2, 1, 0) != 0) {
 		error ("running lua function "
-		       "'slurm_container_plugin_has_pid': %s",
+		       "'proctrack_p_plugin_has_pid': %s",
 		       lua_tostring (L, -1));
 		goto out;
 	}
@@ -499,20 +499,20 @@ out:
 	return (rc == 1);
 }
 
-int slurm_container_plugin_wait (uint64_t id)
+int proctrack_p_plugin_wait (uint64_t id)
 {
 	int rc = SLURM_ERROR;
 
 	slurm_mutex_lock (&lua_lock);
 
-	lua_getglobal (L, "slurm_container_wait");
+	lua_getglobal (L, "proctrack_g_wait");
 	if (lua_isnil (L, -1))
 		goto out;
 
 	lua_pushnumber (L, id);
 
 	if (lua_pcall (L, 1, 1, 0) != 0) {
-		error ("running lua function 'slurm_container_plugin_wait': %s",
+		error ("running lua function 'proctrack_p_plugin_wait': %s",
 		       lua_tostring (L, -1));
 		goto out;
 	}
@@ -524,7 +524,7 @@ out:
 	return (rc);
 }
 
-int slurm_container_plugin_get_pids (uint64_t cont_id, pid_t **pids, int *npids)
+int proctrack_p_plugin_get_pids (uint64_t cont_id, pid_t **pids, int *npids)
 {
 	int rc = SLURM_ERROR;
 	int i = 0;
@@ -535,7 +535,7 @@ int slurm_container_plugin_get_pids (uint64_t cont_id, pid_t **pids, int *npids)
 
 	slurm_mutex_lock (&lua_lock);
 
-	lua_getglobal (L, "slurm_container_get_pids");
+	lua_getglobal (L, "proctrack_g_get_pids");
 	if (lua_isnil (L, -1))
 		goto out;
 
diff --git a/src/plugins/proctrack/pgid/Makefile.in b/src/plugins/proctrack/pgid/Makefile.in
index c4a0639800f..ca379238f6c 100644
--- a/src/plugins/proctrack/pgid/Makefile.in
+++ b/src/plugins/proctrack/pgid/Makefile.in
@@ -23,15 +23,15 @@ am__make_dryrun = \
     am__dry=no; \
     case $$MAKEFLAGS in \
       *\\[\ \	]*) \
-        echo 'am--echo: ; @echo "AM"  OK' | $(MAKE) -f - 2>/dev/null \
-          | grep '^AM OK$$' >/dev/null || am__dry=yes;; \
+	echo 'am--echo: ; @echo "AM"  OK' | $(MAKE) -f - 2>/dev/null \
+	  | grep '^AM OK$$' >/dev/null || am__dry=yes;; \
       *) \
-        for am__flg in $$MAKEFLAGS; do \
-          case $$am__flg in \
-            *=*|--*) ;; \
-            *n*) am__dry=yes; break;; \
-          esac; \
-        done;; \
+	for am__flg in $$MAKEFLAGS; do \
+	  case $$am__flg in \
+	    *=*|--*) ;; \
+	    *n*) am__dry=yes; break;; \
+	  esac; \
+	done;; \
     esac; \
     test $$am__dry = yes; \
   }
@@ -129,7 +129,7 @@ am__uninstall_files_from_dir = { \
   test -z "$$files" \
     || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
     || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
-         $(am__cd) "$$dir" && rm -f $$files; }; \
+	 $(am__cd) "$$dir" && rm -f $$files; }; \
   }
 am__installdirs = "$(DESTDIR)$(pkglibdir)"
 LTLIBRARIES = $(pkglib_LTLIBRARIES)
@@ -410,7 +410,7 @@ $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__confi
 	  case '$(am__configure_deps)' in \
 	    *$$dep*) \
 	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
-	        && { if test -f $@; then exit 0; else break; fi; }; \
+		&& { if test -f $@; then exit 0; else break; fi; }; \
 	      exit 1;; \
 	  esac; \
 	done; \
@@ -467,7 +467,7 @@ clean-pkglibLTLIBRARIES:
 	  echo "rm -f \"$${dir}/so_locations\""; \
 	  rm -f "$${dir}/so_locations"; \
 	done
-proctrack_pgid.la: $(proctrack_pgid_la_OBJECTS) $(proctrack_pgid_la_DEPENDENCIES) $(EXTRA_proctrack_pgid_la_DEPENDENCIES) 
+proctrack_pgid.la: $(proctrack_pgid_la_OBJECTS) $(proctrack_pgid_la_DEPENDENCIES) $(EXTRA_proctrack_pgid_la_DEPENDENCIES)
 	$(proctrack_pgid_la_LINK) -rpath $(pkglibdir) $(proctrack_pgid_la_OBJECTS) $(proctrack_pgid_la_LIBADD) $(LIBS)
 
 mostlyclean-compile:
diff --git a/src/plugins/proctrack/pgid/proctrack_pgid.c b/src/plugins/proctrack/pgid/proctrack_pgid.c
index 44ac36d5ba2..bb8af05160a 100644
--- a/src/plugins/proctrack/pgid/proctrack_pgid.c
+++ b/src/plugins/proctrack/pgid/proctrack_pgid.c
@@ -108,7 +108,7 @@ extern int fini ( void )
 	return SLURM_SUCCESS;
 }
 
-extern int slurm_container_plugin_create ( stepd_step_rec_t *job )
+extern int proctrack_p_plugin_create ( stepd_step_rec_t *job )
 {
 	return SLURM_SUCCESS;
 }
@@ -116,13 +116,13 @@ extern int slurm_container_plugin_create ( stepd_step_rec_t *job )
 /*
  * Uses job step process group id.
  */
-extern int slurm_container_plugin_add ( stepd_step_rec_t *job, pid_t pid )
+extern int proctrack_p_plugin_add ( stepd_step_rec_t *job, pid_t pid )
 {
 	job->cont_id = (uint64_t)job->pgid;
 	return SLURM_SUCCESS;
 }
 
-extern int slurm_container_plugin_signal  ( uint64_t id, int signal )
+extern int proctrack_p_plugin_signal  ( uint64_t id, int signal )
 {
 	pid_t pid = (pid_t) id;
 
@@ -137,12 +137,12 @@ extern int slurm_container_plugin_signal  ( uint64_t id, int signal )
 	return SLURM_ERROR;
 }
 
-extern int slurm_container_plugin_destroy ( uint64_t id )
+extern int proctrack_p_plugin_destroy ( uint64_t id )
 {
 	return SLURM_SUCCESS;
 }
 
-extern uint64_t slurm_container_plugin_find(pid_t pid)
+extern uint64_t proctrack_p_plugin_find(pid_t pid)
 {
 	pid_t rc = getpgid(pid);
 
@@ -152,7 +152,7 @@ extern uint64_t slurm_container_plugin_find(pid_t pid)
 		return (uint64_t) rc;
 }
 
-extern bool slurm_container_plugin_has_pid(uint64_t cont_id, pid_t pid)
+extern bool proctrack_p_plugin_has_pid(uint64_t cont_id, pid_t pid)
 {
 	pid_t pgid = getpgid(pid);
 
@@ -163,7 +163,7 @@ extern bool slurm_container_plugin_has_pid(uint64_t cont_id, pid_t pid)
 }
 
 extern int
-slurm_container_plugin_wait(uint64_t cont_id)
+proctrack_p_plugin_wait(uint64_t cont_id)
 {
 	pid_t pgid = (pid_t)cont_id;
 	int delay = 1;
@@ -175,7 +175,7 @@ slurm_container_plugin_wait(uint64_t cont_id)
 
 	/* Spin until the process group is gone. */
 	while (killpg(pgid, 0) == 0) {
-		slurm_container_plugin_signal(cont_id, SIGKILL);
+		proctrack_p_plugin_signal(cont_id, SIGKILL);
 		sleep(delay);
 		if (delay < 120) {
 			delay *= 2;
@@ -188,9 +188,9 @@ slurm_container_plugin_wait(uint64_t cont_id)
 }
 
 extern int
-slurm_container_plugin_get_pids(uint64_t cont_id, pid_t **pids, int *npids)
+proctrack_p_plugin_get_pids(uint64_t cont_id, pid_t **pids, int *npids)
 {
 	error("proctrack/pgid does not implement "
-	      "slurm_container_plugin_get_pids");
+	      "proctrack_p_plugin_get_pids");
 	return SLURM_ERROR;
 }
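As in the cgroup and linuxproc plugins above, proctrack_p_plugin_wait() in the pgid plugin is a spin-with-back-off loop: keep signalling the container and sleeping, doubling the delay up to a 120 second cap, until the container is gone. A standalone sketch of just that loop, in its process-group flavour (the function name here is invented for this note):

#include <signal.h>
#include <unistd.h>
#include <sys/types.h>

/* Spin until the process group disappears, mirroring the renamed
 * proctrack_p_plugin_wait()/proctrack_p_plugin_signal() pair. */
void wait_for_process_group(pid_t pgid)
{
	int delay = 1;

	while (killpg(pgid, 0) == 0) {	/* signal 0: existence check only */
		killpg(pgid, SIGKILL);	/* stands in for the plugin's signal call */
		sleep(delay);
		if (delay < 120)
			delay *= 2;	/* exponential back-off, capped at 120s */
	}
}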
diff --git a/src/plugins/proctrack/sgi_job/proctrack_sgi_job.c b/src/plugins/proctrack/sgi_job/proctrack_sgi_job.c
index 5eb7f935471..b7362fc567a 100644
--- a/src/plugins/proctrack/sgi_job/proctrack_sgi_job.c
+++ b/src/plugins/proctrack/sgi_job/proctrack_sgi_job.c
@@ -192,7 +192,7 @@ int _job_getpidcnt (jid_t jid)
 	return ((*job_ops.getpidcnt) (jid));
 }
 
-int slurm_container_plugin_create (stepd_step_rec_t *job)
+int proctrack_p_plugin_create (stepd_step_rec_t *job)
 {
 	if (!libjob_handle)
 		init();
@@ -212,7 +212,7 @@ int slurm_container_plugin_create (stepd_step_rec_t *job)
  * was created and all of its spawned tasks are placed into the container
  * when forked, all we need to do is remove the slurmstepd from the container
  * (once) at this time. */
-int slurm_container_plugin_add (stepd_step_rec_t *job, pid_t pid)
+int proctrack_p_plugin_add (stepd_step_rec_t *job, pid_t pid)
 {
 	static bool first = 1;
 
@@ -233,7 +233,7 @@ int slurm_container_plugin_add (stepd_step_rec_t *job, pid_t pid)
 	return SLURM_SUCCESS;
 }
 
-int slurm_container_plugin_signal (uint64_t id, int sig)
+int proctrack_p_plugin_signal (uint64_t id, int sig)
 {
 	if ( (_job_killjid ((jid_t) id, sig) < 0)
 	   && (errno != ENODATA) && (errno != EBADF) )
@@ -241,7 +241,7 @@ int slurm_container_plugin_signal (uint64_t id, int sig)
 	return (SLURM_SUCCESS);
 }
 
-int slurm_container_plugin_destroy (uint64_t id)
+int proctrack_p_plugin_destroy (uint64_t id)
 {
 	int status;
 	_job_waitjid ((jid_t) id, &status, 0);
@@ -251,7 +251,7 @@ int slurm_container_plugin_destroy (uint64_t id)
 	return SLURM_SUCCESS;
 }
 
-uint64_t slurm_container_plugin_find (pid_t pid)
+uint64_t proctrack_p_plugin_find (pid_t pid)
 {
 	jid_t jid;
 
@@ -261,7 +261,7 @@ uint64_t slurm_container_plugin_find (pid_t pid)
 	return ((uint64_t) jid);
 }
 
-bool slurm_container_plugin_has_pid (uint64_t cont_id, pid_t pid)
+bool proctrack_p_plugin_has_pid (uint64_t cont_id, pid_t pid)
 {
 	jid_t jid;
 
@@ -273,7 +273,7 @@ bool slurm_container_plugin_has_pid (uint64_t cont_id, pid_t pid)
 	return true;
 }
 
-int slurm_container_plugin_wait (uint64_t id)
+int proctrack_p_plugin_wait (uint64_t id)
 {
 	int status;
 	if (_job_waitjid ((jid_t) id, &status, 0) == (jid_t)-1)
@@ -282,7 +282,7 @@ int slurm_container_plugin_wait (uint64_t id)
 	return SLURM_SUCCESS;
 }
 
-int slurm_container_plugin_get_pids(uint64_t cont_id, pid_t **pids, int *npids)
+int proctrack_p_plugin_get_pids(uint64_t cont_id, pid_t **pids, int *npids)
 {
 	int pidcnt, bufsize;
 	pid_t *p;
diff --git a/src/plugins/switch/nrt/nrt.c b/src/plugins/switch/nrt/nrt.c
index 3abcef8be10..ce95506b6f1 100644
--- a/src/plugins/switch/nrt/nrt.c
+++ b/src/plugins/switch/nrt/nrt.c
@@ -2847,7 +2847,7 @@ nrt_job_step_complete(slurm_nrt_jobinfo_t *jp, hostlist_t hl)
  * Used by the slurmctld at startup time to restore the allocation
  * status of any job steps that were running at the time the previous
  * slurmctld was shutdown.  Also used to restore the allocation
- * status after a call to switch_clear().
+ * status after a call to switch_g_clear().
  */
 extern int
 nrt_job_step_allocated(slurm_nrt_jobinfo_t *jp, hostlist_t hl)
diff --git a/src/plugins/task/affinity/task_affinity.c b/src/plugins/task/affinity/task_affinity.c
index 884fe710b48..d48359b30f4 100644
--- a/src/plugins/task/affinity/task_affinity.c
+++ b/src/plugins/task/affinity/task_affinity.c
@@ -161,26 +161,26 @@ static void _update_bind_type(launch_tasks_request_msg_t *req)
 }
 
 /*
- * task_slurmd_batch_request()
+ * task_p_slurmd_batch_request()
  */
-extern int task_slurmd_batch_request (uint32_t job_id,
-				      batch_job_launch_msg_t *req)
+extern int task_p_slurmd_batch_request (uint32_t job_id,
+					batch_job_launch_msg_t *req)
 {
-	info("task_slurmd_batch_request: %u", job_id);
+	info("task_p_slurmd_batch_request: %u", job_id);
 	batch_bind(req);
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_slurmd_launch_request()
+ * task_p_slurmd_launch_request()
  */
-extern int task_slurmd_launch_request (uint32_t job_id,
-				       launch_tasks_request_msg_t *req,
-				       uint32_t node_id)
+extern int task_p_slurmd_launch_request (uint32_t job_id,
+					 launch_tasks_request_msg_t *req,
+					 uint32_t node_id)
 {
 	char buf_type[100];
 
-	debug("task_slurmd_launch_request: %u.%u %u",
+	debug("task_p_slurmd_launch_request: %u.%u %u",
 	      job_id, req->job_step_id, node_id);
 
 	if (((conf->sockets >= 1)
@@ -190,53 +190,53 @@ extern int task_slurmd_launch_request (uint32_t job_id,
 
 		slurm_sprint_cpu_bind_type(buf_type, req->cpu_bind_type);
 		debug("task affinity : before lllp distribution cpu bind "
-		     "method is '%s' (%s)", buf_type, req->cpu_bind);
+		      "method is '%s' (%s)", buf_type, req->cpu_bind);
 
 		lllp_distribution(req, node_id);
 
 		slurm_sprint_cpu_bind_type(buf_type, req->cpu_bind_type);
 		debug("task affinity : after lllp distribution cpu bind "
-		     "method is '%s' (%s)", buf_type, req->cpu_bind);
+		      "method is '%s' (%s)", buf_type, req->cpu_bind);
 	}
 
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_slurmd_reserve_resources()
+ * task_p_slurmd_reserve_resources()
  */
-extern int task_slurmd_reserve_resources (uint32_t job_id,
-					  launch_tasks_request_msg_t *req,
-					  uint32_t node_id)
+extern int task_p_slurmd_reserve_resources (uint32_t job_id,
+					    launch_tasks_request_msg_t *req,
+					    uint32_t node_id)
 {
-	debug("task_slurmd_reserve_resources: %u", job_id);
+	debug("task_p_slurmd_reserve_resources: %u", job_id);
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_slurmd_suspend_job()
+ * task_p_slurmd_suspend_job()
  */
-extern int task_slurmd_suspend_job (uint32_t job_id)
+extern int task_p_slurmd_suspend_job (uint32_t job_id)
 {
-	debug("task_slurmd_suspend_job: %u", job_id);
+	debug("task_p_slurmd_suspend_job: %u", job_id);
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_slurmd_resume_job()
+ * task_p_slurmd_resume_job()
  */
-extern int task_slurmd_resume_job (uint32_t job_id)
+extern int task_p_slurmd_resume_job (uint32_t job_id)
 {
-	debug("task_slurmd_resume_job: %u", job_id);
+	debug("task_p_slurmd_resume_job: %u", job_id);
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_slurmd_release_resources()
+ * task_p_slurmd_release_resources()
  */
-extern int task_slurmd_release_resources (uint32_t job_id)
+extern int task_p_slurmd_release_resources (uint32_t job_id)
 {
-	debug("task_slurmd_release_resources: %u", job_id);
+	debug("task_p_slurmd_release_resources: %u", job_id);
 
 #if PURGE_CPUSET_DIRS
 	/* NOTE: The notify_on_release flag set in cpuset.c
@@ -254,7 +254,7 @@ extern int task_slurmd_release_resources (uint32_t job_id)
 		}
 #else
 		if (snprintf(base, PATH_MAX, "%s/slurm%u",
-				CPUSET_DIR, job_id) > PATH_MAX) {
+			     CPUSET_DIR, job_id) > PATH_MAX) {
 			error("cpuset path too long");
 			return SLURM_ERROR;
 		}
@@ -293,11 +293,11 @@ extern int task_slurmd_release_resources (uint32_t job_id)
 }
 
 /*
- * task_pre_setuid() is called before setting the UID for the
+ * task_p_pre_setuid() is called before setting the UID for the
  * user to launch his jobs. Use this to create the CPUSET directory
  * and set the owner appropriately.
  */
-extern int task_pre_setuid (stepd_step_rec_t *job)
+extern int task_p_pre_setuid (stepd_step_rec_t *job)
 {
 	char path[PATH_MAX];
 	int rc;
@@ -315,7 +315,7 @@ extern int task_pre_setuid (stepd_step_rec_t *job)
 	}
 #else
 	if (snprintf(path, PATH_MAX, "%s/slurm%u",
-			CPUSET_DIR, job->jobid) > PATH_MAX) {
+		     CPUSET_DIR, job->jobid) > PATH_MAX) {
 		error("cpuset path too long");
 		return SLURM_ERROR;
 	}
@@ -325,22 +325,22 @@ extern int task_pre_setuid (stepd_step_rec_t *job)
 
 	/* if cpuset was built ok, check for cpu frequency setting */
 	if ( !(rc) && (job->cpu_freq != NO_VAL))
- 	     cpu_freq_cpuset_validate(job);
+		cpu_freq_cpuset_validate(job);
 
 	return rc;
 }
 
 /*
- * task_pre_launch() is called prior to exec of application task.
+ * task_p_pre_launch() is called prior to exec of application task.
  *	It is followed by TaskProlog program (from slurm.conf) and
  *	--task-prolog (from srun command line).
  */
-extern int task_pre_launch (stepd_step_rec_t *job)
+extern int task_p_pre_launch (stepd_step_rec_t *job)
 {
 	char base[PATH_MAX], path[PATH_MAX];
 	int rc = SLURM_SUCCESS;
 
-	debug("affinity task_pre_launch:%u.%u, task:%u bind:%u",
+	debug("affinity task_p_pre_launch:%u.%u, task:%u bind:%u",
 	      job->jobid, job->stepid, job->envtp->procid,
 	      job->cpu_bind_type);
 
@@ -356,14 +356,14 @@ extern int task_pre_launch (stepd_step_rec_t *job)
 		}
 #else
 		if (snprintf(base, PATH_MAX, "%s/slurm%u",
-				CPUSET_DIR, job->jobid) > PATH_MAX) {
+			     CPUSET_DIR, job->jobid) > PATH_MAX) {
 			error("cpuset path too long");
 			return SLURM_ERROR;
 		}
 #endif
 		if (snprintf(path, PATH_MAX, "%s/slurm%u.%u_%d",
-				base, job->jobid, job->stepid,
-				job->envtp->localid) > PATH_MAX) {
+			     base, job->jobid, job->stepid,
+			     job->envtp->localid) > PATH_MAX) {
 			error("cpuset path too long");
 			return SLURM_ERROR;
 		}
@@ -381,8 +381,8 @@ extern int task_pre_launch (stepd_step_rec_t *job)
 			reset_cpuset(&new_mask, &cur_mask);
 			if (conf->task_plugin_param & CPU_BIND_CPUSETS) {
 				rc = slurm_set_cpuset(base, path, mypid,
-						sizeof(new_mask),
-						&new_mask);
+						      sizeof(new_mask),
+						      &new_mask);
 				slurm_get_cpuset(path, mypid,
 						 sizeof(cur_mask),
 						 &cur_mask);
@@ -428,7 +428,7 @@ extern int task_pre_launch (stepd_step_rec_t *job)
 
 		cur_mask = numa_get_membind();
 		if (get_memset(&new_mask, job)
-		&&  (!(job->mem_bind_type & MEM_BIND_NONE))) {
+		    &&  (!(job->mem_bind_type & MEM_BIND_NONE))) {
 			numa_set_membind(&new_mask);
 			cur_mask = new_mask;
 		}
@@ -439,10 +439,10 @@ extern int task_pre_launch (stepd_step_rec_t *job)
 }
 
 /*
- * task_pre_launch_priv() is called prior to exec of application task.
+ * task_p_pre_launch_priv() is called prior to exec of application task.
  * in privileged mode, just after slurm_spank_task_init_privileged
  */
-extern int task_pre_launch_priv (stepd_step_rec_t *job)
+extern int task_p_pre_launch_priv (stepd_step_rec_t *job)
 {
 	return SLURM_SUCCESS;
 }
@@ -452,9 +452,9 @@ extern int task_pre_launch_priv (stepd_step_rec_t *job)
  *	It is preceded by --task-epilog (from srun command line)
  *	followed by TaskEpilog program (from slurm.conf).
  */
-extern int task_post_term (stepd_step_rec_t *job, stepd_step_task_info_t *task)
+extern int task_p_post_term (stepd_step_rec_t *job, stepd_step_task_info_t *task)
 {
-	debug("affinity task_post_term: %u.%u, task %d",
+	debug("affinity task_p_post_term: %u.%u, task %d",
 	      job->jobid, job->stepid, task->id);
 
 #if PURGE_CPUSET_DIRS
@@ -473,14 +473,14 @@ extern int task_post_term (stepd_step_rec_t *job, stepd_step_task_info_t *task)
 		}
 #else
 		if (snprintf(base, PATH_MAX, "%s/slurm%u",
-				CPUSET_DIR, job->jobid) > PATH_MAX) {
+			     CPUSET_DIR, job->jobid) > PATH_MAX) {
 			error("cpuset path too long");
 			return SLURM_ERROR;
 		}
 #endif
 		if (snprintf(path, PATH_MAX, "%s/slurm%u.%u_%d",
-				base, job->jobid, job->stepid,
-				job->envtp->localid) > PATH_MAX) {
+			     base, job->jobid, job->stepid,
+			     job->envtp->localid) > PATH_MAX) {
 			error("cpuset path too long");
 			return SLURM_ERROR;
 		}
@@ -492,10 +492,10 @@ extern int task_post_term (stepd_step_rec_t *job, stepd_step_task_info_t *task)
 }
 
 /*
- * task_post_step() is called after termination of the step
+ * task_p_post_step() is called after termination of the step
  * (all the tasks)
  */
-extern int task_post_step (stepd_step_rec_t *job)
+extern int task_p_post_step (stepd_step_rec_t *job)
 {
 	return SLURM_SUCCESS;
 }
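The snprintf/PATH_MAX checks repeated throughout task_affinity.c build a two-level cpuset layout: a per-job directory under CPUSET_DIR plus a per-task directory named slurm<jobid>.<stepid>_<localid>. A hedged reconstruction of just the path building follows; the CPUSET_DIR value and the >= PATH_MAX truncation comparison are choices made for this sketch, not the plugin's exact code.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define CPUSET_DIR "/dev/cpuset"	/* placeholder mount point */

/* Build base = CPUSET_DIR/slurm<jobid> and
 * path = base/slurm<jobid>.<stepid>_<localid>, failing on truncation. */
int build_cpuset_paths(uint32_t jobid, uint32_t stepid, int localid,
		       char base[PATH_MAX], char path[PATH_MAX])
{
	if (snprintf(base, PATH_MAX, "%s/slurm%u", CPUSET_DIR, jobid)
	    >= PATH_MAX)
		return -1;		/* "cpuset path too long" */
	if (snprintf(path, PATH_MAX, "%s/slurm%u.%u_%d",
		     base, jobid, stepid, localid) >= PATH_MAX)
		return -1;
	return 0;
}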
diff --git a/src/plugins/task/cgroup/task_cgroup.c b/src/plugins/task/cgroup/task_cgroup.c
index eac9ed0b6d8..7743d4f8853 100644
--- a/src/plugins/task/cgroup/task_cgroup.c
+++ b/src/plugins/task/cgroup/task_cgroup.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  task_cgroup.c - Library for task pre-launch and post_termination functions
- *	            for containment using linux cgroup subsystems
+ *		    for containment using linux cgroup subsystems
  *****************************************************************************
  *  Copyright (C) 2009 CEA/DAM/DIF
  *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
@@ -116,7 +116,7 @@ extern int init (void)
 	}
 
 	if (slurm_cgroup_conf.constrain_ram_space ||
-	     slurm_cgroup_conf.constrain_swap_space) {
+	    slurm_cgroup_conf.constrain_swap_space) {
 		use_memory = true;
 		if (task_cgroup_memory_init(&slurm_cgroup_conf) !=
 		    SLURM_SUCCESS) {
@@ -162,64 +162,64 @@ extern int fini (void)
 }
 
 /*
- * task_slurmd_batch_request()
+ * task_p_slurmd_batch_request()
  */
-extern int task_slurmd_batch_request (uint32_t job_id,
-				      batch_job_launch_msg_t *req)
+extern int task_p_slurmd_batch_request (uint32_t job_id,
+					batch_job_launch_msg_t *req)
 {
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_slurmd_launch_request()
+ * task_p_slurmd_launch_request()
  */
-extern int task_slurmd_launch_request (uint32_t job_id,
-				       launch_tasks_request_msg_t *req,
-				       uint32_t node_id)
+extern int task_p_slurmd_launch_request (uint32_t job_id,
+					 launch_tasks_request_msg_t *req,
+					 uint32_t node_id)
 {
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_slurmd_reserve_resources()
+ * task_p_slurmd_reserve_resources()
  */
-extern int task_slurmd_reserve_resources (uint32_t job_id,
-					  launch_tasks_request_msg_t *req,
-					  uint32_t node_id)
+extern int task_p_slurmd_reserve_resources (uint32_t job_id,
+					    launch_tasks_request_msg_t *req,
+					    uint32_t node_id)
 {
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_slurmd_suspend_job()
+ * task_p_slurmd_suspend_job()
  */
-extern int task_slurmd_suspend_job (uint32_t job_id)
+extern int task_p_slurmd_suspend_job (uint32_t job_id)
 {
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_slurmd_resume_job()
+ * task_p_slurmd_resume_job()
  */
-extern int task_slurmd_resume_job (uint32_t job_id)
+extern int task_p_slurmd_resume_job (uint32_t job_id)
 {
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_slurmd_release_resources()
+ * task_p_slurmd_release_resources()
  */
-extern int task_slurmd_release_resources (uint32_t job_id)
+extern int task_p_slurmd_release_resources (uint32_t job_id)
 {
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_pre_setuid() is called before setting the UID for the
+ * task_p_pre_setuid() is called before setting the UID for the
  * user to launch his jobs. Use this to create the CPUSET directory
  * and set the owner appropriately.
  */
-extern int task_pre_setuid (stepd_step_rec_t *job)
+extern int task_p_pre_setuid (stepd_step_rec_t *job)
 {
 
 	if (use_cpuset) {
@@ -241,10 +241,10 @@ extern int task_pre_setuid (stepd_step_rec_t *job)
 }
 
 /*
- * task_pre_launch_priv() is called prior to exec of application task.
+ * task_p_pre_launch_priv() is called prior to exec of application task.
  * in privileged mode, just after slurm_spank_task_init_privileged
  */
-extern int task_pre_launch_priv (stepd_step_rec_t *job)
+extern int task_p_pre_launch_priv (stepd_step_rec_t *job)
 {
 
 	if (use_cpuset) {
@@ -266,11 +266,11 @@ extern int task_pre_launch_priv (stepd_step_rec_t *job)
 }
 
 /*
- * task_pre_launch() is called prior to exec of application task.
+ * task_p_pre_launch() is called prior to exec of application task.
  *	It is followed by TaskProlog program (from slurm.conf) and
  *	--task-prolog (from srun command line).
  */
-extern int task_pre_launch (stepd_step_rec_t *job)
+extern int task_p_pre_launch (stepd_step_rec_t *job)
 {
 
 	if (use_cpuset) {
@@ -287,16 +287,16 @@ extern int task_pre_launch (stepd_step_rec_t *job)
  *	It is preceded by --task-epilog (from srun command line)
  *	followed by TaskEpilog program (from slurm.conf).
  */
-extern int task_post_term (stepd_step_rec_t *job, stepd_step_task_info_t *task)
+extern int task_p_post_term (stepd_step_rec_t *job, stepd_step_task_info_t *task)
 {
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_post_step() is called after termination of the step
+ * task_p_post_step() is called after termination of the step
  * (all the tasks)
  */
-extern int task_post_step (stepd_step_rec_t *job)
+extern int task_p_post_step (stepd_step_rec_t *job)
 {
 	fini();
 	return SLURM_SUCCESS;
@@ -317,7 +317,7 @@ extern char* task_cgroup_create_slurm_cg (xcgroup_ns_t* ns) {
 	}
 #endif
 
-	/* create slurm cgroup in the ns (it could already exist) 
+	/* create slurm cgroup in the ns (it could already exist)
 	 * disable notify_on_release to avoid the removal/creation
 	 * of this cgroup for each last/first running job on the node */
 	if (xcgroup_create(ns,&slurm_cg,pre,
diff --git a/src/plugins/task/cray/task_cray.c b/src/plugins/task/cray/task_cray.c
index 18ce0ca0093..4c894a6092b 100644
--- a/src/plugins/task/cray/task_cray.c
+++ b/src/plugins/task/cray/task_cray.c
@@ -96,95 +96,95 @@ extern int fini (void)
 }
 
 /*
- * task_slurmd_batch_request()
+ * task_p_slurmd_batch_request()
  */
-extern int task_slurmd_batch_request (uint32_t job_id,
-				      batch_job_launch_msg_t *req)
+extern int task_p_slurmd_batch_request (uint32_t job_id,
+					batch_job_launch_msg_t *req)
 {
-	debug("task_slurmd_batch_request: %u", job_id);
+	debug("task_p_slurmd_batch_request: %u", job_id);
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_slurmd_launch_request()
+ * task_p_slurmd_launch_request()
  */
-extern int task_slurmd_launch_request (uint32_t job_id,
-				       launch_tasks_request_msg_t *req,
-				       uint32_t node_id)
+extern int task_p_slurmd_launch_request (uint32_t job_id,
+					 launch_tasks_request_msg_t *req,
+					 uint32_t node_id)
 {
-	debug("task_slurmd_launch_request: %u.%u %u",
+	debug("task_p_slurmd_launch_request: %u.%u %u",
 	      job_id, req->job_step_id, node_id);
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_slurmd_reserve_resources()
+ * task_p_slurmd_reserve_resources()
  */
-extern int task_slurmd_reserve_resources (uint32_t job_id,
-					  launch_tasks_request_msg_t *req,
-					  uint32_t node_id)
+extern int task_p_slurmd_reserve_resources (uint32_t job_id,
+					    launch_tasks_request_msg_t *req,
+					    uint32_t node_id)
 {
-	debug("task_slurmd_reserve_resources: %u %u", job_id, node_id);
+	debug("task_p_slurmd_reserve_resources: %u %u", job_id, node_id);
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_slurmd_suspend_job()
+ * task_p_slurmd_suspend_job()
  */
-extern int task_slurmd_suspend_job (uint32_t job_id)
+extern int task_p_slurmd_suspend_job (uint32_t job_id)
 {
-	debug("task_slurmd_suspend_job: %u", job_id);
+	debug("task_p_slurmd_suspend_job: %u", job_id);
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_slurmd_resume_job()
+ * task_p_slurmd_resume_job()
  */
-extern int task_slurmd_resume_job (uint32_t job_id)
+extern int task_p_slurmd_resume_job (uint32_t job_id)
 {
-	debug("task_slurmd_resume_job: %u", job_id);
+	debug("task_p_slurmd_resume_job: %u", job_id);
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_slurmd_release_resources()
+ * task_p_slurmd_release_resources()
  */
-extern int task_slurmd_release_resources (uint32_t job_id)
+extern int task_p_slurmd_release_resources (uint32_t job_id)
 {
-	debug("task_slurmd_release_resources: %u", job_id);
+	debug("task_p_slurmd_release_resources: %u", job_id);
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_pre_setuid() is called before setting the UID for the
+ * task_p_pre_setuid() is called before setting the UID for the
  * user to launch his jobs. Use this to create the CPUSET directory
  * and set the owner appropriately.
  */
-extern int task_pre_setuid (stepd_step_rec_t *job)
+extern int task_p_pre_setuid (stepd_step_rec_t *job)
 {
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_pre_launch() is called prior to exec of application task.
+ * task_p_pre_launch() is called prior to exec of application task.
  *	It is followed by TaskProlog program (from slurm.conf) and
  *	--task-prolog (from srun command line).
  */
-extern int task_pre_launch (stepd_step_rec_t *job)
+extern int task_p_pre_launch (stepd_step_rec_t *job)
 {
-	debug("task_pre_launch: %u.%u, task %d",
-		job->jobid, job->stepid, job->envtp->procid);
+	debug("task_p_pre_launch: %u.%u, task %d",
+	      job->jobid, job->stepid, job->envtp->procid);
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_pre_launch_priv() is called prior to exec of application task.
+ * task_p_pre_launch_priv() is called prior to exec of application task.
  * in privileged mode, just after slurm_spank_task_init_privileged
  */
-extern int task_pre_launch_priv (stepd_step_rec_t *job)
+extern int task_p_pre_launch_priv (stepd_step_rec_t *job)
 {
-	debug("task_pre_launch_priv: %u.%u",
-		job->jobid, job->stepid);
+	debug("task_p_pre_launch_priv: %u.%u",
+	      job->jobid, job->stepid);
 	return SLURM_SUCCESS;
 }
 
@@ -193,18 +193,18 @@ extern int task_pre_launch_priv (stepd_step_rec_t *job)
  *	It is preceded by --task-epilog (from srun command line)
  *	followed by TaskEpilog program (from slurm.conf).
  */
-extern int task_post_term (stepd_step_rec_t *job, stepd_step_task_info_t *task)
+extern int task_p_post_term (stepd_step_rec_t *job, stepd_step_task_info_t *task)
 {
-	debug("task_post_term: %u.%u, task %d",
-		job->jobid, job->stepid, job->envtp->procid);
+	debug("task_p_post_term: %u.%u, task %d",
+	      job->jobid, job->stepid, job->envtp->procid);
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_post_step() is called after termination of the step
+ * task_p_post_step() is called after termination of the step
  * (all the tasks)
  */
-extern int task_post_step (stepd_step_rec_t *job)
+extern int task_p_post_step (stepd_step_rec_t *job)
 {
 	return SLURM_SUCCESS;
 }
diff --git a/src/plugins/task/none/task_none.c b/src/plugins/task/none/task_none.c
index a42fdc1b2dc..bc8c87dd1bf 100644
--- a/src/plugins/task/none/task_none.c
+++ b/src/plugins/task/none/task_none.c
@@ -100,95 +100,95 @@ extern int fini (void)
 }
 
 /*
- * task_slurmd_batch_request()
+ * task_p_slurmd_batch_request()
  */
-extern int task_slurmd_batch_request (uint32_t job_id,
-				      batch_job_launch_msg_t *req)
+extern int task_p_slurmd_batch_request (uint32_t job_id,
+					batch_job_launch_msg_t *req)
 {
-	debug("task_slurmd_batch_request: %u", job_id);
+	debug("task_p_slurmd_batch_request: %u", job_id);
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_slurmd_launch_request()
+ * task_p_slurmd_launch_request()
  */
-extern int task_slurmd_launch_request (uint32_t job_id,
-				       launch_tasks_request_msg_t *req,
-				       uint32_t node_id)
+extern int task_p_slurmd_launch_request (uint32_t job_id,
+					 launch_tasks_request_msg_t *req,
+					 uint32_t node_id)
 {
-	debug("task_slurmd_launch_request: %u.%u %u",
+	debug("task_p_slurmd_launch_request: %u.%u %u",
 	      job_id, req->job_step_id, node_id);
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_slurmd_reserve_resources()
+ * task_p_slurmd_reserve_resources()
  */
-extern int task_slurmd_reserve_resources (uint32_t job_id,
-					  launch_tasks_request_msg_t *req,
-					  uint32_t node_id)
+extern int task_p_slurmd_reserve_resources (uint32_t job_id,
+					    launch_tasks_request_msg_t *req,
+					    uint32_t node_id)
 {
-	debug("task_slurmd_reserve_resources: %u %u", job_id, node_id);
+	debug("task_p_slurmd_reserve_resources: %u %u", job_id, node_id);
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_slurmd_suspend_job()
+ * task_p_slurmd_suspend_job()
  */
-extern int task_slurmd_suspend_job (uint32_t job_id)
+extern int task_p_slurmd_suspend_job (uint32_t job_id)
 {
-	debug("task_slurmd_suspend_job: %u", job_id);
+	debug("task_p_slurmd_suspend_job: %u", job_id);
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_slurmd_resume_job()
+ * task_p_slurmd_resume_job()
  */
-extern int task_slurmd_resume_job (uint32_t job_id)
+extern int task_p_slurmd_resume_job (uint32_t job_id)
 {
-	debug("task_slurmd_resume_job: %u", job_id);
+	debug("task_p_slurmd_resume_job: %u", job_id);
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_slurmd_release_resources()
+ * task_p_slurmd_release_resources()
  */
-extern int task_slurmd_release_resources (uint32_t job_id)
+extern int task_p_slurmd_release_resources (uint32_t job_id)
 {
-	debug("task_slurmd_release_resources: %u", job_id);
+	debug("task_p_slurmd_release_resources: %u", job_id);
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_pre_setuid() is called before setting the UID for the
+ * task_p_pre_setuid() is called before setting the UID for the
  * user to launch his jobs. Use this to create the CPUSET directory
  * and set the owner appropriately.
  */
-extern int task_pre_setuid (stepd_step_rec_t *job)
+extern int task_p_pre_setuid (stepd_step_rec_t *job)
 {
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_pre_launch() is called prior to exec of application task.
+ * task_p_pre_launch() is called prior to exec of application task.
  *	It is followed by TaskProlog program (from slurm.conf) and
  *	--task-prolog (from srun command line).
  */
-extern int task_pre_launch (stepd_step_rec_t *job)
+extern int task_p_pre_launch (stepd_step_rec_t *job)
 {
-	debug("task_pre_launch: %u.%u, task %d",
-		job->jobid, job->stepid, job->envtp->procid);
+	debug("task_p_pre_launch: %u.%u, task %d",
+	      job->jobid, job->stepid, job->envtp->procid);
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_pre_launch_priv() is called prior to exec of application task.
+ * task_p_pre_launch_priv() is called prior to exec of application task.
  * in privileged mode, just after slurm_spank_task_init_privileged
  */
-extern int task_pre_launch_priv (stepd_step_rec_t *job)
+extern int task_p_pre_launch_priv (stepd_step_rec_t *job)
 {
-	debug("task_pre_launch_priv: %u.%u",
-		job->jobid, job->stepid);
+	debug("task_p_pre_launch_priv: %u.%u",
+	      job->jobid, job->stepid);
 	return SLURM_SUCCESS;
 }
 
@@ -197,18 +197,18 @@ extern int task_pre_launch_priv (stepd_step_rec_t *job)
  *	It is preceded by --task-epilog (from srun command line)
  *	followed by TaskEpilog program (from slurm.conf).
  */
-extern int task_post_term (stepd_step_rec_t *job, stepd_step_task_info_t *task)
+extern int task_p_post_term (stepd_step_rec_t *job, stepd_step_task_info_t *task)
 {
-	debug("task_post_term: %u.%u, task %d",
-		job->jobid, job->stepid, task->id);
+	debug("task_p_post_term: %u.%u, task %d",
+	      job->jobid, job->stepid, task->id);
 	return SLURM_SUCCESS;
 }
 
 /*
- * task_post_step() is called after termination of the step
+ * task_p_post_step() is called after termination of the step
  * (all the tasks)
  */
-extern int task_post_step (stepd_step_rec_t *job)
+extern int task_p_post_step (stepd_step_rec_t *job)
 {
 	return SLURM_SUCCESS;
 }
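For reference, the task/none stubs above are what the renamed task_g_* wrappers in src/slurmd/common/task_plugin.c (further below) fan out to. Here is a self-contained toy of that dispatch, reduced to one plugin and one entry point; apart from task_p_slurmd_suspend_job and task_g_slurmd_suspend_job, every name (the ops struct, the context counter) is an invention of this sketch.

#include <stdint.h>
#include <stdio.h>

#define SLURM_SUCCESS 0

/* One function pointer per task_p_* symbol; only one shown here. */
typedef struct {
	int (*slurmd_suspend_job)(uint32_t job_id);
} task_ops_t;

/* What task/none now exports as task_p_slurmd_suspend_job(). */
static int task_p_slurmd_suspend_job(uint32_t job_id)
{
	printf("task_p_slurmd_suspend_job: %u\n", job_id);
	return SLURM_SUCCESS;
}

static task_ops_t ops[] = { { task_p_slurmd_suspend_job } };
static const int g_task_context_num = 1;	/* invented counter name */

/* Common-layer wrapper: call every loaded task plugin, stop on failure. */
int task_g_slurmd_suspend_job(uint32_t job_id)
{
	int i, rc = SLURM_SUCCESS;

	for (i = 0; i < g_task_context_num && rc == SLURM_SUCCESS; i++)
		rc = (*(ops[i].slurmd_suspend_job))(job_id);

	return rc;
}

int main(void)
{
	return task_g_slurmd_suspend_job(1234) == SLURM_SUCCESS ? 0 : 1;
}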
diff --git a/src/slurmctld/backup.c b/src/slurmctld/backup.c
index fe5b1188834..6979e245bd0 100644
--- a/src/slurmctld/backup.c
+++ b/src/slurmctld/backup.c
@@ -215,7 +215,7 @@ void run_backup(void)
 	/* clear old state and read new state */
 	lock_slurmctld(config_write_lock);
 	job_fini();
-	if (switch_restore(slurmctld_conf.state_save_location, true)) {
+	if (switch_g_restore(slurmctld_conf.state_save_location, true)) {
 		error("failed to restore switch state");
 		abort();
 	}
diff --git a/src/slurmctld/controller.c b/src/slurmctld/controller.c
index 22683aec4b4..1218167a351 100644
--- a/src/slurmctld/controller.c
+++ b/src/slurmctld/controller.c
@@ -466,7 +466,7 @@ int main(int argc, char *argv[])
 			trigger_primary_ctld_res_ctrl();
 			/* Now recover the remaining state information */
 			lock_slurmctld(config_write_lock);
-			if (switch_restore(slurmctld_conf.state_save_location,
+			if (switch_g_restore(slurmctld_conf.state_save_location,
 					   recover ? true : false))
 				fatal(" failed to initialize switch plugin" );
 			if ((error_code = read_slurm_conf(recover, false))) {
@@ -577,7 +577,7 @@ int main(int argc, char *argv[])
 
 		/* termination of controller */
 		dir_name = slurm_get_state_save_location();
-		switch_save(dir_name);
+		switch_g_save(dir_name);
 		xfree(dir_name);
 		slurm_priority_fini();
 		slurmctld_plugstack_fini();
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index e825b56351b..fd6f3272c78 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -9395,7 +9395,7 @@ static void *_switch_suspend_info(struct job_record *job_ptr)
 	while ((step_ptr = (struct step_record *) list_next (step_iterator))) {
 		if (step_ptr->state != JOB_RUNNING)
 			continue;
-		interconnect_suspend_info_get(step_ptr->switch_job,
+		switch_g_suspend_info_get(step_ptr->switch_job,
 					      &switch_suspend_info);
 	}
 	list_iterator_destroy (step_iterator);
@@ -9571,7 +9571,7 @@ static int _job_suspend_switch_test(struct job_record *job_ptr)
 	while ((step_ptr = (struct step_record *) list_next (step_iterator))) {
 		if (step_ptr->state != JOB_RUNNING)
 			continue;
-		rc = interconnect_suspend_test(step_ptr->switch_job);
+		rc = switch_g_suspend_test(step_ptr->switch_job);
 		if (rc != SLURM_SUCCESS)
 			break;
 	}
diff --git a/src/slurmctld/step_mgr.c b/src/slurmctld/step_mgr.c
index 93c9b6ebe23..7057a274f9a 100644
--- a/src/slurmctld/step_mgr.c
+++ b/src/slurmctld/step_mgr.c
@@ -272,7 +272,7 @@ static void _free_step_rec(struct step_record *step_ptr)
 	if (step_ptr->switch_job) {
 		switch_g_job_step_complete(step_ptr->switch_job,
 					   step_ptr->step_layout->node_list);
-		switch_free_jobinfo (step_ptr->switch_job);
+		switch_g_free_jobinfo (step_ptr->switch_job);
 	}
 	resv_port_free(step_ptr);
 	checkpoint_free_jobinfo (step_ptr->check_job);
@@ -2277,10 +2277,10 @@ step_create(job_step_create_request_msg_t *step_specs,
 			}
 		}
 
-		if (switch_alloc_jobinfo (&step_ptr->switch_job) < 0)
-			fatal ("step_create: switch_alloc_jobinfo error");
+		if (switch_g_alloc_jobinfo (&step_ptr->switch_job) < 0)
+			fatal ("step_create: switch_g_alloc_jobinfo error");
 
-		if (switch_build_jobinfo(step_ptr->switch_job,
+		if (switch_g_build_jobinfo(step_ptr->switch_job,
 					 step_ptr->step_layout,
 					 step_ptr->network) < 0) {
 			delete_step_record (job_ptr, step_ptr->step_id);
@@ -2983,7 +2983,7 @@ extern int step_partial_comp(step_complete_msg_t *req, uid_t uid,
 			switch_g_job_step_complete(
 				step_ptr->switch_job,
 				step_ptr->step_layout->node_list);
-			switch_free_jobinfo (step_ptr->switch_job);
+			switch_g_free_jobinfo (step_ptr->switch_job);
 			step_ptr->switch_job = NULL;
 		}
 	} else if (switch_g_part_comp() && step_ptr->switch_job) {
@@ -3218,7 +3218,7 @@ extern void dump_job_step_state(struct job_record *job_ptr,
 	if (!step_ptr->batch_step) {
 		pack_slurm_step_layout(step_ptr->step_layout, buffer,
 				       SLURM_PROTOCOL_VERSION);
-		switch_pack_jobinfo(step_ptr->switch_job, buffer);
+		switch_g_pack_jobinfo(step_ptr->switch_job, buffer);
 	}
 	checkpoint_pack_jobinfo(step_ptr->check_job, buffer,
 				SLURM_PROTOCOL_VERSION);
@@ -3299,8 +3299,8 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer,
 			if (unpack_slurm_step_layout(&step_layout, buffer,
 						     protocol_version))
 				goto unpack_error;
-			switch_alloc_jobinfo(&switch_tmp);
-			if (switch_unpack_jobinfo(switch_tmp, buffer))
+			switch_g_alloc_jobinfo(&switch_tmp);
+			if (switch_g_unpack_jobinfo(switch_tmp, buffer))
 				goto unpack_error;
 		}
 		checkpoint_alloc_jobinfo(&check_tmp);
@@ -3356,8 +3356,8 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer,
 			if (unpack_slurm_step_layout(&step_layout, buffer,
 						     protocol_version))
 				goto unpack_error;
-			switch_alloc_jobinfo(&switch_tmp);
-			if (switch_unpack_jobinfo(switch_tmp, buffer))
+			switch_g_alloc_jobinfo(&switch_tmp);
+			if (switch_g_unpack_jobinfo(switch_tmp, buffer))
 				goto unpack_error;
 		}
 		checkpoint_alloc_jobinfo(&check_tmp);
@@ -3478,7 +3478,7 @@ unpack_error:
 	xfree(bit_fmt);
 	xfree(core_job);
 	if (switch_tmp)
-		switch_free_jobinfo(switch_tmp);
+		switch_g_free_jobinfo(switch_tmp);
 	slurm_step_layout_destroy(step_layout);
 	select_g_select_jobinfo_free(select_jobinfo);
 	return SLURM_FAILURE;
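The step_mgr.c changes above pair switch_g_alloc_jobinfo() with switch_g_build_jobinfo() at step creation and with switch_g_free_jobinfo() on the error and teardown paths. A toy of that ownership pattern, with the jobinfo type and helpers as stand-ins for the opaque switch plugin calls:

#include <stdlib.h>

#define SLURM_SUCCESS 0
#define SLURM_ERROR  -1

typedef struct { int unused; } jobinfo_t;	/* stand-in for switch_jobinfo_t */

static int  jobinfo_alloc(jobinfo_t **j) { *j = calloc(1, sizeof(**j)); return *j ? 0 : -1; }
static int  jobinfo_build(jobinfo_t *j)  { (void) j; return 0; }
static void jobinfo_free(jobinfo_t *j)   { free(j); }

/* Every successful alloc (switch_g_alloc_jobinfo) must reach a matching
 * free (switch_g_free_jobinfo), including the build/unpack error paths. */
int create_step_switch_info(jobinfo_t **out)
{
	jobinfo_t *j = NULL;

	if (jobinfo_alloc(&j) < 0)
		return SLURM_ERROR;
	if (jobinfo_build(j) < 0) {
		jobinfo_free(j);
		return SLURM_ERROR;
	}
	*out = j;
	return SLURM_SUCCESS;
}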
diff --git a/src/slurmd/common/proctrack.c b/src/slurmd/common/proctrack.c
index 79c8b9c099e..5d46072f5b2 100644
--- a/src/slurmd/common/proctrack.c
+++ b/src/slurmd/common/proctrack.c
@@ -75,14 +75,14 @@ typedef struct slurm_proctrack_ops {
  * Must be synchronized with slurm_proctrack_ops_t above.
  */
 static const char *syms[] = {
-	"slurm_container_plugin_create",
-	"slurm_container_plugin_add",
-	"slurm_container_plugin_signal",
-	"slurm_container_plugin_destroy",
-	"slurm_container_plugin_find",
-	"slurm_container_plugin_has_pid",
-	"slurm_container_plugin_wait",
-	"slurm_container_plugin_get_pids"
+	"proctrack_p_plugin_create",
+	"proctrack_p_plugin_add",
+	"proctrack_p_plugin_signal",
+	"proctrack_p_plugin_destroy",
+	"proctrack_p_plugin_find",
+	"proctrack_p_plugin_has_pid",
+	"proctrack_p_plugin_wait",
+	"proctrack_p_plugin_get_pids"
 };
 
 static slurm_proctrack_ops_t ops;
@@ -147,11 +147,11 @@ extern int slurm_proctrack_fini(void)
  * Create a container
  * job IN - stepd_step_rec_t structure
  * job->cont_id OUT - Plugin must fill in job->cont_id either here
- *                    or in slurm_container_add()
+ *                    or in proctrack_g_add()
  *
  * Returns a SLURM errno.
  */
-extern int slurm_container_create(stepd_step_rec_t * job)
+extern int proctrack_g_create(stepd_step_rec_t * job)
 {
 	if (slurm_proctrack_init() < 0)
 		return 0;
@@ -164,11 +164,11 @@ extern int slurm_container_create(stepd_step_rec_t * job)
  * job IN - stepd_step_rec_t structure
  * pid IN      - process ID to be added to the container
  * job->cont_id OUT - Plugin must fill in job->cont_id either here
- *                    or in slurm_container_create()
+ *                    or in proctrack_g_create()
  *
  * Returns a SLURM errno.
  */
-extern int slurm_container_add(stepd_step_rec_t * job, pid_t pid)
+extern int proctrack_g_add(stepd_step_rec_t * job, pid_t pid)
 {
 	if (slurm_proctrack_init() < 0)
 		return SLURM_ERROR;
@@ -264,7 +264,7 @@ static void *_sig_agent(void *args)
 		int i, npids = 0, hung_pids = 0;
 		char *stat_fname = NULL;
 
-		if (slurm_container_get_pids(agent_arg_ptr->cont_id, &pids,
+		if (proctrack_g_get_pids(agent_arg_ptr->cont_id, &pids,
 					     &npids) == SLURM_SUCCESS) {
 			hung_pids = 0;
 			for (i = 0; i < npids; i++) {
@@ -311,13 +311,13 @@ static void _spawn_signal_thread(uint64_t cont_id, int signal)
 
 /*
  * Signal all processes within a container
- * cont_id IN - container ID as returned by slurm_container_create()
+ * cont_id IN - container ID as returned by proctrack_g_create()
  * signal IN  - signal to send, if zero then perform error checking
  *              but do not send signal
  *
  * Returns a SLURM errno.
  */
-extern int slurm_container_signal(uint64_t cont_id, int signal)
+extern int proctrack_g_signal(uint64_t cont_id, int signal)
 {
 
 
@@ -328,9 +328,9 @@ extern int slurm_container_signal(uint64_t cont_id, int signal)
 		pid_t *pids = NULL;
 		int i, j, npids = 0, hung_pids = 0;
 		char *stat_fname = NULL;
-		if (slurm_container_get_pids(cont_id, &pids, &npids) ==
+		if (proctrack_g_get_pids(cont_id, &pids, &npids) ==
 		    SLURM_SUCCESS) {
-			/* NOTE: slurm_container_get_pids() is not supported
+			/* NOTE: proctrack_g_get_pids() is not supported
 			 * by the proctrack/pgid plugin */
 			for (j = 0; j < 2; j++) {
 				if (j)
@@ -368,7 +368,7 @@ extern int slurm_container_signal(uint64_t cont_id, int signal)
 	return (*(ops.signal)) (cont_id, signal);
 }
 #else
-extern int slurm_container_signal(uint64_t cont_id, int signal)
+extern int proctrack_g_signal(uint64_t cont_id, int signal)
 {
 	if (slurm_proctrack_init() < 0)
 		return SLURM_ERROR;
@@ -379,11 +379,11 @@ extern int slurm_container_signal(uint64_t cont_id, int signal)
 
 /*
  * Destroy a container, any processes within the container are not affected
- * cont_id IN - container ID as returned by slurm_container_create()
+ * cont_id IN - container ID as returned by proctrack_g_create()
  *
  * Returns a SLURM errno.
 */
-extern int slurm_container_destroy(uint64_t cont_id)
+extern int proctrack_g_destroy(uint64_t cont_id)
 {
 	if (slurm_proctrack_init() < 0)
 		return SLURM_ERROR;
@@ -396,7 +396,7 @@ extern int slurm_container_destroy(uint64_t cont_id)
  *
  * Returns zero if no container found for the given pid.
  */
-extern uint64_t slurm_container_find(pid_t pid)
+extern uint64_t proctrack_g_find(pid_t pid)
 {
 	if (slurm_proctrack_init() < 0)
 		return SLURM_ERROR;
@@ -408,7 +408,7 @@ extern uint64_t slurm_container_find(pid_t pid)
  * Return "true" if the container "cont_id" contains the process with
  * ID "pid".
  */
-extern bool slurm_container_has_pid(uint64_t cont_id, pid_t pid)
+extern bool proctrack_g_has_pid(uint64_t cont_id, pid_t pid)
 {
 	if (slurm_proctrack_init() < 0)
 		return SLURM_ERROR;
@@ -419,14 +419,14 @@ extern bool slurm_container_has_pid(uint64_t cont_id, pid_t pid)
 /*
  * Wait for all processes within a container to exit.
  *
- * When slurm_container_wait returns SLURM_SUCCESS, the container is considered
- * destroyed.  There is no need to call slurm_container_destroy after
- * a successful call to slurm_container_wait, and in fact it will trigger
+ * When proctrack_g_wait returns SLURM_SUCCESS, the container is considered
+ * destroyed.  There is no need to call proctrack_g_destroy after
+ * a successful call to proctrack_g_wait, and in fact it will trigger
  * undefined behavior.
  *
  * Return SLURM_SUCCESS or SLURM_ERROR.
  */
-extern int slurm_container_wait(uint64_t cont_id)
+extern int proctrack_g_wait(uint64_t cont_id)
 {
 	if (slurm_proctrack_init() < 0)
 		return SLURM_ERROR;
@@ -447,7 +447,7 @@ extern int slurm_container_wait(uint64_t cont_id)
  *   plugin does not implement the call.
  */
 extern int
-slurm_container_get_pids(uint64_t cont_id, pid_t ** pids, int *npids)
+proctrack_g_get_pids(uint64_t cont_id, pid_t ** pids, int *npids)
 {
 	if (slurm_proctrack_init() < 0)
 		return SLURM_ERROR;
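The syms[] table above has to stay in the same order as the function pointers in slurm_proctrack_ops_t, since the plugin loader fills the structure positionally before the proctrack_g_* wrappers call through it. A trimmed sketch of that correspondence; the struct layout below is illustrative (the real members take stepd_step_rec_t rather than void *):

#include <stdbool.h>
#include <stdint.h>
#include <sys/types.h>

typedef struct proctrack_ops {
	int      (*create)  (void *job);	/* resolves "proctrack_p_plugin_create" */
	int      (*add)     (void *job, pid_t pid);
	int      (*signal)  (uint64_t id, int sig);
	int      (*destroy) (uint64_t id);
	uint64_t (*find)    (pid_t pid);
	bool     (*has_pid) (uint64_t id, pid_t pid);
	int      (*wait)    (uint64_t id);
	int      (*get_pids)(uint64_t id, pid_t **pids, int *npids);
} proctrack_ops_t;

/* Same order as the struct members: the loader matches them by position,
 * so e.g. ops.signal ends up pointing at the plugin's
 * proctrack_p_plugin_signal(). */
static const char *plugin_syms[] = {
	"proctrack_p_plugin_create",  "proctrack_p_plugin_add",
	"proctrack_p_plugin_signal",  "proctrack_p_plugin_destroy",
	"proctrack_p_plugin_find",    "proctrack_p_plugin_has_pid",
	"proctrack_p_plugin_wait",    "proctrack_p_plugin_get_pids",
};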
diff --git a/src/slurmd/common/proctrack.h b/src/slurmd/common/proctrack.h
index 65c577f7925..65d1c07859a 100644
--- a/src/slurmd/common/proctrack.h
+++ b/src/slurmd/common/proctrack.h
@@ -68,67 +68,67 @@ extern int slurm_proctrack_fini(void);
  * Create a container
  * job IN - stepd_step_rec_t structure
  * job->cont_id OUT - Plugin must fill in job->cont_id either here
- *                    or in slurm_container_add()
+ *                    or in proctrack_g_add()
  *
  * Returns a SLURM errno.
  */
-extern int slurm_container_create(stepd_step_rec_t *job);
+extern int proctrack_g_create(stepd_step_rec_t *job);
 
 /*
  * Add a process to the specified container
  * job IN - stepd_step_rec_t structure
  * pid IN      - process ID to be added to the container
  * job->cont_id OUT - Plugin must fill in job->cont_id either here
- *                    or in slurm_container_create()
+ *                    or in proctrack_g_create()
  *
  * Returns a SLURM errno.
  */
-extern int slurm_container_add(stepd_step_rec_t *job, pid_t pid);
+extern int proctrack_g_add(stepd_step_rec_t *job, pid_t pid);
 
 /*
  * Signal all processes within a container
- * cont_id IN - container ID as returned by slurm_container_create()
+ * cont_id IN - container ID as returned by proctrack_g_create()
  * signal IN  - signal to send, if zero then perform error checking
  *              but do not send signal
  *
  * Returns a SLURM errno.
  */
-extern int slurm_container_signal(uint64_t cont_id, int signal);
+extern int proctrack_g_signal(uint64_t cont_id, int signal);
 
 
 /*
  * Destroy or otherwise invalidate a container,
  *	any processes within the container are not affected
- * cont_id IN - container ID as returned by slurm_container_create()
+ * cont_id IN - container ID as returned by proctrack_g_create()
  *
  * Returns a SLURM errno.
  */
-extern int slurm_container_destroy(uint64_t cont_id);
+extern int proctrack_g_destroy(uint64_t cont_id);
 
 /*
  * Get container ID for given process ID
  *
  * Returns zero if no container found for the given pid.
  */
-extern uint64_t slurm_container_find(pid_t pid);
+extern uint64_t proctrack_g_find(pid_t pid);
 
 /*
  * Return "true" if the container "cont_id" contains the process with
  * ID "pid".
  */
-extern bool slurm_container_has_pid(uint64_t cont_id, pid_t pid);
+extern bool proctrack_g_has_pid(uint64_t cont_id, pid_t pid);
 
 /*
  * Wait for all processes within a container to exit.
  *
- * When slurm_container_wait returns SLURM_SUCCESS, the container is considered
- * destroyed.  There is no need to call slurm_container_destroy after
- * a successful call to slurm_container_wait, and in fact it will trigger
+ * When proctrack_g_wait returns SLURM_SUCCESS, the container is considered
+ * destroyed.  There is no need to call proctrack_g_destroy after
+ * a successful call to proctrack_g_wait, and in fact it will trigger
  * undefined behavior.
  *
  * Return SLURM_SUCCESS or SLURM_ERROR.
  */
-extern int slurm_container_wait(uint64_t cont_id);
+extern int proctrack_g_wait(uint64_t cont_id);
 
 /*
  * Get all process IDs within a container.
@@ -142,7 +142,7 @@ extern int slurm_container_wait(uint64_t cont_id);
  *   pids NULL), return SLURM_ERROR if container does not exist, or
  *   plugin does not implement the call.
  */
-extern int slurm_container_get_pids(uint64_t cont_id, pid_t **pids, int *npids);
+extern int proctrack_g_get_pids(uint64_t cont_id, pid_t **pids, int *npids);
 
 /* Collect accounting information for all processes within a container */
 
diff --git a/src/slurmd/common/task_plugin.c b/src/slurmd/common/task_plugin.c
index 342fc409a65..8c8caefc638 100644
--- a/src/slurmd/common/task_plugin.c
+++ b/src/slurmd/common/task_plugin.c
@@ -74,17 +74,17 @@ typedef struct slurmd_task_ops {
  * Must be synchronized with slurmd_task_ops_t above.
  */
 static const char *syms[] = {
-	"task_slurmd_batch_request",
-	"task_slurmd_launch_request",
-	"task_slurmd_reserve_resources",
-	"task_slurmd_suspend_job",
-	"task_slurmd_resume_job",
-	"task_slurmd_release_resources",
-	"task_pre_setuid",
-	"task_pre_launch_priv",
-	"task_pre_launch",
-	"task_post_term",
-	"task_post_step",
+	"task_p_slurmd_batch_request",
+	"task_p_slurmd_launch_request",
+	"task_p_slurmd_reserve_resources",
+	"task_p_slurmd_suspend_job",
+	"task_p_slurmd_resume_job",
+	"task_p_slurmd_release_resources",
+	"task_p_pre_setuid",
+	"task_p_pre_launch_priv",
+	"task_p_pre_launch",
+	"task_p_post_term",
+	"task_p_post_step",
 };
 
 static slurmd_task_ops_t *ops = NULL;
@@ -191,7 +191,8 @@ done:
  *
  * RET - slurm error code
  */
-extern int slurmd_batch_request(uint32_t job_id, batch_job_launch_msg_t *req)
+extern int task_g_slurmd_batch_request(uint32_t job_id,
+				       batch_job_launch_msg_t *req)
 {
 	int i, rc = SLURM_SUCCESS;
 
@@ -212,7 +213,7 @@ extern int slurmd_batch_request(uint32_t job_id, batch_job_launch_msg_t *req)
  *
  * RET - slurm error code
  */
-extern int slurmd_launch_request(uint32_t job_id,
+extern int task_g_slurmd_launch_request(uint32_t job_id,
 				 launch_tasks_request_msg_t *req,
 				 uint32_t node_id)
 {
@@ -236,7 +237,7 @@ extern int slurmd_launch_request(uint32_t job_id,
  *
  * RET - slurm error code
  */
-extern int slurmd_reserve_resources(uint32_t job_id,
+extern int task_g_slurmd_reserve_resources(uint32_t job_id,
 				    launch_tasks_request_msg_t *req,
 				    uint32_t node_id )
 {
@@ -260,7 +261,7 @@ extern int slurmd_reserve_resources(uint32_t job_id,
  *
  * RET - slurm error code
  */
-extern int slurmd_suspend_job(uint32_t job_id)
+extern int task_g_slurmd_suspend_job(uint32_t job_id)
 {
 	int i, rc = SLURM_SUCCESS;
 
@@ -280,7 +281,7 @@ extern int slurmd_suspend_job(uint32_t job_id)
  *
  * RET - slurm error code
  */
-extern int slurmd_resume_job(uint32_t job_id)
+extern int task_g_slurmd_resume_job(uint32_t job_id)
 {
 	int i, rc = SLURM_SUCCESS;
 
@@ -300,7 +301,7 @@ extern int slurmd_resume_job(uint32_t job_id)
  *
  * RET - slurm error code
  */
-extern int slurmd_release_resources(uint32_t job_id)
+extern int task_g_slurmd_release_resources(uint32_t job_id)
 {
 	int i, rc = SLURM_SUCCESS;
 
@@ -323,7 +324,7 @@ extern int slurmd_release_resources(uint32_t job_id)
  *
  * RET - slurm error code
  */
-extern int pre_setuid(stepd_step_rec_t *job)
+extern int task_g_pre_setuid(stepd_step_rec_t *job)
 {
 	int i, rc = SLURM_SUCCESS;
 
@@ -343,7 +344,7 @@ extern int pre_setuid(stepd_step_rec_t *job)
  *
  * RET - slurm error code
  */
-extern int pre_launch_priv(stepd_step_rec_t *job)
+extern int task_g_pre_launch_priv(stepd_step_rec_t *job)
 {
 	int i, rc = SLURM_SUCCESS;
 
@@ -363,7 +364,7 @@ extern int pre_launch_priv(stepd_step_rec_t *job)
  *
  * RET - slurm error code
  */
-extern int pre_launch(stepd_step_rec_t *job)
+extern int task_g_pre_launch(stepd_step_rec_t *job)
 {
 	int i, rc = SLURM_SUCCESS;
 
@@ -383,7 +384,7 @@ extern int pre_launch(stepd_step_rec_t *job)
  *
  * RET - slurm error code
  */
-extern int post_term(stepd_step_rec_t *job,
+extern int task_g_post_term(stepd_step_rec_t *job,
 		     stepd_step_task_info_t *task)
 {
 	int i, rc = SLURM_SUCCESS;
@@ -404,7 +405,7 @@ extern int post_term(stepd_step_rec_t *job,
  *
  * RET - slurm error code
  */
-extern int post_step(stepd_step_rec_t *job)
+extern int task_g_post_step(stepd_step_rec_t *job)
 {
 	int i, rc = SLURM_SUCCESS;
 
diff --git a/src/slurmd/common/task_plugin.h b/src/slurmd/common/task_plugin.h
index 487e50ed18f..3126d3189e7 100644
--- a/src/slurmd/common/task_plugin.h
+++ b/src/slurmd/common/task_plugin.h
@@ -67,14 +67,15 @@ extern int slurmd_task_fini(void);
  *
  * RET - slurm error code
  */
-extern int slurmd_batch_request(uint32_t job_id, batch_job_launch_msg_t *req);
+extern int task_g_slurmd_batch_request(uint32_t job_id,
+				       batch_job_launch_msg_t *req);
 
 /*
  * Slurmd has received a launch request.
  *
  * RET - slurm error code
  */
-extern int slurmd_launch_request(uint32_t job_id,
+extern int task_g_slurmd_launch_request(uint32_t job_id,
 				 launch_tasks_request_msg_t *req,
 				 uint32_t node_id );
 
@@ -83,7 +84,7 @@ extern int slurmd_launch_request(uint32_t job_id,
  *
  * RET - slurm error code
  */
-extern int slurmd_reserve_resources(uint32_t job_id,
+extern int task_g_slurmd_reserve_resources(uint32_t job_id,
 				    launch_tasks_request_msg_t *req,
 				    uint32_t node_id );
 
@@ -92,21 +93,21 @@ extern int slurmd_reserve_resources(uint32_t job_id,
  *
  * RET - slurm error code
  */
-extern int slurmd_suspend_job(uint32_t job_id);
+extern int task_g_slurmd_suspend_job(uint32_t job_id);
 
 /*
  * Slurmd is resuming a previously suspended job.
  *
  * RET - slurm error code
  */
-extern int slurmd_resume_job(uint32_t job_id);
+extern int task_g_slurmd_resume_job(uint32_t job_id);
 
 /*
  * Slurmd is releasing resources for the task.
  *
  * RET - slurm error code
  */
-extern int slurmd_release_resources(uint32_t job_id);
+extern int task_g_slurmd_release_resources(uint32_t job_id);
 
 /*
  * Note that a task launch is about to occur.
@@ -114,34 +115,35 @@ extern int slurmd_release_resources(uint32_t job_id);
  *
  * RET - slurm error code
  */
-extern int pre_setuid(stepd_step_rec_t *job);
+extern int task_g_pre_setuid(stepd_step_rec_t *job);
 
 /*
  * Note in privileged mode that a task launch is about to occur.
  *
  * RET - slurm error code
  */
-extern int pre_launch_priv(stepd_step_rec_t *job);
+extern int task_g_pre_launch_priv(stepd_step_rec_t *job);
 
 /*
  * Note that a task launch is about to occur.
  *
  * RET - slurm error code
  */
-extern int pre_launch(stepd_step_rec_t *job);
+extern int task_g_pre_launch(stepd_step_rec_t *job);
 
 /*
  * Note that a task has terminated.
  *
  * RET - slurm error code
  */
-extern int post_term(stepd_step_rec_t *job, stepd_step_task_info_t *task);
+extern int task_g_post_term(stepd_step_rec_t *job,
+			    stepd_step_task_info_t *task);
 
 /*
  * Note that a step has terminated.
  *
  * RET - slurm error code
  */
-extern int post_step(stepd_step_rec_t *job);
+extern int task_g_post_step(stepd_step_rec_t *job);
 
 #endif /* _SLURMD_TASK_PLUGIN_H_ */
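The reason the strings in syms[] and the task_g_* externs above must stay in lockstep is the usual SLURM plugin-dispatch pattern: each global wrapper resolves one task_p_* symbol per loaded plugin and calls them in turn until one fails. The sketch below shows that shape, inferred from the "int i, rc = SLURM_SUCCESS;" bodies in the task_plugin.c hunks; the init call, context count, lock name and ops[] member are assumptions for illustration, not taken from this patch.

extern int task_g_pre_launch(stepd_step_rec_t *job)
{
	int i, rc = SLURM_SUCCESS;

	if (slurmd_task_init())			/* load plugins, resolve syms[] */
		return SLURM_ERROR;

	slurm_mutex_lock(&g_task_context_lock);		/* assumed lock name */
	for (i = 0; (i < g_task_context_num) && (rc == SLURM_SUCCESS); i++)
		rc = (*(ops[i].pre_launch))(job);	/* "task_p_pre_launch" */
	slurm_mutex_unlock(&g_task_context_lock);

	return rc;
}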
diff --git a/src/slurmd/slurmd/req.c b/src/slurmd/slurmd/req.c
index ff34b797342..2a7e4ad8cc6 100644
--- a/src/slurmd/slurmd/req.c
+++ b/src/slurmd/slurmd/req.c
@@ -1037,7 +1037,7 @@ _rpc_launch_tasks(slurm_msg_t *msg)
 	req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
 	memcpy(&req->orig_addr, &msg->orig_addr, sizeof(slurm_addr_t));
 
-	slurmd_launch_request(req->job_id, req, nodeid);
+	task_g_slurmd_launch_request(req->job_id, req, nodeid);
 
 	super_user = _slurm_authorized_user(req_uid);
 
@@ -1146,7 +1146,7 @@ _rpc_launch_tasks(slurm_msg_t *msg)
 
 	} else if (errnum == SLURM_SUCCESS) {
 		save_cred_state(conf->vctx);
-		slurmd_reserve_resources(req->job_id, req, nodeid);
+		task_g_slurmd_reserve_resources(req->job_id, req, nodeid);
 	}
 
 	/*
@@ -1366,7 +1366,7 @@ _rpc_batch_job(slurm_msg_t *msg, bool new_msg)
 		goto done;
 	}
 
-	slurmd_batch_request(req->job_id, req);	/* determine task affinity */
+	task_g_slurmd_batch_request(req->job_id, req);	/* determine task affinity */
 
 	if ((req->step_id != SLURM_BATCH_SCRIPT) && (req->step_id != 0))
 		first_job_run = false;
@@ -3146,7 +3146,7 @@ _job_still_running(uint32_t job_id)
 
 /*
  * Wait until all job steps are in SLURMSTEPD_NOT_RUNNING state.
- * This indicates that interconnect_postfini has completed and
+ * This indicates that switch_g_postfini has completed and
  * freed the switch windows (as needed only for Federation switch).
  */
 static void
@@ -3458,13 +3458,13 @@ _rpc_suspend_job(slurm_msg_t *msg)
 	}
 
 	if ((req->op == SUSPEND_JOB) && (req->indf_susp))
-		interconnect_suspend(req->switch_info, 5);
+		switch_g_suspend(req->switch_info, 5);
 
 	/* Release or reclaim resources bound to these tasks (task affinity) */
 	if (req->op == SUSPEND_JOB)
-		(void) slurmd_suspend_job(req->job_id);
+		(void) task_g_slurmd_suspend_job(req->job_id);
 	else
-		(void) slurmd_resume_job(req->job_id);
+		(void) task_g_slurmd_resume_job(req->job_id);
 
 	/*
 	 * Loop through all job steps and call stepd_suspend or stepd_resume
@@ -3531,7 +3531,7 @@ _rpc_suspend_job(slurm_msg_t *msg)
 	list_destroy(steps);
 
 	if ((req->op == RESUME_JOB) && (req->indf_susp))
-		interconnect_resume(req->switch_info, 5);
+		switch_g_resume(req->switch_info, 5);
 
 	_unlock_suspend_job(req->job_id);
 
@@ -3561,7 +3561,7 @@ _rpc_abort_job(slurm_msg_t *msg)
 		return;
 	}
 
-	slurmd_release_resources(req->job_id);
+	task_g_slurmd_release_resources(req->job_id);
 
 	/*
 	 * "revoke" all future credentials for this jobid
@@ -3632,7 +3632,7 @@ _rpc_terminate_batch_job(uint32_t job_id, uint32_t user_id, char *node_name)
 	time_t		now = time(NULL);
 	slurm_ctl_conf_t *cf;
 
-	slurmd_release_resources(job_id);
+	task_g_slurmd_release_resources(job_id);
 
 	if (_waiter_init(job_id) == SLURM_ERROR)
 		return;
@@ -3812,7 +3812,7 @@ _rpc_terminate_job(slurm_msg_t *msg)
 		return;
 	}
 
-	slurmd_release_resources(req->job_id);
+	task_g_slurmd_release_resources(req->job_id);
 
 	/*
 	 *  Initialize a "waiter" thread for this jobid. If another
diff --git a/src/slurmd/slurmd/slurmd.c b/src/slurmd/slurmd/slurmd.c
index 289be793d19..bac4f18f8d7 100644
--- a/src/slurmd/slurmd/slurmd.c
+++ b/src/slurmd/slurmd/slurmd.c
@@ -299,7 +299,7 @@ main (int argc, char *argv[])
 		fatal("Unable to initialize job_container plugin.");
 	if (container_g_restore(conf->spooldir, !conf->cleanstart))
 		error("Unable to restore job_container state.");
-	if (interconnect_node_init() < 0)
+	if (switch_g_node_init() < 0)
 		fatal("Unable to initialize interconnect.");
 	if (conf->cleanstart && switch_g_clear_node_state())
 		fatal("Unable to clear interconnect state.");
@@ -336,7 +336,7 @@ main (int argc, char *argv[])
 
 	_wait_for_all_threads();
 
-	interconnect_node_fini();
+	switch_g_node_fini();
 
 	_slurmd_fini();
 	_destroy_conf();
diff --git a/src/slurmd/slurmstepd/mgr.c b/src/slurmd/slurmstepd/mgr.c
index f0de161542d..f6649cdc68a 100644
--- a/src/slurmd/slurmstepd/mgr.c
+++ b/src/slurmd/slurmstepd/mgr.c
@@ -302,14 +302,14 @@ static int _call_select_plugin_from_stepd(stepd_step_rec_t *job, uint64_t pagg_i
 
 static int _select_cray_plugin_job_ready(stepd_step_rec_t *job)
 {
-	uint64_t pagg_id = slurm_container_find(job->jmgr_pid);
+	uint64_t pagg_id = proctrack_g_find(job->jmgr_pid);
 
 	if (pagg_id == 0) {
 		error("no PAGG ID: job service disabled on this host?");
 		/*
 		 * If this process is not attached to a container, there is no
 		 * sense in trying to use the SID as fallback, since the call to
-		 * slurm_container_add() in _fork_all_tasks() will fail later.
+		 * proctrack_g_add() in _fork_all_tasks() will fail later.
 		 * Hence drain the node until sgi_job returns proper PAGG IDs.
 		 */
 		return READY_JOB_FATAL;
@@ -922,21 +922,21 @@ job_manager(stepd_step_rec_t *job)
 	}
 
 	if (!job->batch &&
-	    (interconnect_preinit(job->switch_job) < 0)) {
+	    (switch_g_preinit(job->switch_job) < 0)) {
 		rc = ESLURM_INTERCONNECT_FAILURE;
 		goto fail1;
 	}
 
 	if ((job->cont_id == 0) &&
-	    (slurm_container_create(job) != SLURM_SUCCESS)) {
-		error("slurm_container_create: %m");
+	    (proctrack_g_create(job) != SLURM_SUCCESS)) {
+		error("proctrack_g_create: %m");
 		rc = ESLURMD_SETUP_ENVIRONMENT_ERROR;
 		goto fail1;
 	}
 
 #ifdef HAVE_ALPS_CRAY
 	/*
-	 * Note that the previously called slurm_container_create function is   
+	 * Note that the previously called proctrack_g_create function is
 	 * mandatory since the select/cray plugin needs the job container
 	 * ID in order to CONFIRM the ALPS reservation.
 	 * It is not a good idea to perform this setup in _fork_all_tasks(),
@@ -975,10 +975,10 @@ job_manager(stepd_step_rec_t *job)
 	}
 	debug2("After call to spank_init()");
 
-	/* Call interconnect_init() before becoming user */
+	/* Call switch_g_init() before becoming user */
 	if (!job->batch && job->argv &&
-	    (interconnect_init(job->switch_job, job->uid, job->argv[0]) < 0)) {
-		/* error("interconnect_init: %m"); already logged */
+	    (switch_g_init(job->switch_job, job->uid, job->argv[0]) < 0)) {
+		/* error("switch_g_init: %m"); already logged */
 		rc = ESLURM_INTERCONNECT_FAILURE;
 		goto fail2;
 	}
@@ -1036,13 +1036,13 @@ job_manager(stepd_step_rec_t *job)
 	job->state = SLURMSTEPD_STEP_ENDING;
 
 	if (!job->batch &&
-	    (interconnect_fini(job->switch_job) < 0)) {
-		error("interconnect_fini: %m");
+	    (switch_g_fini(job->switch_job) < 0)) {
+		error("switch_g_fini: %m");
 	}
 
 fail2:
 	/*
-	 * First call interconnect_postfini() - In at least one case,
+	 * First call switch_g_postfini() - In at least one case,
 	 * this will clean up any straggling processes. If this call
 	 * is moved behind wait_for_io(), we may block waiting for IO
 	 * on a hung process.
@@ -1050,18 +1050,18 @@ fail2:
 	 * Make sure all processes in session are dead. On systems
 	 * with an IBM Federation switch, all processes must be
 	 * terminated before the switch window can be released by
-	 * interconnect_postfini().
+	 * switch_g_postfini().
 	 */
 	step_terminate_monitor_start(job->jobid, job->stepid);
 	if (job->cont_id != 0) {
-		slurm_container_signal(job->cont_id, SIGKILL);
-		slurm_container_wait(job->cont_id);
+		proctrack_g_signal(job->cont_id, SIGKILL);
+		proctrack_g_wait(job->cont_id);
 	}
 	step_terminate_monitor_stop();
 	if (!job->batch) {
-		if (interconnect_postfini(job->switch_job, job->jmgr_pid,
+		if (switch_g_postfini(job->switch_job, job->jmgr_pid,
 					  job->jobid, job->stepid) < 0)
-			error("interconnect_postfini: %m");
+			error("switch_g_postfini: %m");
 	}
 
 	/*
@@ -1081,7 +1081,7 @@ fail2:
 	 * Warn task plugin that the user's step has terminated
 	 */
 
-	post_step(job);
+	task_g_post_step(job);
 
 	/*
 	 * This just cleans up all of the PAM state in case rc == 0
@@ -1128,7 +1128,7 @@ _pre_task_privileged(stepd_step_rec_t *job, int taskid, struct priv_state *sp)
 	if (spank_task_privileged (job, taskid) < 0)
 		return error("spank_task_init_privileged failed");
 
-	if (pre_launch_priv(job) < 0)
+	if (task_g_pre_launch_priv(job) < 0)
 		return error("pre_launch_priv failed");
 
 	return(_drop_privileges (job, true, sp, false));
@@ -1232,7 +1232,7 @@ static int exec_wait_signal_child (struct exec_wait_info *e)
 static int exec_wait_signal (struct exec_wait_info *e, stepd_step_rec_t *job)
 {
 	debug3 ("Unblocking %u.%u task %d, writefd = %d",
-	        job->jobid, job->stepid, e->id, e->parentfd);
+		job->jobid, job->stepid, e->id, e->parentfd);
 	exec_wait_signal_child (e);
 	return (0);
 }
@@ -1322,7 +1322,7 @@ _fork_all_tasks(stepd_step_rec_t *job, bool *io_initialized)
 	xassert(job != NULL);
 
 	set_oom_adj(0);	/* the tasks may be killed by OOM */
-	if (pre_setuid(job)) {
+	if (task_g_pre_setuid(job)) {
 		error("Failed task affinity setup");
 		return SLURM_ERROR;
 	}
@@ -1523,9 +1523,9 @@ _fork_all_tasks(stepd_step_rec_t *job, bool *io_initialized)
 			      job->pgid);
 		}
 
-		if (slurm_container_add(job, job->task[i]->pid)
+		if (proctrack_g_add(job, job->task[i]->pid)
 		    == SLURM_ERROR) {
-			error("slurm_container_add: %m");
+			error("proctrack_g_add: %m");
 			rc = SLURM_ERROR;
 			goto fail2;
 		}
@@ -1753,7 +1753,7 @@ _wait_for_any_task(stepd_step_rec_t *job, bool waitflag)
 				error ("Unable to spank task %d at exit",
 				       t->id);
 			}
-			post_term(job, t);
+			task_g_post_term(job, t);
 		}
 
 	} while ((pid > 0) && !waitflag);
diff --git a/src/slurmd/slurmstepd/req.c b/src/slurmd/slurmstepd/req.c
index 4fb25d373dc..5a956d0c4a1 100644
--- a/src/slurmd/slurmstepd/req.c
+++ b/src/slurmd/slurmstepd/req.c
@@ -756,7 +756,7 @@ _handle_signal_container(int fd, stepd_step_rec_t *job, uid_t uid)
 	/*
 	 * Signal the container
 	 */
-	if (slurm_container_signal(job->cont_id, sig) < 0) {
+	if (proctrack_g_signal(job->cont_id, sig) < 0) {
 		rc = -1;
 		errnum = errno;
 		verbose("Error sending signal %d to %u.%u: %m",
@@ -934,7 +934,7 @@ _handle_terminate(int fd, stepd_step_rec_t *job, uid_t uid)
 		      job->jobid, job->stepid);
 	}
 
-	if (slurm_container_signal(job->cont_id, SIGKILL) < 0) {
+	if (proctrack_g_signal(job->cont_id, SIGKILL) < 0) {
 		rc = -1;
 		errnum = errno;
 		verbose("Error sending SIGKILL signal to %u.%u: %m",
@@ -1046,7 +1046,7 @@ _handle_pid_in_container(int fd, stepd_step_rec_t *job)
 
 	safe_read(fd, &pid, sizeof(pid_t));
 
-	rc = slurm_container_has_pid(job->cont_id, pid);
+	rc = proctrack_g_has_pid(job->cont_id, pid);
 
 	/* Send the return code */
 	safe_write(fd, &rc, sizeof(bool));
@@ -1124,14 +1124,14 @@ _handle_suspend(int fd, stepd_step_rec_t *job, uid_t uid)
 		if (launch_poe == 0) {
 			/* IBM MPI seems to periodically hang upon receipt
 			 * of SIGTSTP. */
-			if (slurm_container_signal(job->cont_id, SIGTSTP) < 0) {
+			if (proctrack_g_signal(job->cont_id, SIGTSTP) < 0) {
 				verbose("Error suspending %u.%u (SIGTSTP): %m",
 					job->jobid, job->stepid);
 			} else
 				sleep(2);
 		}
 
-		if (slurm_container_signal(job->cont_id, SIGSTOP) < 0) {
+		if (proctrack_g_signal(job->cont_id, SIGSTOP) < 0) {
 			verbose("Error suspending %u.%u (SIGSTOP): %m",
 				job->jobid, job->stepid);
 		} else {
@@ -1191,7 +1191,7 @@ _handle_resume(int fd, stepd_step_rec_t *job, uid_t uid)
 		pthread_mutex_unlock(&suspend_mutex);
 		goto done;
 	} else {
-		if (slurm_container_signal(job->cont_id, SIGCONT) < 0) {
+		if (proctrack_g_signal(job->cont_id, SIGCONT) < 0) {
 			verbose("Error resuming %u.%u: %m",
 				job->jobid, job->stepid);
 		} else {
@@ -1396,7 +1396,7 @@ _handle_list_pids(int fd, stepd_step_rec_t *job)
 	uint32_t pid;
 
 	debug("_handle_list_pids for job %u.%u", job->jobid, job->stepid);
-	slurm_container_get_pids(job->cont_id, &pids, &npids);
+	proctrack_g_get_pids(job->cont_id, &pids, &npids);
 	safe_write(fd, &npids, sizeof(uint32_t));
 	for (i = 0; i < npids; i++) {
 		pid = (uint32_t)pids[i];
diff --git a/src/slurmd/slurmstepd/task.c b/src/slurmd/slurmstepd/task.c
index b024ae67829..fac4fc8e3e7 100644
--- a/src/slurmd/slurmstepd/task.c
+++ b/src/slurmd/slurmstepd/task.c
@@ -381,7 +381,7 @@ exec_task(stepd_step_rec_t *job, int i)
 	}
 
 	if (!job->batch) {
-		if (interconnect_attach(job->switch_job, &job->env,
+		if (switch_g_attach(job->switch_job, &job->env,
 				job->nodeid, (uint32_t) i, job->nnodes,
 				job->ntasks, task->gtid) < 0) {
 			error("Unable to attach to interconnect: %m");
@@ -404,7 +404,7 @@ exec_task(stepd_step_rec_t *job, int i)
 	}
 
 	/* task plugin hook */
-	if (pre_launch(job)) {
+	if (task_g_pre_launch(job)) {
 		error ("Failed task affinity setup");
 		exit (1);
 	}
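Pulling the call sites above together, the renamed task hooks fire in roughly this order for a non-batch step; the wrapper below is a hypothetical summary written for illustration, not code introduced by this patch.

static void _task_hook_call_order(stepd_step_rec_t *job,
				  stepd_step_task_info_t *task,
				  launch_tasks_request_msg_t *req,
				  uint32_t job_id, uint32_t node_id)
{
	task_g_slurmd_launch_request(job_id, req, node_id);	/* slurmd: launch RPC      */
	task_g_slurmd_reserve_resources(job_id, req, node_id);	/* slurmd: launch accepted */
	task_g_pre_setuid(job);		/* slurmstepd: before privileges are dropped */
	task_g_pre_launch_priv(job);	/* slurmstepd: per task, still privileged    */
	task_g_pre_launch(job);		/* forked task: just before exec()           */
	task_g_post_term(job, task);	/* slurmstepd: as each task exits            */
	task_g_post_step(job);		/* slurmstepd: once the whole step is done   */
}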
-- 
GitLab