diff --git a/src/slurmctld/agent.c b/src/slurmctld/agent.c
index 3ea49c0d15d108b25c592f597019352cb668e774..4514015ca8c66aee18e97a5f1c314272abc2b479 100644
--- a/src/slurmctld/agent.c
+++ b/src/slurmctld/agent.c
@@ -29,9 +29,9 @@
  *
  *  The functions below permit slurm to initiate parallel tasks as a 
  *  detached thread and let the functions below make sure the work happens. 
- *  For example, when a job step completes slurmctld needs to revoke  
- *  credentials for that job step on every node to which it was allocated.  
- *  We don't want to hang slurmctld's primary function (the job complete RPC)  
+ *  For example, when a job's time limit is to be changed slurmctld needs 
+ *  to notify the slurmd on every node to which the job was allocated.  
+ *  We don't want to hang slurmctld's primary function (the job update RPC)  
  *  to perform this work, so it just initiates an agent to perform the work.  
  *  The agent is passed all details required to perform the work, so it will 
  *  be possible to execute the agent as an pthread, process, or even a daemon 
@@ -47,8 +47,8 @@
  *  All the state for each thread is maintained in thd_t struct, which is 
  *  used by the watchdog thread as well as the communication threads.
  *
- *  NOTE: REQUEST_REVOKE_JOB_CREDENTIAL  and REQUEST_KILL_TIMELIMIT responses 
- *  are handled immediately rather than in bulk upon completion of the RPC 
+ *  NOTE: REQUEST_KILL_JOB  and REQUEST_KILL_TIMELIMIT responses are 
+ *  handled immediately rather than in bulk upon completion of the RPC 
  *  to all nodes
 \*****************************************************************************/
 
@@ -260,7 +260,7 @@ static int _valid_agent_arg(agent_arg_t *agent_arg_ptr)
 		fatal("agent passed NULL address list");
 	if (agent_arg_ptr->node_names == NULL)
 		fatal("agent passed NULL node name list");
-	if ((agent_arg_ptr->msg_type != REQUEST_REVOKE_JOB_CREDENTIAL) &&
+	if ((agent_arg_ptr->msg_type != REQUEST_KILL_JOB) &&
 	    (agent_arg_ptr->msg_type != REQUEST_KILL_TIMELIMIT) && 
 	    (agent_arg_ptr->msg_type != REQUEST_NODE_REGISTRATION_STATUS) && 
 	    (agent_arg_ptr->msg_type != REQUEST_KILL_TASKS) && 
@@ -434,7 +434,7 @@ static void *_wdog(void *args)
 	/* attempt to schedule when all nodes registered, not 
 	 * after each node, the overhead would be too high */
 	if ((agent_ptr->msg_type == REQUEST_KILL_TIMELIMIT) ||
-	    (agent_ptr->msg_type == REQUEST_REVOKE_JOB_CREDENTIAL))
+	    (agent_ptr->msg_type == REQUEST_KILL_JOB))
 		schedule();
 #else
 	/* Build a list of all responding nodes and send it to slurmctld to 
@@ -464,8 +464,7 @@ static void *_wdog(void *args)
 }
 
 /*
- * _thread_per_node_rpc - thread to revoke a credential on a collection 
- *	of nodes
+ * _thread_per_node_rpc - thread to issue an RPC on a collection of nodes
  * IN/OUT args - pointer to task_info_t, xfree'd on completion
  */
 static void *_thread_per_node_rpc(void *args)
@@ -567,17 +566,15 @@ static void *_thread_per_node_rpc(void *args)
 #if AGENT_IS_THREAD
 	/* SPECIAL CASE: Immediately mark node as IDLE */
 	if (((task_ptr->msg_type == REQUEST_KILL_TIMELIMIT) ||
-	     (task_ptr->msg_type == REQUEST_REVOKE_JOB_CREDENTIAL)) &&
+	     (task_ptr->msg_type == REQUEST_KILL_JOB)) &&
 	    (node_ptr = find_node_record(thread_ptr->node_name))) {
-		revoke_credential_msg_t *revoke_job_cred;
-		revoke_job_cred = (revoke_credential_msg_t *)
-				   task_ptr->msg_args_ptr;
+		kill_job_msg_t *kill_job;
+		kill_job = (kill_job_msg_t *) task_ptr->msg_args_ptr;
 		node_ptr = find_node_record(thread_ptr->node_name);
-		debug3("Revoke on node %s job_id %u",
-		       thread_ptr->node_name, revoke_job_cred->job_id);
+		debug3("Kill job_id %u on node %s ",
+		       kill_job->job_id, thread_ptr->node_name);
 		lock_slurmctld(node_write_lock);
-		make_node_idle(node_ptr, 
-			       find_job_record(revoke_job_cred->job_id));
+		make_node_idle(node_ptr, find_job_record(kill_job->job_id));
 		unlock_slurmctld(node_write_lock);
 		/* scheduler(); Overhead too high, 
 		 * only do when last node registers */
diff --git a/src/slurmctld/controller.c b/src/slurmctld/controller.c
index a891603213db6df43d7184c4ba11e4f9bc0d498d..3aaf32d1b363ccd8435ffef5d622f617b3db7889 100644
--- a/src/slurmctld/controller.c
+++ b/src/slurmctld/controller.c
@@ -48,7 +48,6 @@
 
 #include <slurm/slurm_errno.h>
 
-#include "src/common/credential_utils.h"
 #include "src/common/daemonize.h"
 #include "src/common/hostlist.h"
 #include "src/common/log.h"
@@ -56,6 +55,7 @@
 #include "src/common/pack.h"
 #include "src/common/read_config.h"
 #include "src/common/slurm_auth.h"
+#include "src/common/slurm_cred.h"
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/xstring.h"
 
@@ -68,6 +68,7 @@
 #include "src/slurmctld/slurmctld.h"
 
 #define BUF_SIZE	  1024	/* Temporary buffer size */
+#define CRED_LIFE         60	/* Job credential lifetime in seconds */
 #define DEFAULT_DAEMONIZE 1	/* Run as daemon by default if set */
 #define DEFAULT_RECOVER   1	/* Recover state by default if set */
 #define MIN_CHECKIN_TIME  3	/* Nodes have this number of seconds to 
@@ -85,7 +86,6 @@ log_options_t log_opts = LOG_OPTS_INITIALIZER;
 
 /* Global variables */
 slurm_ctl_conf_t slurmctld_conf;
-extern slurm_ssl_key_ctx_t sign_ctx;
 
 /* Local variables */
 static int	daemonize = DEFAULT_DAEMONIZE;
@@ -96,6 +96,7 @@ static bool	resume_backup = false;
 static time_t	shutdown_time = (time_t) 0;
 static int	server_thread_count = 0;
 static pid_t	slurmctld_pid;
+static slurm_cred_ctx_t cred_ctx;
 
 #ifdef WITH_PTHREADS
 	static pthread_mutex_t thread_count_lock = PTHREAD_MUTEX_INITIALIZER;
@@ -113,6 +114,8 @@ static int          _background_process_msg(slurm_msg_t * msg);
 static void *       _background_rpc_mgr(void *no_data);
 static void *       _background_signal_hand(void *no_data);
 static void         _fill_ctld_conf(slurm_ctl_conf_t * build_ptr);
+static int          _make_step_cred(struct step_record *step_rec, 
+				    slurm_cred_t *slurm_cred);
 static void         _parse_commandline(int argc, char *argv[], 
                                        slurm_ctl_conf_t *);
 static int          _ping_controller(void);
@@ -149,6 +152,7 @@ static void *       _slurmctld_rpc_mgr(void *no_data);
 static void         _init_pidfile(void);
 inline static int   _slurmctld_shutdown(void);
 static void *       _slurmctld_signal_hand(void *no_data);
+inline static void  _update_cred_key(void);
 inline static void  _update_logging(void);
 inline static void  _usage(char *prog_name);
 
@@ -216,10 +220,12 @@ int main(int argc, char *argv[])
 	if ((error_code = getnodename(node_name, MAX_NAME_LEN)))
 		fatal("getnodename error %s", slurm_strerror(error_code));
 
-	/* init ssl job credential stuff */
-	slurm_ssl_init();
-	slurm_init_signer(&sign_ctx,
-			  slurmctld_conf.job_credential_private_key);
+	/* init job credential stuff */
+	cred_ctx = slurm_cred_creator_ctx_create(slurmctld_conf.
+						 job_credential_private_key);
+	if (!cred_ctx)
+		fatal("slurm_cred_creator_ctx_create: %m");
+	slurm_cred_ctx_set(cred_ctx, SLURM_CRED_OPT_EXPIRY_WINDOW, CRED_LIFE);
 
 	/* Block SIGALRM everyone not explicitly enabled */
 	if (sigemptyset(&set))
@@ -332,14 +338,10 @@ static void *_slurmctld_signal_hand(void *no_data)
 		case SIGTERM:	/* kill -15 */
 			info("Terminate signal (SIGINT or SIGTERM) received");
 			shutdown_time = time(NULL);
 			/* send REQUEST_SHUTDOWN_IMMEDIATE RPC */
 			_slurmctld_shutdown();
 			pthread_join(thread_id_rpc, NULL);
-
-			/* ssl clean up */
-			slurm_destroy_ssl_key_ctx(&sign_ctx);
-			slurm_ssl_destroy();
-
+			slurm_cred_ctx_destroy(cred_ctx); /* after RPC mgr exit */
 			return NULL;	/* Normal termination */
 			break;
 		case SIGHUP:	/* kill -1 */
@@ -350,8 +352,10 @@ static void *_slurmctld_signal_hand(void *no_data)
 			if (error_code)
 				error("read_slurm_conf error %s",
 				      slurm_strerror(error_code));
-			else 
+			else {
 				_update_logging();
+				_update_cred_key();
+			}
 			break;
 		case SIGABRT:	/* abort */
 			fatal("SIGABRT received");
@@ -1461,6 +1465,7 @@ static void _slurm_rpc_allocate_and_run(slurm_msg_t * msg)
 	uint32_t job_id;
 	resource_allocation_and_run_response_msg_t alloc_msg;
 	struct step_record *step_rec;
+	slurm_cred_t slurm_cred;
 	job_step_create_request_msg_t req_step_msg;
 	/* Locks: Write job, write node, read partition */
 	slurmctld_lock_t job_write_lock = { 
@@ -1510,6 +1515,9 @@ static void _slurm_rpc_allocate_and_run(slurm_msg_t * msg)
 	req_step_msg.num_tasks  = job_desc_msg->num_tasks;
 	req_step_msg.task_dist  = job_desc_msg->task_dist;
 	error_code = step_create(&req_step_msg, &step_rec, true);
+	if (error_code == SLURM_SUCCESS)
+		error_code = _make_step_cred(step_rec, &slurm_cred);
+
 	/* note: no need to free step_rec, pointer to global job step record */
 	if (error_code) {
 		job_complete(job_id, job_desc_msg->user_id, false, 0);
@@ -1534,8 +1542,7 @@ static void _slurm_rpc_allocate_and_run(slurm_msg_t * msg)
 		alloc_msg.job_step_id    = step_rec->step_id;
 		alloc_msg.node_cnt       = node_cnt;
 		alloc_msg.node_addr      = node_addr;
-		alloc_msg.credentials    =
-			    &step_rec->job_ptr->details->credential;
+		alloc_msg.cred           = slurm_cred;
 #ifdef HAVE_LIBELAN3
 		alloc_msg.qsw_job = qsw_copy_jobinfo(step_rec->qsw_job);
 #endif
@@ -1545,6 +1552,7 @@ static void _slurm_rpc_allocate_and_run(slurm_msg_t * msg)
 		response_msg.data = &alloc_msg;
 
 		slurm_send_node_msg(msg->conn_fd, &response_msg);
+		slurm_cred_destroy(slurm_cred);
#ifdef HAVE_LIBELAN3
 		qsw_free_jobinfo(alloc_msg.qsw_job);
 #endif
@@ -1723,6 +1731,7 @@ static void _slurm_rpc_reconfigure_controller(slurm_msg_t * msg)
 	}
 	if (error_code == SLURM_SUCCESS) {  /* Stuff to do after unlock */
 		_update_logging();
+		_update_cred_key();
 		if (daemonize) {
 			if (chdir(slurmctld_conf.state_save_location))
 				fatal("chdir to %s error %m",
@@ -1839,6 +1848,7 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg)
 	job_step_create_response_msg_t job_step_resp;
 	job_step_create_request_msg_t *req_step_msg =
 	    (job_step_create_request_msg_t *) msg->data;
+	slurm_cred_t slurm_cred;
 	/* Locks: Write jobs, read nodes */
 	slurmctld_lock_t job_write_lock = { 
 		NO_LOCK, WRITE_LOCK, READ_LOCK, NO_LOCK };
@@ -1861,6 +1871,8 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg)
 		lock_slurmctld(job_write_lock);
 		error_code = step_create(req_step_msg, &step_rec, false);
 	}
+	if (error_code == SLURM_SUCCESS)
+		error_code = _make_step_cred(step_rec, &slurm_cred);
 
 	/* return result */
 	if (error_code) {
@@ -1875,9 +1887,8 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg)
 		     (long) (clock() - start_time));
 
 		job_step_resp.job_step_id = step_rec->step_id;
-		job_step_resp.node_list = xstrdup(step_rec->step_node_list);
-		job_step_resp.credentials =
-		    &step_rec->job_ptr->details->credential;
+		job_step_resp.node_list   = xstrdup(step_rec->step_node_list);
+		job_step_resp.cred        = slurm_cred;
 
 #ifdef HAVE_LIBELAN3
 		job_step_resp.qsw_job =  qsw_copy_jobinfo(step_rec->qsw_job);
@@ -1889,6 +1900,7 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg)
 
 		slurm_send_node_msg(msg->conn_fd, &resp);
 		xfree(job_step_resp.node_list);
+		slurm_cred_destroy(slurm_cred);
#ifdef HAVE_LIBELAN3
 		qsw_free_jobinfo(job_step_resp.qsw_job);
 #endif
@@ -1896,6 +1908,23 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg)
 	}
 }
 
+/* create a credential for a given job step; sets *slurm_cred on success */
+static int _make_step_cred(struct step_record *step_rec, 
+			   slurm_cred_t *slurm_cred)
+{
+	slurm_cred_arg_t cred_arg;
+
+	cred_arg.jobid    = step_rec->job_ptr->job_id;
+	cred_arg.stepid   = step_rec->step_id;
+	cred_arg.uid      = step_rec->job_ptr->user_id;
+	cred_arg.hostlist = step_rec->step_node_list;
+	if ((*slurm_cred = slurm_cred_create(cred_ctx, &cred_arg)) == NULL) {
+		error("slurm_cred_create error");
+		return ESLURM_INVALID_JOB_CREDENTIAL;
+	}
+	return SLURM_SUCCESS;
+}
+
 /* _slurm_rpc_node_registration - process RPC to determine if a node's 
  *	actual configuration satisfies the configured specification */
 static void _slurm_rpc_node_registration(slurm_msg_t * msg)
@@ -2482,6 +2511,13 @@ static int _shutdown_backup_controller(void)
 	return SLURM_PROTOCOL_SUCCESS;
 }
 
+/* Reset the job credential key based upon configuration parameters */
+static void _update_cred_key(void) 
+{
+	slurm_cred_ctx_key_update(cred_ctx, 
+				  slurmctld_conf.job_credential_private_key);
+}
+
 /* Reset slurmctld logging based upon configuration parameters */
 static void _update_logging(void) 
 {
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index f798dead6488e8f303f3fdc4f8fe26d631a08ee0..75a54d0480d7ae74ad883e6a419ca11546a3e9de 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -63,9 +63,6 @@
 #include "src/slurmctld/locks.h"
 #include "src/slurmctld/slurmctld.h"
 
-#include "src/common/credential_utils.h"
-slurm_ssl_key_ctx_t sign_ctx;
-
 #define DETAILS_FLAG 0xdddd
 #define MAX_STR_PACK 1024
 #define SLURM_CREATE_JOB_FLAG_NO_ALLOCATE_0 0
@@ -205,7 +202,6 @@ void delete_job_details(struct job_record *job_entry)
 	xfree(job_entry->details->exc_nodes);
 	FREE_NULL_BITMAP(job_entry->details->req_node_bitmap);
 	FREE_NULL_BITMAP(job_entry->details->exc_node_bitmap);
-	xfree(job_entry->details->credential.node_list);
 	xfree(job_entry->details->features);
 	xfree(job_entry->details->err);
 	xfree(job_entry->details->in);
@@ -566,8 +562,6 @@ static int _load_job_state(Buf buffer)
  */
 void _dump_job_details(struct job_details *detail_ptr, Buf buffer)
 {
-	pack_job_credential(&detail_ptr->credential, buffer);
-
 	pack32((uint32_t) detail_ptr->num_procs, buffer);
 	pack32((uint32_t) detail_ptr->min_nodes, buffer);
 	pack32((uint32_t) detail_ptr->max_nodes, buffer);
@@ -597,16 +591,12 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer)
 	char *req_nodes = NULL, *exc_nodes = NULL, *features = NULL;
 	char *err = NULL, *in = NULL, *out = NULL, *work_dir = NULL;
 	bitstr_t *req_node_bitmap = NULL, *exc_node_bitmap = NULL;
-	slurm_job_credential_t *credential_ptr = NULL;
 	uint32_t num_procs, min_nodes, max_nodes, min_procs;
 	uint16_t shared, contiguous, name_len;
 	uint32_t min_memory, min_tmp_disk, total_procs;
 	time_t submit_time;
 
 	/* unpack the job's details from the buffer */
-	if (unpack_job_credential(&credential_ptr, buffer))
-		goto unpack_error;
-
 	safe_unpack32(&num_procs, buffer);
 	safe_unpack32(&min_nodes, buffer);
 	safe_unpack32(&max_nodes, buffer);
@@ -658,8 +648,6 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer)
 	xfree(job_ptr->details->work_dir);
 
 	/* now put the details into the job record */
-	memcpy(&job_ptr->details->credential, credential_ptr,
-	       sizeof(job_ptr->details->credential));
 	job_ptr->details->num_procs = num_procs;
 	job_ptr->details->min_nodes = min_nodes;
 	job_ptr->details->max_nodes = max_nodes;
@@ -1868,31 +1856,6 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 	return SLURM_SUCCESS;
 }
 
-/*
- * build_job_cred - build a credential for a job, only valid after 
- *	allocation made
- * IN job_ptr - pointer to the job record 
- */
-void build_job_cred(struct job_record *job_ptr)
-{
-	struct job_details *detail_ptr;
-	if (job_ptr == NULL)
-		return;
-
-	detail_ptr = job_ptr->details;
-	if (detail_ptr == NULL)
-		return;
-
-	detail_ptr->credential.job_id = job_ptr->job_id;
-	detail_ptr->credential.user_id = job_ptr->user_id;
-	detail_ptr->credential.node_list = xstrdup(job_ptr->nodes);
-	detail_ptr->credential.expiration_time = job_ptr->end_time;
-	if (sign_credential(&sign_ctx, &detail_ptr->credential)) {
-		error("Error building credential for job_id %u: %m",
-		      job_ptr->job_id);
-	}
-}
-
 /* 
  * job_time_limit - terminate jobs which have exceeded their time limit
  * global: job_list - pointer global job list
@@ -2504,7 +2467,6 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 			else
 				job_ptr->end_time = job_ptr->start_time +
 						    (job_ptr->time_limit * 60);
-			build_job_cred(job_ptr);      /* with new end time */
 			if ((job_ptr->job_state == JOB_RUNNING) &&
 			    (list_is_empty(job_ptr->step_list) == 0))
 				_xmit_new_end_time(job_ptr);
diff --git a/src/slurmctld/node_scheduler.c b/src/slurmctld/node_scheduler.c
index b86c90a53f7075235e0746d68300b5d71ed8d8d2..33d06d59c9db1fd4186d90954e4a2ab0e8fbb642 100644
--- a/src/slurmctld/node_scheduler.c
+++ b/src/slurmctld/node_scheduler.c
@@ -128,14 +128,14 @@ int count_cpus(unsigned *bitmap)
  *	their state NODE_STATE_COMPLETING
  * IN job_ptr - pointer to terminating job
  * IN timeout - true of job exhausted time limit, send REQUEST_KILL_TIMELIMIT
- *	RPC instead of REQUEST_REVOKE_JOB_CREDENTIAL
+ *	RPC instead of REQUEST_KILL_JOB
  * globals: node_record_count - number of nodes in the system
  *	node_record_table_ptr - pointer to global node table
  */
 void deallocate_nodes(struct job_record *job_ptr, bool timeout)
 {
 	int i;
-	revoke_credential_msg_t *revoke_job_cred;
+	kill_job_msg_t *kill_job;
 	agent_arg_t *agent_args;
 	pthread_attr_t attr_agent;
 	pthread_t thread_agent;
@@ -149,16 +149,12 @@ void deallocate_nodes(struct job_record *job_ptr, bool timeout)
 	if (timeout)
 		agent_args->msg_type = REQUEST_KILL_TIMELIMIT;
 	else
-		agent_args->msg_type = REQUEST_REVOKE_JOB_CREDENTIAL;
+		agent_args->msg_type = REQUEST_KILL_JOB;
 	agent_args->retry = 1;
-	revoke_job_cred = xmalloc(sizeof(revoke_credential_msg_t));
+	kill_job = xmalloc(sizeof(kill_job_msg_t));
 	last_node_update = time(NULL);
-	revoke_job_cred->job_id = job_ptr->job_id;
-	revoke_job_cred->job_uid = job_ptr->user_id;
-	revoke_job_cred->expiration_time =
-	    job_ptr->details->credential.expiration_time;
-	memset((void *) revoke_job_cred->signature, 0,
-	       sizeof(revoke_job_cred->signature));
+	kill_job->job_id = job_ptr->job_id;
+	kill_job->job_uid = job_ptr->user_id;
 
 	for (i = 0; i < node_record_count; i++) {
 		if (bit_test(job_ptr->node_bitmap, i) == 0)
@@ -181,15 +177,15 @@ void deallocate_nodes(struct job_record *job_ptr, bool timeout)
 	}
 
 	if (agent_args->node_count == 0) {
-		error("Job %u allocated no nodes on for credential revoke",
+		error("Job %u allocated no nodes to be killed on",
 		      job_ptr->job_id);
-		xfree(revoke_job_cred);
+		xfree(kill_job);
 		xfree(agent_args);
 		return;
 	}
 
-	agent_args->msg_args = revoke_job_cred;
-	debug("Spawning revoke credential agent");
+	agent_args->msg_args = kill_job;
+	debug2("Spawning job kill agent");
 	if (pthread_attr_init(&attr_agent))
 		fatal("pthread_attr_init error %m");
 	if (pthread_attr_setdetachstate
@@ -798,7 +794,6 @@ int select_nodes(struct job_record *job_ptr, bool test_only)
 	else
 		job_ptr->end_time = job_ptr->start_time + 
 				    (job_ptr->time_limit * 60);   /* secs */
-	build_job_cred(job_ptr); /* uses end_time set above */
 
       cleanup:
 	FREE_NULL_BITMAP(req_bitmap);
diff --git a/src/slurmctld/read_config.c b/src/slurmctld/read_config.c
index 6a0245c8d6ec96d19a47c3042cbc26e0f165f695..60d9ddd2b0855bb440f0cc1fb6848627c34e1e80 100644
--- a/src/slurmctld/read_config.c
+++ b/src/slurmctld/read_config.c
@@ -849,7 +849,7 @@ static int _sync_nodes_to_jobs(void)
 }
 
 /* For jobs which are in state COMPLETING, deallocate the nodes and 
- * issue the RPC to revoke credentials */
+ * issue the RPC to kill the job */
 static int _sync_nodes_to_comp_job(void)
 {
 	struct job_record *job_ptr;
@@ -862,8 +862,7 @@ static int _sync_nodes_to_comp_job(void)
 		if ((job_ptr->node_bitmap) &&
 		    (job_ptr->job_state & JOB_COMPLETING)) {
 			update_cnt++;
-			info("Revoking credentials for job_id %u",
-			     job_ptr->job_id);
+			info("Killing job_id %u", job_ptr->job_id);
 			deallocate_nodes(job_ptr, false);
 		}
 	}
diff --git a/src/slurmctld/slurmctld.h b/src/slurmctld/slurmctld.h
index f29107f7e4911175628897dde5bac2e0f61ac6fb..6ef5624c1c95635aad91b645c3152288c1f642d4 100644
--- a/src/slurmctld/slurmctld.h
+++ b/src/slurmctld/slurmctld.h
@@ -221,7 +221,6 @@ struct job_details {
 	char *exc_nodes;		/* excluded nodes */
 	bitstr_t *req_node_bitmap;	/* bitmap of required nodes */
 	bitstr_t *exc_node_bitmap;	/* bitmap of excluded nodes */
-	slurm_job_credential_t	credential;	/* job credential */
 	char *features;			/* required features */
 	uint16_t shared;		/* set node can be shared*/
 	uint16_t contiguous;		/* set if requires contiguous nodes */
@@ -319,13 +318,6 @@ extern void  allocate_nodes (unsigned *bitmap);
  */
 extern char * bitmap2node_name (bitstr_t *bitmap) ;
 
-/*
- * build_job_cred - build a credential for a job, only valid after 
- *	allocation made
- * IN job_ptr - pointer to the job record 
- */
-void build_job_cred(struct job_record *job_ptr);
-
 /*
  * build_node_details - set cpu counts and addresses for allocated nodes:
  *	cpu_count_reps, cpus_per_node, node_addr, node_cnt, num_cpu_groups
@@ -404,7 +396,7 @@ extern struct step_record * create_step_record (struct job_record *job_ptr);
  *	their state NODE_STATE_COMPLETING
  * IN job_ptr - pointer to terminating job
  * IN timeout - true of job exhausted time limit, send REQUEST_KILL_TIMELIMIT
- *	RPC instead of REQUEST_REVOKE_JOB_CREDENTIAL
+ *	RPC instead of REQUEST_KILL_JOB
  * globals: node_record_count - number of nodes in the system
  *	node_record_table_ptr - pointer to global node table
  */