diff --git a/slurm/slurm.h.in b/slurm/slurm.h.in
index f59951dbce42781979b41e11e291c2602b9413b5..27ca2fd412974c3fd9267ec57dbe0d9ce294ed60 100644
--- a/slurm/slurm.h.in
+++ b/slurm/slurm.h.in
@@ -1097,6 +1097,7 @@ typedef struct job_descriptor {	/* For submit, allocate, and update requests */
 	uint16_t immediate;	/* 1 if allocate to run or fail immediately,
 				 * 0 if to be queued awaiting resources */
 	uint32_t job_id;	/* job ID, default set by SLURM */
+	char *job_id_str;	/* string representation of the job ID */
 	uint16_t kill_on_node_fail; /* 1 if node failure to kill job,
 				     * 0 otherwise,default=1 */
 	char *licenses;		/* licenses required by the job */
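
A minimal client-side sketch of how the new field could be exercised, assuming the existing slurm_init_job_desc_msg()/slurm_update_job() entry points; the job ID string and priority value are illustrative, and bracketed task sets depend on the server-side update_job_str() handling further down:

#include <slurm/slurm.h>

/* Sketch: request an update by string job ID. With this patch the
 * string may name a plain job ("1234"), one array task ("1234_7"),
 * or a set of array tasks ("1234_[0-3]"). */
int example_update(void)
{
	job_desc_msg_t msg;

	slurm_init_job_desc_msg(&msg);
	msg.job_id_str = "1234_[0-3]";	/* illustrative job ID string */
	msg.priority = 100;		/* the field being updated */
	return slurm_update_job(&msg);
}
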
diff --git a/src/common/slurm_protocol_pack.c b/src/common/slurm_protocol_pack.c
index 9a745f1ed1e2861bd4c56bb18ac38bb81b412609..04792dda29c1a19be2bc6b181f1da5a611698781 100644
--- a/src/common/slurm_protocol_pack.c
+++ b/src/common/slurm_protocol_pack.c
@@ -6888,6 +6888,7 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer,
 		packstr(job_desc_ptr->features, buffer);
 		packstr(job_desc_ptr->gres, buffer);
 		pack32(job_desc_ptr->job_id, buffer);
+		packstr(job_desc_ptr->job_id_str, buffer);
 		packstr(job_desc_ptr->name, buffer);
 
 		packstr(job_desc_ptr->alloc_node, buffer);
@@ -7362,6 +7363,9 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job_desc_ptr->gres, &uint32_tmp,buffer);
 		safe_unpack32(&job_desc_ptr->job_id, buffer);
+		safe_unpackstr_xmalloc(&job_desc_ptr->job_id_str,
+				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job_desc_ptr->name,
 				       &uint32_tmp, buffer);
 
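Both sides of the RPC must add the new field at the same offset and inside the same protocol-version branch, or every field unpacked after it is corrupted. A minimal sketch of the required symmetry, reusing this file's pack helpers and its unpack_error convention; the function names here are illustrative:

/* Sketch: job_id (32 bits) is immediately followed by job_id_str
 * (length-prefixed string) on both the pack and unpack side. */
static void _pack_id_fields(job_desc_msg_t *d, Buf buffer)
{
	pack32(d->job_id, buffer);
	packstr(d->job_id_str, buffer);	/* NULL round-trips as NULL */
}

static int _unpack_id_fields(job_desc_msg_t *d, Buf buffer)
{
	uint32_t uint32_tmp;

	safe_unpack32(&d->job_id, buffer);
	safe_unpackstr_xmalloc(&d->job_id_str, &uint32_tmp, buffer);
	return SLURM_SUCCESS;

unpack_error:
	return SLURM_ERROR;
}
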
diff --git a/src/scontrol/update_job.c b/src/scontrol/update_job.c
index a94504615c5f154f0bae2f99fdd858225ce90237..3858538077561065b7b5a31439cdfec545c3c17e 100644
--- a/src/scontrol/update_job.c
+++ b/src/scontrol/update_job.c
@@ -527,13 +527,14 @@ scontrol_update_job (int argc, char *argv[])
 	job_desc_msg_t job_msg;
 	job_ids_t *ids = NULL;
 	uint32_t num_ids = 0;
+	char *job_id_str = NULL;
 
 	slurm_init_job_desc_msg (&job_msg);
 
 	/* set current user, needed e.g., for AllowGroups checks */
 	job_msg.user_id = getuid();
 
-	for (i=0; i<argc; i++) {
+	for (i = 0; i < argc; i++) {
 		tag = argv[i];
 		val = strchr(argv[i], '=');
 		if (val) {
@@ -1021,32 +1022,36 @@ scontrol_update_job (int argc, char *argv[])
 		_free_job_ids(ids, num_ids);
 		return 0;
 	}
+
 	for (i = 0; i < num_ids; i++) {
-		job_msg.job_id = ids[i].job_id;
-		rc = 0;
-		if (slurm_update_job(&job_msg)) {
-			/* Save the errno in case one
-			 * or more array tasks are in
-			 * error.
-			 */
-			rc = slurm_get_errno();
-			if (ids[i].array_task_id == NO_VAL) {
-				error("Error updating job %u: %s",
-				      ids[i].job_id, slurm_strerror(rc));
-			} else {
-				error("Error updating job %u_%u (%u): %s",
-				      ids[i].array_job_id,
-				      ids[i].array_task_id,
-				      ids[i].job_id, slurm_strerror(rc));
-			}
-			/* Print the errno message for each
-			 * job array task.
-			 */
-			slurm_perror("slurm_update_job()");
+		if (ids[i].array_task_str) {
+			xstrfmtcat(job_id_str, "%u_%s",
+				   ids[i].array_job_id, ids[i].array_task_str);
+		} else if (ids[i].array_task_id != NO_VAL) {
+			xstrfmtcat(job_id_str, "%u_%u",
+				   ids[i].array_job_id, ids[i].array_task_id);
+		} else {
+			xstrfmtcat(job_id_str, "%u", ids[i].job_id);
 		}
+
+		job_msg.job_id_str = job_id_str;
+		rc = slurm_update_job(&job_msg);
+
+		if (rc != SLURM_SUCCESS) {
+			exit_code = 1;
+			if (quiet_flag != 1) {
+				fprintf(stderr, "%s for job %s\n",
+					slurm_strerror(slurm_get_errno()),
+					job_id_str);
+			}
+		}
+
+		xfree(job_id_str);
 	}
+
-	if (update_size)	/* See check above for one job ID */
-		_update_job_size(job_msg.job_id);
+	if (update_size)	/* See check above for one job ID */
+		_update_job_size(ids[0].job_id);
+
 	_free_job_ids(ids, num_ids);
 
 	return rc;
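
The loop above builds one of three string forms per entry with xstrfmtcat() from src/common/xstring.h, which appends printf-style to an xmalloc'd buffer starting from NULL; xfree() releases the buffer and resets the pointer, which is why one variable can be reused each pass. A condensed sketch with illustrative values:

#include "src/common/xstring.h"

/* Sketch of the three forms built above:
 *   "1234"        plain job
 *   "1234_7"      single array task (array_task_id valid)
 *   "1234_[1-3]"  array task set (array_task_str present) */
static void _job_id_str_demo(void)
{
	char *job_id_str = NULL;

	xstrfmtcat(job_id_str, "%u_%u", 1234, 7);	/* "1234_7" */
	xfree(job_id_str);	/* frees and nulls the pointer */
}
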
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index 47ffe183d2afbcfa8d8a6c86155db51b50ba692d..a9b79418629a04cc4409e395dd32f5a6a54f5a10 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -9740,7 +9740,7 @@ extern int update_job(job_desc_msg_t * job_specs, uid_t uid)
  * global: job_list - global list of job entries
  *	last_job_update - time of last job table update
  */
-extern int update_job_str(job_desc_msg_t * job_specs, uid_t uid)
+extern int update_job_str(job_desc_msg_t *job_specs, uid_t uid)
 {
 	struct job_record *job_ptr, *new_job_ptr;
 	slurm_ctl_conf_t *conf;
@@ -9752,9 +9752,9 @@ extern int update_job_str(job_desc_msg_t * job_specs, uid_t uid)
 	int len, rc, rc2;
 	char *end_ptr, *tok, *tmp;
 
-/* For testing purposes only */
-char job_id_str[32];
-snprintf(job_id_str, sizeof(job_id_str), "%u", job_specs->job_id);
+	/* For testing purposes only; NULL-safe for old clients */
+	char job_id_str[32];
+	if (job_specs->job_id_str)
+		snprintf(job_id_str, sizeof(job_id_str), "%s", job_specs->job_id_str);
+	else
+		snprintf(job_id_str, sizeof(job_id_str), "%u", job_specs->job_id);
 
 	if (max_array_size == NO_VAL) {
 		conf = slurm_conf_lock();
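
On the slurmctld side, update_job_str() has to split the string back apart; the end_ptr/tok/tmp declarations in this hunk suggest strtol-style parsing. A hypothetical sketch of that split, assuming SLURM_SUCCESS/SLURM_ERROR/NO_VAL from the slurm headers; the helper name is illustrative, not the patch's final code:

#include <stdlib.h>

/* Hypothetical: split "1234" or "1234_7"; a bracketed task list
 * ("1234_[1-3]") would branch where end_ptr[1] == '['. */
static int _split_job_id_str(const char *job_id_str, uint32_t *job_id,
			     uint32_t *task_id)
{
	char *end_ptr;
	long val = strtol(job_id_str, &end_ptr, 10);

	if ((val <= 0) || (end_ptr == job_id_str))
		return SLURM_ERROR;	/* no leading job ID number */
	*job_id = (uint32_t) val;
	*task_id = NO_VAL;
	if ((end_ptr[0] == '_') && (end_ptr[1] != '['))
		*task_id = (uint32_t) strtol(end_ptr + 1, &end_ptr, 10);
	return SLURM_SUCCESS;
}
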
diff --git a/src/slurmctld/proc_req.c b/src/slurmctld/proc_req.c
index 5c994ad292ad48455a77bb3733d3afba61aa4042..a21d7ba95afc8aa432de0da6523ad33f8caf7d88 100644
--- a/src/slurmctld/proc_req.c
+++ b/src/slurmctld/proc_req.c
@@ -3151,7 +3151,8 @@ fini:	xfree(err_msg);
 }
 
 /* _slurm_rpc_update_job - process RPC to update the configuration of a
- *	job (e.g. priority) */
+ * job (e.g. priority)
+ */
 static void _slurm_rpc_update_job(slurm_msg_t * msg)
 {
 	int error_code;
@@ -3168,7 +3169,7 @@ static void _slurm_rpc_update_job(slurm_msg_t * msg)
 	/* do RPC call */
 	dump_job_desc(job_desc_msg);
 	lock_slurmctld(job_write_lock);
-	error_code = update_job(job_desc_msg, uid);
+	error_code = update_job_str(job_desc_msg, uid);
 	unlock_slurmctld(job_write_lock);
 	END_TIMER2("_slurm_rpc_update_job");
 
@@ -3179,7 +3180,7 @@ static void _slurm_rpc_update_job(slurm_msg_t * msg)
 		slurm_send_rc_msg(msg, error_code);
 	} else {
 		info("_slurm_rpc_update_job complete JobId=%u uid=%d %s",
-		       job_desc_msg->job_id, uid, TIME_STR);
+		     job_desc_msg->job_id, uid, TIME_STR);
 		slurm_send_rc_msg(msg, SLURM_SUCCESS);
 		/* Below functions provide their own locking */
 		schedule_job_save();
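
One loose end worth noting: a request that arrives with only job_id_str set leaves job_desc_msg->job_id at its NO_VAL default, so the completion log above prints a meaningless numeric ID for such requests. A possible follow-up, not part of this patch, is to prefer the string form when present:

/* Sketch: prefer the string job ID in the completion log. */
if (job_desc_msg->job_id_str) {
	info("_slurm_rpc_update_job complete JobId=%s uid=%d %s",
	     job_desc_msg->job_id_str, uid, TIME_STR);
} else {
	info("_slurm_rpc_update_job complete JobId=%u uid=%d %s",
	     job_desc_msg->job_id, uid, TIME_STR);
}
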
@@ -3841,6 +3842,7 @@ inline static void _slurm_rpc_requeue(slurm_msg_t * msg)
 	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
 
 	START_TIMER;
+
 	info("%s: Processing RPC: REQUEST_JOB_REQUEUE from uid=%d", __func__,
 	     uid);