diff --git a/src/slurmctld/fed_mgr.c b/src/slurmctld/fed_mgr.c
index 2b2eb7bffd36320c5f0290366a5150b2f76057c0..cc917b8eecb5b9b52e208d2d2df88a228dbbe97a 100644
--- a/src/slurmctld/fed_mgr.c
+++ b/src/slurmctld/fed_mgr.c
@@ -1161,8 +1161,7 @@ static void *_sib_will_run(void *arg)
 		if (job_desc->job_id == NO_VAL) {
 			/* Get a job_id now without incrementing the job_id
 			 * count. This prevents burning job_ids on will_runs */
-			job_desc->job_id =
-				fed_mgr_get_job_id(get_next_job_id(true));
+			job_desc->job_id = get_next_job_id(true);
 		}
 
 		rc = job_allocate(sib_msg->data, false, true,
@@ -1658,7 +1657,7 @@ extern int fed_mgr_job_allocate(slurm_msg_t *msg, job_desc_msg_t *job_desc,
 	lock_slurmctld(job_write_lock);
 	/* get job_id now. Can't submit job to get job_id as job_allocate will
 	 * change the job_desc. */
-	job_desc->job_id = fed_mgr_get_job_id(get_next_job_id(false));
+	job_desc->job_id = get_next_job_id(false);
 	unlock_slurmctld(job_write_lock);
 
 	/* Don't job/node write lock on _find_start_now_sib. It locks inside
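The two fed_mgr.c hunks above drop the fed_mgr_get_job_id() wrapping from the callers because get_next_job_id() (see the job_mgr.c hunk below) now hands back an id that already carries the federation bits. A minimal sketch of the id layout this relies on, assuming the cluster's federation id is packed into the upper bits of a 32-bit job id; the 26-bit split, the constants, and sketch_fed_job_id() are illustrative stand-ins, not the real fed_mgr API:

/* Sketch only: assumed layout of a federated job id.  The split point
 * (26 local bits) and every name here are illustrative, not the real
 * fed_mgr definitions. */
#include <stdint.h>

#define SKETCH_LOCAL_ID_BITS 26
#define SKETCH_LOCAL_ID_MASK ((1u << SKETCH_LOCAL_ID_BITS) - 1)

static uint32_t sketch_cluster_fed_id = 3;	/* hypothetical cluster id */

/* Stand-in for fed_mgr_get_job_id(): fold the cluster's federation id
 * into the upper bits of a locally generated sequence value. */
static uint32_t sketch_fed_job_id(uint32_t local_id)
{
	return (sketch_cluster_fed_id << SKETCH_LOCAL_ID_BITS) |
	       (local_id & SKETCH_LOCAL_ID_MASK);
}

With the conversion done inside get_next_job_id(), the submit path and the sibling will_run path both receive federation-unique ids without each caller having to remember the wrapping step.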
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index 61704dd402de5e0d3af12865c290eb78aeb8b23b..ad3d9491befc891b4eabe08789eb91229549ac4a 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -9899,7 +9899,7 @@ void reset_first_job_id(void)
 /*
  * Return the next available job_id to be used.
  *
- * Must have job_write lock when grabbing a job_id
+ * Must have job_write and fed_read locks when grabbing a job_id
  *
  * IN test_only - if true, doesn't advance the job_id sequence, just returns
  * 	what the next job id will be.
@@ -9917,14 +9917,16 @@ extern uint32_t get_next_job_id(bool test_only)
 	for (i = 0; i < max_jobs; i++) {
 		if (++tmp_id_sequence >= slurmctld_conf.max_job_id)
 			tmp_id_sequence = slurmctld_conf.first_job_id;
-		if (find_job_record(tmp_id_sequence))
+
+		new_id = fed_mgr_get_job_id(tmp_id_sequence);
+
+		if (find_job_record(new_id))
 			continue;
-		if (_dup_job_file_test(tmp_id_sequence))
+		if (_dup_job_file_test(new_id))
 			continue;
 
-		new_id = tmp_id_sequence;
 		if (!test_only)
-			job_id_sequence = new_id;
+			job_id_sequence = tmp_id_sequence;
 
 		return new_id;
 	}
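The behavioral point of this job_mgr.c hunk: the duplicate checks (find_job_record(), _dup_job_file_test()) now run against the federated id that will actually be handed out, while job_id_sequence keeps tracking the plain local counter, so the cluster bits never leak into the sequence. A self-contained sketch of the loop under those assumptions; job_exists() and dup_state_file_exists() are hypothetical stand-ins for the real lookups, and cluster_bits is assumed to already hold the cluster id shifted into the high bits:

/* Sketch of the selection loop after this change; all names are
 * illustrative stand-ins for the slurmctld internals. */
#include <stdbool.h>
#include <stdint.h>

static bool job_exists(uint32_t job_id)            { (void) job_id; return false; }
static bool dup_state_file_exists(uint32_t job_id) { (void) job_id; return false; }

static uint32_t sketch_next_job_id(uint32_t *seq, uint32_t first_id,
				   uint32_t max_id, uint32_t cluster_bits,
				   bool test_only)
{
	uint32_t local = *seq;
	uint32_t attempts = max_id - first_id;
	uint32_t fed_id;

	while (attempts--) {
		if (++local >= max_id)
			local = first_id;

		/* Test the id we will actually return, not the raw counter. */
		fed_id = cluster_bits | local;

		if (job_exists(fed_id) || dup_state_file_exists(fed_id))
			continue;

		if (!test_only)
			*seq = local;	/* the sequence stores the local value only */
		return fed_id;
	}
	return 0;	/* id space exhausted */
}

With test_only = true (the sibling will_run path in fed_mgr.c) the counter is left alone, so probing does not burn real job ids; with test_only = false (fed_mgr_job_allocate()) the counter advances as before.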
diff --git a/src/slurmctld/proc_req.c b/src/slurmctld/proc_req.c
index 5593876e6958d3163713605c517be2026a3a7d45..6f9219d3d856465b0e82085142cd619486b15714 100644
--- a/src/slurmctld/proc_req.c
+++ b/src/slurmctld/proc_req.c
@@ -662,7 +662,7 @@ static void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 	char *licenses_used;
 	uint32_t next_job_id;
 	slurmctld_lock_t job_write_lock = {
-		NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK };
+		NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK, READ_LOCK };
 
 	/* Do before config lock */
 	licenses_used = get_licenses_used();
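This hunk and the three that follow make the same adjustment: the last slot of each slurmctld_lock_t initializer goes from NO_LOCK to READ_LOCK, so that callers of get_next_job_id() hold the federation read lock it now documents as required. A simplified sketch of the lock-set shape those five-element initializers imply; the field and type names here are illustrative (the real type is slurmctld_lock_t in src/slurmctld/locks.h):

/* Sketch only: the five lock classes implied by the initializers, in order. */
typedef enum { SK_NO_LOCK, SK_READ_LOCK, SK_WRITE_LOCK } sketch_lock_level_t;

typedef struct {
	sketch_lock_level_t config;
	sketch_lock_level_t job;
	sketch_lock_level_t node;
	sketch_lock_level_t partition;
	sketch_lock_level_t federation;	/* the slot this patch upgrades */
} sketch_lock_set_t;

/* e.g. the allocate path: write job/node, read config/partition, and now
 * read federation before a job id is generated. */
static const sketch_lock_set_t sketch_alloc_locks = {
	SK_READ_LOCK, SK_WRITE_LOCK, SK_WRITE_LOCK, SK_READ_LOCK, SK_READ_LOCK
};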
@@ -1058,10 +1058,10 @@ static void _slurm_rpc_allocate_resources(slurm_msg_t * msg)
 	resource_allocation_response_msg_t alloc_msg;
 	/* Locks: Read config, read job, read node, read partition */
 	slurmctld_lock_t job_read_lock = {
-		READ_LOCK, READ_LOCK, READ_LOCK, READ_LOCK, NO_LOCK };
+		READ_LOCK, READ_LOCK, READ_LOCK, READ_LOCK, READ_LOCK };
 	/* Locks: Read config, write job, write node, read partition */
 	slurmctld_lock_t job_write_lock = {
-		READ_LOCK, WRITE_LOCK, WRITE_LOCK, READ_LOCK, NO_LOCK };
+		READ_LOCK, WRITE_LOCK, WRITE_LOCK, READ_LOCK, READ_LOCK };
 	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred,
 					 slurmctld_config.auth_info);
 	int immediate = job_desc_msg->immediate;
@@ -2576,10 +2576,10 @@ static void _slurm_rpc_job_will_run(slurm_msg_t * msg, bool allow_sibs)
 	job_desc_msg_t *job_desc_msg = (job_desc_msg_t *) msg->data;
 	/* Locks: Read config, read job, read node, read partition */
 	slurmctld_lock_t job_read_lock = {
-		READ_LOCK, READ_LOCK, READ_LOCK, READ_LOCK, NO_LOCK };
+		READ_LOCK, READ_LOCK, READ_LOCK, READ_LOCK, READ_LOCK };
 	/* Locks: Write job, Write node, read partition */
 	slurmctld_lock_t job_write_lock = {
-		NO_LOCK, WRITE_LOCK, WRITE_LOCK, READ_LOCK, NO_LOCK };
+		NO_LOCK, WRITE_LOCK, WRITE_LOCK, READ_LOCK, READ_LOCK };
 	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred,
 					 slurmctld_config.auth_info);
 	uint16_t port;	/* dummy value */
@@ -3508,10 +3508,10 @@ static void _slurm_rpc_submit_batch_job(slurm_msg_t * msg)
 	job_desc_msg_t *job_desc_msg = (job_desc_msg_t *) msg->data;
 	/* Locks: Read config, read job, read node, read partition */
 	slurmctld_lock_t job_read_lock = {
-		READ_LOCK, READ_LOCK, READ_LOCK, READ_LOCK, NO_LOCK };
+		READ_LOCK, READ_LOCK, READ_LOCK, READ_LOCK, READ_LOCK };
 	/* Locks: Write job, read node, read partition */
 	slurmctld_lock_t job_write_lock = {
-		NO_LOCK, WRITE_LOCK, READ_LOCK, READ_LOCK, NO_LOCK };
+		NO_LOCK, WRITE_LOCK, READ_LOCK, READ_LOCK, READ_LOCK };
 	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred,
 					 slurmctld_config.auth_info);
 	char *err_msg = NULL;
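Taken together with the updated comment on get_next_job_id(), the expected calling pattern is: take a job write lock plus a federation read lock, reserve the id, release. A hedged usage sketch against the slurmctld internals; the particular lock set chosen here is illustrative, not copied from the patch:

/* Usage sketch only; assumes the slurmctld headers (locks.h and the
 * get_next_job_id() declaration) and an illustrative lock set that
 * satisfies the "job write + fed read" requirement documented above. */
static uint32_t sketch_reserve_job_id(void)
{
	slurmctld_lock_t id_locks =
		{ NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK, READ_LOCK };
	uint32_t job_id;

	lock_slurmctld(id_locks);
	job_id = get_next_job_id(false);	/* advances job_id_sequence */
	unlock_slurmctld(id_locks);

	return job_id;
}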