From e3b47f0be913a1ac700ac5bb99cbef01d8aa60e2 Mon Sep 17 00:00:00 2001
From: Moe Jette <jette1@llnl.gov>
Date: Wed, 26 Sep 2007 18:36:41 +0000
Subject: [PATCH] svn merge -r12403:12413
 https://eris.llnl.gov/svn/slurm/branches/slurm-1.2

---
 NEWS                                          |  2 +
 src/common/timers.c                           |  9 +-
 src/common/timers.h                           |  8 +-
 .../block_allocator/block_allocator.c         | 12 +--
 .../select/bluegene/plugin/select_bluegene.c  |  3 +-
 src/slurmctld/controller.c                    |  7 +-
 src/slurmctld/job_mgr.c                       |  8 +-
 src/slurmctld/job_scheduler.c                 |  3 +
 src/slurmctld/node_mgr.c                      |  7 +-
 src/slurmctld/partition_mgr.c                 |  3 +-
 src/slurmctld/proc_req.c                      | 93 +++++++++++--------
 src/slurmctld/read_config.c                   |  5 +-
 12 files changed, 89 insertions(+), 71 deletions(-)

diff --git a/NEWS b/NEWS
index cbd1cf1a46c..9764c5350ea 100644
--- a/NEWS
+++ b/NEWS
@@ -65,6 +65,8 @@ documents those changes that are of interest to users and admins.
 =========================
  -- BLUEGENE - bug fix for smap stating passthroughs are used when they aren't
  -- fixed bug in sview to be able to edit partitions correctly
+ -- fixed bug so that slurm.conf files where SlurmdPort isn't defined
+    work correctly.
 
 * Changes in SLURM 1.2.17
 =========================
diff --git a/src/common/timers.c b/src/common/timers.c
index 421cdae9182..06cdec28d09 100644
--- a/src/common/timers.c
+++ b/src/common/timers.c
@@ -45,16 +45,19 @@
  * IN tv2 - end of event
  * OUT tv_str - place to put delta time in format "usec=%ld"
  * IN len_tv_str - size of tv_str in bytes
+ * IN from - where the function was called from
  */
 inline void diff_tv_str(struct timeval *tv1,struct timeval *tv2, 
-		char *tv_str, int len_tv_str)
+		char *tv_str, int len_tv_str, char *from)
 {
 	long delta_t;
 	delta_t  = (tv2->tv_sec  - tv1->tv_sec) * 1000000;
 	delta_t +=  tv2->tv_usec - tv1->tv_usec;
 	snprintf(tv_str, len_tv_str, "usec=%ld", delta_t);
-	if (delta_t > 1000000)
-		verbose("Warning: Note very large processing time: %s",tv_str); 
+	if ((delta_t > 1000000) && from) {
+		verbose("Warning: Note very large processing time from %s: %s",
+			from, tv_str);
+	}
 }
 
 /*
diff --git a/src/common/timers.h b/src/common/timers.h
index 5d1f1588925..0404c4e67d4 100644
--- a/src/common/timers.h
+++ b/src/common/timers.h
@@ -43,7 +43,9 @@
 #define DEF_TIMERS	struct timeval tv1, tv2; char tv_str[20]
 #define START_TIMER	gettimeofday(&tv1, NULL)
 #define END_TIMER	gettimeofday(&tv2, NULL); \
-			diff_tv_str(&tv1, &tv2, tv_str, 20)
+			diff_tv_str(&tv1, &tv2, tv_str, 20, NULL)
+#define END_TIMER2(from) gettimeofday(&tv2, NULL); \
+			diff_tv_str(&tv1, &tv2, tv_str, 20, from)
 #define DELTA_TIMER	diff_tv(&tv1, &tv2)
 #define TIME_STR 	tv_str
 
@@ -54,8 +56,8 @@
  * OUT tv_str - place to put delta time in format "usec=%ld"
  * IN len_tv_str - size of tv_str in bytes
  */
-extern inline void diff_tv_str(struct timeval *tv1,struct timeval *tv2, 
-		char *tv_str, int len_tv_str);
+extern inline void diff_tv_str(struct timeval *tv1,struct timeval *tv2,
+                char *tv_str, int len_tv_str, char *from);
 
 /*
  * diff_tv - return the difference between two times
diff --git a/src/plugins/select/bluegene/block_allocator/block_allocator.c b/src/plugins/select/bluegene/block_allocator/block_allocator.c
index b136145a104..64446d6d725 100644
--- a/src/plugins/select/bluegene/block_allocator/block_allocator.c
+++ b/src/plugins/select/bluegene/block_allocator/block_allocator.c
@@ -1046,13 +1046,13 @@ extern void ba_update_node_state(ba_node_t *ba_node, uint16_t state)
 	}
 
 #ifdef HAVE_BG
-	debug("ba_update_node_state: new state of node[%c%c%c] is %s", 
-	      alpha_num[ba_node->coord[X]], alpha_num[ba_node->coord[Y]],
-	      alpha_num[ba_node->coord[Z]], node_state_string(state)); 
+	debug2("ba_update_node_state: new state of node[%c%c%c] is %s", 
+	       alpha_num[ba_node->coord[X]], alpha_num[ba_node->coord[Y]],
+	       alpha_num[ba_node->coord[Z]], node_state_string(state)); 
 #else
-	debug("ba_update_node_state: new state of node[%d] is %s", 
-	      ba_node->coord[X],
-	      node_state_string(state)); 
+	debug2("ba_update_node_state: new state of node[%d] is %s", 
+	       ba_node->coord[X],
+	       node_state_string(state)); 
 #endif
 
 	/* basically set the node as used */
diff --git a/src/plugins/select/bluegene/plugin/select_bluegene.c b/src/plugins/select/bluegene/plugin/select_bluegene.c
index 79622229f0e..711dbbb3b28 100644
--- a/src/plugins/select/bluegene/plugin/select_bluegene.c
+++ b/src/plugins/select/bluegene/plugin/select_bluegene.c
@@ -315,8 +315,7 @@ extern int select_p_state_save(char *dir_name)
 	xfree(new_file);
 
 	free_buf(buffer);
-	END_TIMER;
-	debug3("select_p_state_save %s", TIME_STR);
+	END_TIMER2("select_p_state_save");
 	return SLURM_SUCCESS;
 }
 
diff --git a/src/slurmctld/controller.c b/src/slurmctld/controller.c
index d136c3ba8a0..9d477d8f675 100644
--- a/src/slurmctld/controller.c
+++ b/src/slurmctld/controller.c
@@ -933,10 +933,7 @@ static void *_slurmctld_background(void *no_data)
 			(void) _shutdown_backup_controller(0);
 		}
 		unlock_slurmctld(config_read_lock);
-
-		END_TIMER;
-		if (DELTA_TIMER > 1000000)	/* more than one second */ 
-			info("_slurmctld_background loop %s", TIME_STR);
+		END_TIMER2("_slurmctld_background");
 	}
 
 	debug3("_slurmctld_background shutting down");
@@ -1136,7 +1133,7 @@ static int _shutdown_backup_controller(int wait_time)
 	START_TIMER;
 	if (slurm_send_recv_rc_msg_only_one(&req, &rc, 
 				(CONTROL_TIMEOUT * 1000)) < 0) {
-		END_TIMER;
+		END_TIMER2("_shutdown_backup_controller");
 		error("_shutdown_backup_controller:send/recv: %m, %s", TIME_STR);
 		return SLURM_ERROR;
 	}
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index 8b81009c7f1..3ba0807609b 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -348,8 +348,7 @@ int dump_all_job_state(void)
 	unlock_state_files();
 
 	free_buf(buffer);
-	END_TIMER;
-	debug3("dump_all_job_state %s", TIME_STR);
+	END_TIMER2("dump_all_job_state");
 	return error_code;
 }
 
@@ -2030,12 +2029,14 @@ _copy_job_desc_to_file(job_desc_msg_t * job_desc, uint32_t job_id)
 {
 	int error_code = 0;
 	char *dir_name, job_dir[20], *file_name;
+	DEF_TIMERS;
 
+	START_TIMER;
 	/* Create state_save_location directory */
 	dir_name = xstrdup(slurmctld_conf.state_save_location);
 
 	/* Create job_id specific directory */
-	sprintf(job_dir, "/job.%d", job_id);
+	sprintf(job_dir, "/job.%u", job_id);
 	xstrcat(dir_name, job_dir);
 	if (mkdir(dir_name, 0700)) {
 		error("mkdir(%s) error %m", dir_name);
@@ -2061,6 +2062,7 @@ _copy_job_desc_to_file(job_desc_msg_t * job_desc, uint32_t job_id)
 	}
 
 	xfree(dir_name);
+	END_TIMER2("_copy_job_desc_to_file");
 	return error_code;
 }
 
diff --git a/src/slurmctld/job_scheduler.c b/src/slurmctld/job_scheduler.c
index 7dec58858fb..5b341537a47 100644
--- a/src/slurmctld/job_scheduler.c
+++ b/src/slurmctld/job_scheduler.c
@@ -169,7 +169,9 @@ int schedule(void)
 #endif
 	static bool wiki_sched = false;
 	static bool wiki_sched_test = false;
+	DEF_TIMERS;
 
+	START_TIMER;
 	/* don't bother trying to avoid fragmentation with sched/wiki */
 	if (!wiki_sched_test) {
 		char *sched_type = slurm_get_sched_type();
@@ -274,6 +276,7 @@ int schedule(void)
 	xfree(failed_parts);
 	xfree(job_queue);
 	unlock_slurmctld(job_write_lock);
+	END_TIMER2("schedule");
 	return job_cnt;
 }
 
diff --git a/src/slurmctld/node_mgr.c b/src/slurmctld/node_mgr.c
index 6b9ab02d152..2606b494d8e 100644
--- a/src/slurmctld/node_mgr.c
+++ b/src/slurmctld/node_mgr.c
@@ -306,8 +306,7 @@ int dump_all_node_state ( void )
 	unlock_state_files ();
 
 	free_buf (buffer);
-	END_TIMER;
-	debug3("dump_all_node_state %s", TIME_STR);
+	END_TIMER2("dump_all_node_state");
 	return error_code;
 }
 
@@ -977,9 +976,7 @@ void set_slurmd_addr (void)
 		       node_ptr->comm_name);
 	}
 
-	END_TIMER;
-	debug("set_slurmd_addr: got IP addresses for all nodes %s",
-		TIME_STR);
+	END_TIMER2("set_slurmd_addr");
 	return;
 }
 
diff --git a/src/slurmctld/partition_mgr.c b/src/slurmctld/partition_mgr.c
index 4e6bd0472ec..2515165c567 100644
--- a/src/slurmctld/partition_mgr.c
+++ b/src/slurmctld/partition_mgr.c
@@ -342,8 +342,7 @@ int dump_all_part_state(void)
 	unlock_state_files();
 
 	free_buf(buffer);
-	END_TIMER;
-	debug3("dump_all_part_state %s", TIME_STR);
+	END_TIMER2("dump_all_part_state");
 	return 0;
 }
 
diff --git a/src/slurmctld/proc_req.c b/src/slurmctld/proc_req.c
index 3244da8f3c6..955d74adcd0 100644
--- a/src/slurmctld/proc_req.c
+++ b/src/slurmctld/proc_req.c
@@ -510,7 +510,7 @@ static void _slurm_rpc_allocate_resources(slurm_msg_t * msg)
 					  immediate, false, 
 					  true, uid, &job_ptr);
 		/* unlock after finished using the job structure data */
-		END_TIMER;
+		END_TIMER2("_slurm_rpc_allocate_resources");
 	}
 
 	/* return result */
@@ -586,8 +586,7 @@ static void _slurm_rpc_dump_conf(slurm_msg_t * msg)
 	} else {
 		_fill_ctld_conf(&config_tbl);
 		unlock_slurmctld(config_read_lock);
-		END_TIMER;
-		debug2("_slurm_rpc_dump_conf %s", TIME_STR);
+		END_TIMER2("_slurm_rpc_dump_conf");
 
 		/* init response_msg structure */
 		slurm_msg_t_init(&response_msg);
@@ -627,7 +626,7 @@ static void _slurm_rpc_dump_jobs(slurm_msg_t * msg)
 			      job_info_request_msg->show_flags, 
 			      g_slurm_auth_get_uid(msg->auth_cred));
 		unlock_slurmctld(job_read_lock);
-		END_TIMER;
+		END_TIMER2("_slurm_rpc_dump_jobs");
 		debug2("_slurm_rpc_dump_jobs, size=%d %s",
 		     dump_size, TIME_STR);
 
@@ -662,7 +661,7 @@ static void _slurm_rpc_end_time(slurm_msg_t * msg)
 	lock_slurmctld(job_read_lock);
 	rc = job_end_time(time_req_msg, &timeout_msg);
 	unlock_slurmctld(job_read_lock);
-	END_TIMER;
+	END_TIMER2("_slurm_rpc_end_time");
 
 	if (rc != SLURM_SUCCESS) {
 		slurm_send_rc_msg(msg, rc);
@@ -708,7 +707,7 @@ static void _slurm_rpc_dump_nodes(slurm_msg_t * msg)
 		pack_all_node(&dump, &dump_size, node_req_msg->show_flags, 
 			      g_slurm_auth_get_uid(msg->auth_cred));
 		unlock_slurmctld(node_read_lock);
-		END_TIMER;
+		END_TIMER2("_slurm_rpc_dump_nodes");
 		debug2("_slurm_rpc_dump_nodes, size=%d %s",
 		     dump_size, TIME_STR);
 
@@ -732,15 +731,16 @@ static void _slurm_rpc_dump_partitions(slurm_msg_t * msg)
 	char *dump;
 	int dump_size;
 	slurm_msg_t response_msg;
-	part_info_request_msg_t  *part_req_msg = 
-			(part_info_request_msg_t  *) msg->data;
-	/* Locks: Read configureatin and partition */
+	part_info_request_msg_t  *part_req_msg;
+
+	/* Locks: Read configuration and partition */
 	slurmctld_lock_t part_read_lock = { 
 		READ_LOCK, NO_LOCK, NO_LOCK, READ_LOCK };
 	uid_t uid;
 
 	START_TIMER;
 	debug2("Processing RPC: REQUEST_PARTITION_INFO");
+	part_req_msg = (part_info_request_msg_t  *) msg->data;
 	lock_slurmctld(part_read_lock);
 
 	uid = g_slurm_auth_get_uid(msg->auth_cred);
@@ -756,7 +756,7 @@ static void _slurm_rpc_dump_partitions(slurm_msg_t * msg)
 		pack_all_part(&dump, &dump_size, part_req_msg->show_flags, 
 				g_slurm_auth_get_uid(msg->auth_cred));
 		unlock_slurmctld(part_read_lock);
-		END_TIMER;
+		END_TIMER2("_slurm_rpc_dump_partitions");
 		debug2("_slurm_rpc_dump_partitions, size=%d %s",
 		     dump_size, TIME_STR);
 
@@ -801,7 +801,7 @@ static void  _slurm_rpc_epilog_complete(slurm_msg_t * msg)
 	                        epilog_msg->return_code))
 		run_scheduler = true;
 	unlock_slurmctld(job_write_lock);
-	END_TIMER;
+	END_TIMER2("_slurm_rpc_epilog_complete");
 
 	if (epilog_msg->return_code)
 		error("_slurm_rpc_epilog_complete JobId=%u Node=%s Err=%s %s",
@@ -847,7 +847,7 @@ static void _slurm_rpc_job_step_kill(slurm_msg_t * msg)
 					job_step_kill_msg->signal, 
 					job_step_kill_msg->batch_flag, uid);
 		unlock_slurmctld(job_write_lock);
-		END_TIMER;
+		END_TIMER2("_slurm_rpc_job_step_kill");
 
 		/* return result */
 		if (error_code) {
@@ -869,7 +869,7 @@ static void _slurm_rpc_job_step_kill(slurm_msg_t * msg)
 					     job_step_kill_msg->signal,
 					     uid);
 		unlock_slurmctld(job_write_lock);
-		END_TIMER;
+		END_TIMER2("_slurm_rpc_job_step_kill");
 
 		/* return result */
 		if (error_code) {
@@ -919,7 +919,7 @@ static void _slurm_rpc_complete_job_allocation(slurm_msg_t * msg)
 	error_code = job_complete(comp_msg->job_id, uid,
 				  job_requeue, comp_msg->job_rc);
 	unlock_slurmctld(job_write_lock);
-	END_TIMER;
+	END_TIMER2("_slurm_rpc_complete_job_allocation");
 
 	/* return result */
 	if (error_code) {
@@ -959,7 +959,7 @@ static void _slurm_rpc_complete_batch_script(slurm_msg_t * msg)
 
 	if (!validate_super_user(uid)) {
 		/* Only the slurmstepd can complete a batch script */
-		END_TIMER;
+		END_TIMER2("_slurm_rpc_complete_batch_script");
 		return;
 	}
  
@@ -1000,7 +1000,7 @@ static void _slurm_rpc_complete_batch_script(slurm_msg_t * msg)
 	error_code = job_complete(comp_msg->job_id, uid,
 				  job_requeue, comp_msg->job_rc);
 	unlock_slurmctld(job_write_lock);
-	END_TIMER;
+	END_TIMER2("_slurm_rpc_complete_batch_script");
 
 	/* return result */
 	if (error_code) {
@@ -1068,7 +1068,7 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg)
 	}
 	if (error_code == SLURM_SUCCESS)
 		error_code = _make_step_cred(step_rec, &slurm_cred);
-	END_TIMER;
+	END_TIMER2("_slurm_rpc_job_step_create");
 
 	/* return result */
 	if (error_code) {
@@ -1133,7 +1133,7 @@ static void _slurm_rpc_job_step_get_info(slurm_msg_t * msg)
 				request->job_id, request->step_id, 
 				uid, request->show_flags, buffer);
 		unlock_slurmctld(job_read_lock);
-		END_TIMER;
+		END_TIMER2("_slurm_rpc_job_step_get_info");
 		if (error_code) {
 			/* job_id:step_id not found or otherwise *\
 			\* error message is printed elsewhere    */
@@ -1204,7 +1204,7 @@ static void _slurm_rpc_job_will_run(slurm_msg_t * msg)
 		error_code = job_allocate(job_desc_msg, 
 				true, true, true, uid, &job_ptr);
 		unlock_slurmctld(job_write_lock);
-		END_TIMER;
+		END_TIMER2("_slurm_rpc_job_will_run");
 	}
 
 	/* return result */
@@ -1266,7 +1266,7 @@ static void _slurm_rpc_node_registration(slurm_msg_t * msg)
 					node_reg_stat_msg->status);
 #endif
 		unlock_slurmctld(job_write_lock);
-		END_TIMER;
+		END_TIMER2("_slurm_rpc_node_registration");
 	}
 
 	/* return result */
@@ -1306,7 +1306,7 @@ static void _slurm_rpc_job_alloc_info(slurm_msg_t * msg)
 	do_unlock = true;
 	lock_slurmctld(job_read_lock);
 	error_code = job_alloc_info(uid, job_info_msg->job_id, &job_ptr);
-	END_TIMER;
+	END_TIMER2("_slurm_rpc_job_alloc_info");
 
 	/* return result */
 	if (error_code || (job_ptr == NULL)) {
@@ -1381,7 +1381,7 @@ static void _slurm_rpc_job_alloc_info_lite(slurm_msg_t * msg)
 	do_unlock = true;
 	lock_slurmctld(job_read_lock);
 	error_code = job_alloc_info(uid, job_info_msg->job_id, &job_ptr);
-	END_TIMER;
+	END_TIMER2("_slurm_rpc_job_alloc_info_lite");
 
 	/* return result */
 	if (error_code || (job_ptr == NULL)) {
@@ -1475,7 +1475,7 @@ static void _slurm_rpc_reconfigure_controller(slurm_msg_t * msg)
 		unlock_slurmctld(config_write_lock);
 		trigger_reconfig();
 	}
-	END_TIMER;
+	END_TIMER2("_slurm_rpc_reconfigure_controller");
 
 	/* return result */
 	if (error_code) {
@@ -1634,7 +1634,7 @@ static void _slurm_rpc_step_complete(slurm_msg_t *msg)
 		error_code = job_complete(req->job_id, uid, job_requeue, 
 					  step_rc);
 		unlock_slurmctld(job_write_lock);
-		END_TIMER;
+		END_TIMER2("_slurm_rpc_step_complete");
 
 		/* return result */
 		if (error_code) {
@@ -1651,7 +1651,7 @@ static void _slurm_rpc_step_complete(slurm_msg_t *msg)
 		error_code = job_step_complete(req->job_id, req->job_step_id,
 				uid, job_requeue, step_rc);
 		unlock_slurmctld(job_write_lock);
-		END_TIMER;
+		END_TIMER2("_slurm_rpc_step_complete");
 
 		/* return result */
 		if (error_code) {
@@ -1695,7 +1695,7 @@ static void _slurm_rpc_step_layout(slurm_msg_t *msg)
 
 	lock_slurmctld(job_read_lock);
 	error_code = job_alloc_info(uid, req->job_id, &job_ptr);
-	END_TIMER;
+	END_TIMER2("_slurm_rpc_step_layout");
 	/* return result */
 	if (error_code || (job_ptr == NULL)) {
 		unlock_slurmctld(job_read_lock);
@@ -1785,7 +1785,7 @@ static void _slurm_rpc_submit_batch_job(slurm_msg_t * msg)
 			error_code = _launch_batch_step(job_desc_msg, uid,
 							&step_id);
 			unlock_slurmctld(job_write_lock);
-			END_TIMER;
+			END_TIMER2("_slurm_rpc_submit_batch_job");
 
 			if (error_code != SLURM_SUCCESS) {
 				info("_launch_batch_step: %s",
@@ -1814,7 +1814,7 @@ static void _slurm_rpc_submit_batch_job(slurm_msg_t * msg)
 				job_desc_msg->immediate, false,
 				false, uid, &job_ptr);
 		unlock_slurmctld(job_write_lock);
-		END_TIMER;
+		END_TIMER2("_slurm_rpc_submit_batch_job");
 	}
 
 	/* return result */
@@ -1862,7 +1862,7 @@ static void _slurm_rpc_update_job(slurm_msg_t * msg)
 	lock_slurmctld(job_write_lock);
 	error_code = update_job(job_desc_msg, uid);
 	unlock_slurmctld(job_write_lock);
-	END_TIMER;
+	END_TIMER2("_slurm_rpc_update_job");
 
 	/* return result */
 	if (error_code) {
@@ -1892,13 +1892,16 @@ static void _slurm_rpc_update_job(slurm_msg_t * msg)
 extern int slurm_drain_nodes(char *node_list, char *reason)
 {
 	int error_code;
+	DEF_TIMERS;
 	/* Locks: Write  node */
 	slurmctld_lock_t node_write_lock = { 
 		NO_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK };
 
+	START_TIMER;
 	lock_slurmctld(node_write_lock);
 	error_code = drain_nodes(node_list, reason);
 	unlock_slurmctld(node_write_lock);
+	END_TIMER2("slurm_drain_nodes");
 
 	return error_code;
 }
@@ -1914,13 +1917,16 @@ extern int slurm_drain_nodes(char *node_list, char *reason)
 extern int slurm_fail_job(uint32_t job_id)
 {
 	int error_code;
+	DEF_TIMERS;
 	/* Locks: Write job and node */
 	slurmctld_lock_t job_write_lock = {
 		NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK };
 
+	START_TIMER;
 	lock_slurmctld(job_write_lock);
 	error_code = job_fail(job_id);
 	unlock_slurmctld(job_write_lock);
+	END_TIMER2("slurm_fail_job");
 
 	return error_code;
 }
@@ -1953,7 +1959,7 @@ static void _slurm_rpc_update_node(slurm_msg_t * msg)
 		lock_slurmctld(node_write_lock);
 		error_code = update_node(update_node_msg_ptr);
 		unlock_slurmctld(node_write_lock);
-		END_TIMER;
+		END_TIMER2("_slurm_rpc_update_node");
 	}
 
 	/* return result */
@@ -2009,7 +2015,7 @@ static void _slurm_rpc_update_partition(slurm_msg_t * msg)
 			error_code = update_part(part_desc_ptr);
 			unlock_slurmctld(part_write_lock);
 		}
-		END_TIMER;
+		END_TIMER2("_slurm_rpc_update_partition");
 	}
 
 	/* return result */
@@ -2058,7 +2064,7 @@ static void _slurm_rpc_delete_partition(slurm_msg_t * msg)
 		lock_slurmctld(part_write_lock);
 		error_code = delete_partition(part_desc_ptr);
 		unlock_slurmctld(part_write_lock);
-		END_TIMER;
+		END_TIMER2("_slurm_rpc_delete_partition");
 	}
 
 	/* return result */
@@ -2089,7 +2095,7 @@ static void _slurm_rpc_job_ready(slurm_msg_t * msg)
 
 	START_TIMER;
 	error_code = job_node_ready(id_msg->job_id, &result);
-	END_TIMER;
+	END_TIMER2("_slurm_rpc_job_ready");
 
 	if (error_code) {
 		debug2("_slurm_rpc_job_ready: %s",
@@ -2136,7 +2142,7 @@ static void  _slurm_rpc_node_select_info(slurm_msg_t * msg)
 					sel_req_msg->last_update,
 					&buffer);
 	}
-	END_TIMER;
+	END_TIMER2("_slurm_rpc_node_select_info");
 
 	if (error_code) {
 		debug3("_slurm_rpc_node_select_info: %s", 
@@ -2193,7 +2199,7 @@ inline static void _slurm_rpc_suspend(slurm_msg_t * msg)
 	lock_slurmctld(job_write_lock);
 	error_code = job_suspend(sus_ptr, uid, msg->conn_fd);
 	unlock_slurmctld(job_write_lock);
-	END_TIMER;
+	END_TIMER2("_slurm_rpc_suspend");
 	
 	if (error_code) {
 		info("_slurm_rpc_suspend(%s) %u: %s", op,
@@ -2226,7 +2232,7 @@ inline static void _slurm_rpc_requeue(slurm_msg_t * msg)
 	error_code = job_requeue(uid, requeue_ptr->job_id, 
 		msg->conn_fd);
 	unlock_slurmctld(job_write_lock);
-	END_TIMER;
+	END_TIMER2("_slurm_rpc_requeue");
 
 	if (error_code) {
 		info("_slurm_rpc_requeue %u: %s", requeue_ptr->job_id,
@@ -2284,7 +2290,7 @@ inline static void  _slurm_rpc_checkpoint(slurm_msg_t * msg)
 	lock_slurmctld(job_write_lock);
 	error_code = job_step_checkpoint(ckpt_ptr, uid, msg->conn_fd);
 	unlock_slurmctld(job_write_lock);
-	END_TIMER;
+	END_TIMER2("_slurm_rpc_checkpoint");
 
 	if (error_code) {
 		if (ckpt_ptr->step_id == SLURM_BATCH_SCRIPT)
@@ -2329,7 +2335,7 @@ inline static void  _slurm_rpc_checkpoint_comp(slurm_msg_t * msg)
 	lock_slurmctld(job_read_lock);
 	error_code = job_step_checkpoint_comp(ckpt_ptr, uid, msg->conn_fd);
 	unlock_slurmctld(job_read_lock);
-	END_TIMER;
+	END_TIMER2("_slurm_rpc_checkpoint_comp");
 
 	if (error_code) {
 		info("_slurm_rpc_checkpoint_comp %u.%u: %s",
@@ -2564,11 +2570,15 @@ inline static void  _slurm_rpc_trigger_clear(slurm_msg_t * msg)
 	int rc;
 	uid_t uid;
 	trigger_info_msg_t * trigger_ptr = (trigger_info_msg_t *) msg->data;
+	DEF_TIMERS;
 
+	START_TIMER;
 	debug("Processing RPC: REQUEST_TRIGGER_CLEAR");
 	uid = g_slurm_auth_get_uid(msg->auth_cred);
 
 	rc = trigger_clear(uid, trigger_ptr);
+	END_TIMER2("_slurm_rpc_trigger_clear");
+
 	slurm_send_rc_msg(msg, rc);
 }
 
@@ -2578,11 +2588,14 @@ inline static void  _slurm_rpc_trigger_get(slurm_msg_t * msg)
 	trigger_info_msg_t *resp_data;
 	trigger_info_msg_t * trigger_ptr = (trigger_info_msg_t *) msg->data;
 	slurm_msg_t response_msg;
+	DEF_TIMERS;
 
+	START_TIMER;
 	debug("Processing RPC: REQUEST_TRIGGER_GET");
 	uid = g_slurm_auth_get_uid(msg->auth_cred);
 
 	resp_data = trigger_get(uid, trigger_ptr);
+	END_TIMER2("_slurm_rpc_trigger_get");
 
 	slurm_msg_t_init(&response_msg);
 	response_msg.address  = msg->address;
@@ -2598,11 +2611,15 @@ inline static void  _slurm_rpc_trigger_set(slurm_msg_t * msg)
 	uid_t uid;
 	gid_t gid;
 	trigger_info_msg_t * trigger_ptr = (trigger_info_msg_t *) msg->data;
+	DEF_TIMERS;
 
+	START_TIMER;
 	debug("Processing RPC: REQUEST_TRIGGER_SET");
 	uid = g_slurm_auth_get_uid(msg->auth_cred);
 	gid = g_slurm_auth_get_gid(msg->auth_cred);
 
 	rc = trigger_set(uid, gid, trigger_ptr);
+	END_TIMER2("_slurm_rpc_trigger_set");
+
 	slurm_send_rc_msg(msg, rc);
 }
diff --git a/src/slurmctld/read_config.c b/src/slurmctld/read_config.c
index 394e90ff009..ffabdaa0d2d 100644
--- a/src/slurmctld/read_config.c
+++ b/src/slurmctld/read_config.c
@@ -810,10 +810,7 @@ int read_slurm_conf(int recover)
 			old_select_type_p);
 
 	slurmctld_conf.last_update = time(NULL);
-	END_TIMER;
-	debug("read_slurm_conf: finished loading configuration %s",
-	     TIME_STR);
-
+	END_TIMER2("read_slurm_conf");
 	return error_code;
 }
 
-- 
GitLab