diff --git a/src/slurmctld/controller.c b/src/slurmctld/controller.c
index 65b23b72eee5d3c1d27a069f76754c57d3902df9..3f70a1d6271998715beb6fbd26316848f3f4b029 100644
--- a/src/slurmctld/controller.c
+++ b/src/slurmctld/controller.c
@@ -313,6 +313,12 @@ int main(int argc, char *argv[])
 			break;
 	}
 
+	/* NOTE: By default we don't wait for the agent to complete,
+	 * nor do we free data structures */
+#if	MEM_LEAK_TEST
+	log_fini();
+	xfree(slurmctld_conf.slurm_conf);
+#endif
 	return SLURM_SUCCESS;
 }
 
@@ -615,7 +621,7 @@ static void *_slurmctld_background(void *no_data)
 
 #if	MEM_LEAK_TEST
 	/* This should purge all allocated memory,      *\
-	   \*   Anything left over represents a leak.   */
+	\*   Anything left over represents a leak.   */
 	if (job_list)
 		list_destroy(job_list);
 
diff --git a/src/slurmctld/node_mgr.c b/src/slurmctld/node_mgr.c
index 84cfe1aaed5e9fc068154d5fa2a30ce6b3f299fe..b54fbae47eb5c3493e829cd1177553df466d20a3 100644
--- a/src/slurmctld/node_mgr.c
+++ b/src/slurmctld/node_mgr.c
@@ -821,8 +821,8 @@ void rehash_node (void)
 {
 	int i, inx;
 
-	xrealloc (hash_table, (sizeof (int) * node_record_count));
-	memset (hash_table, 0, (sizeof (int) * node_record_count));
+	xfree (hash_table);
+	hash_table = xmalloc (sizeof (int) * node_record_count);
 
 	for (i = 0; i < node_record_count; i++) {
 		if (strlen (node_record_table_ptr[i].name) == 0)
diff --git a/src/slurmctld/node_scheduler.c b/src/slurmctld/node_scheduler.c
index 014d4acfca042deadc56db292e17267907a154e1..517dacbb50a1f2729a0a06672ca537a13fc22d44 100644
--- a/src/slurmctld/node_scheduler.c
+++ b/src/slurmctld/node_scheduler.c
@@ -1042,10 +1042,6 @@ void build_node_details(struct job_record *job_ptr)
 		job_ptr->node_cnt = node_inx;
 	}
 	job_ptr->num_cpu_groups = cpu_inx + 1;
-	xrealloc(job_ptr->cpus_per_node,
-		 sizeof(uint32_t *) * job_ptr->num_cpu_groups);
-	xrealloc(job_ptr->cpu_count_reps,
-		 sizeof(uint32_t *) * job_ptr->num_cpu_groups);
 }
 
 /*