From 342e5161904afa3d8b2c3b3285e43a1b619e691f Mon Sep 17 00:00:00 2001
From: Moe Jette <jette1@llnl.gov>
Date: Mon, 13 Oct 2008 23:41:51 +0000
Subject: [PATCH] svn merge -r15339:15394
 https://eris.llnl.gov/svn/slurm/branches/slurm-1.3

---
 NEWS                                          |   4 +
 auxdir/x_ac_databases.m4                      |   1 +
 config.h.in                                   |   3 +
 configure                                     |   5 +
 doc/html/team.shtml                           |   5 +-
 doc/man/Makefile.am                           |  36 ++
 doc/man/Makefile.in                           |  36 ++
 doc/man/man1/sacctmgr.1                       | 351 ++++++++++++++----
 doc/man/man1/salloc.1                         |   5 +
 doc/man/man1/sbatch.1                         |   5 +
 doc/man/man1/sreport.1                        |  55 ++-
 doc/man/man1/srun.1                           |   5 +
 doc/man/man3/slurm_allocate_resources.3       | 196 +++++++++-
 .../man3/slurm_allocate_resources_blocking.3  |   1 +
 doc/man/man3/slurm_allocation_lookup_lite.3   |   1 +
 .../man3/slurm_allocation_msg_thr_create.3    |   1 +
 .../man3/slurm_allocation_msg_thr_destroy.3   |   1 +
 doc/man/man3/slurm_checkpoint_task_complete.3 |   1 +
 doc/man/man3/slurm_clear_trigger.3            |   1 +
 .../slurm_free_job_alloc_info_response_msg.3  |   1 +
 doc/man/man3/slurm_free_job_info_msg.3        |  13 +-
 doc/man/man3/slurm_free_node_info_msg.3       |   1 +
 doc/man/man3/slurm_free_partition_info_msg.3  |   1 +
 doc/man/man3/slurm_free_slurmd_status.3       |   1 +
 doc/man/man3/slurm_free_trigger_msg.3         |   1 +
 doc/man/man3/slurm_get_checkpoint_file_path.3 |   1 +
 doc/man/man3/slurm_get_triggers.3             |   1 +
 doc/man/man3/slurm_job_step_layout_free.3     |   1 +
 doc/man/man3/slurm_job_step_layout_get.3      |   1 +
 doc/man/man3/slurm_kill_job.3                 |  48 ++-
 doc/man/man3/slurm_load_slurmd_status.3       |   1 +
 doc/man/man3/slurm_notify_job.3               |   1 +
 doc/man/man3/slurm_ping.3                     |   1 +
 doc/man/man3/slurm_print_slurmd_status.3      |   1 +
 doc/man/man3/slurm_read_hostfile.3            |   1 +
 doc/man/man3/slurm_set_debug_level.3          |   1 +
 doc/man/man3/slurm_set_trigger.3              |   1 +
 doc/man/man3/slurm_signal_job.3               |   1 +
 doc/man/man3/slurm_signal_job_step.3          |   1 +
 doc/man/man3/slurm_slurmd_status.3            |  67 ++++
 doc/man/man3/slurm_sprint_job_info.3          |   1 +
 doc/man/man3/slurm_sprint_job_step_info.3     |   1 +
 doc/man/man3/slurm_sprint_node_table.3        |   1 +
 doc/man/man3/slurm_sprint_partition_info.3    |   1 +
 doc/man/man3/slurm_step_ctx_create.3          |  40 +-
 doc/man/man3/slurm_step_ctx_create_no_alloc.3 |   1 +
 .../slurm_step_ctx_daemon_per_node_hack.3     |   1 +
 doc/man/man3/slurm_step_ctx_params_t_init.3   |   1 +
 doc/man/man3/slurm_step_launch.3              |  14 +-
 doc/man/man3/slurm_step_launch_fwd_signal.3   |   1 +
 doc/man/man3/slurm_terminate_job.3            |   1 +
 doc/man/man3/slurm_terminate_job_step.3       |   1 +
 doc/man/man3/slurm_trigger.3                  |   0
 src/api/pmi_server.c                          |   2 +-
 src/api/step_launch.c                         |   4 +
 src/common/assoc_mgr.c                        |   7 +-
 src/common/jobacct_common.c                   |  19 +-
 src/common/log.c                              |  66 +++-
 src/common/node_select.c                      |   3 +-
 src/common/slurm_accounting_storage.c         |  13 +-
 src/common/slurmdbd_defs.c                    |  18 +-
 src/database/mysql_common.c                   |  49 ++-
 .../mysql/accounting_storage_mysql.c          | 208 +++++++----
 .../mysql/mysql_jobacct_process.c             |   8 +-
 .../accounting_storage/mysql/mysql_rollup.c   |  22 +-
 .../slurmdbd/accounting_storage_slurmdbd.c    |   5 +-
 src/plugins/jobcomp/script/jobcomp_script.c   |  47 ++-
 src/plugins/sched/backfill/backfill.c         |  66 +---
 src/plugins/sched/wiki2/job_modify.c          |   2 +-
 .../select/bluegene/plugin/bg_job_place.c     |  14 +-
 .../bluegene/plugin/bg_record_functions.h     |  15 +-
 src/plugins/select/bluegene/plugin/bluegene.c |   2 +-
 src/sacct/print.c                             |  10 +-
 src/sacctmgr/account_functions.c              |  14 +
 src/sacctmgr/association_functions.c          |   6 +
 src/sacctmgr/cluster_functions.c              |   3 +
 src/sacctmgr/common.c                         |  14 +-
 src/sacctmgr/file_functions.c                 | 173 +++++----
 src/sacctmgr/qos_functions.c                  |   3 +
 src/sacctmgr/sacctmgr.c                       |  27 +-
 src/sacctmgr/txn_functions.c                  |  36 +-
 src/sacctmgr/user_functions.c                 |  14 +
 src/salloc/salloc.c                           |  19 +-
 src/slurmctld/agent.c                         |   2 +-
 src/slurmctld/controller.c                    |   8 +-
 src/slurmdbd/proc_req.c                       |   4 +-
 src/sreport/cluster_reports.c                 |  69 +++-
 src/sreport/common.c                          |  54 +--
 src/sreport/job_reports.c                     | 186 ++++++----
 src/sreport/sreport.c                         |  41 +-
 src/sreport/sreport.h                         |   6 +
 src/sreport/user_reports.c                    |  29 +-
 testsuite/expect/test21.20                    |  21 +-
 93 files changed, 1696 insertions(+), 536 deletions(-)
 create mode 100644 doc/man/man3/slurm_allocate_resources_blocking.3
 create mode 100644 doc/man/man3/slurm_allocation_lookup_lite.3
 create mode 100644 doc/man/man3/slurm_allocation_msg_thr_create.3
 create mode 100644 doc/man/man3/slurm_allocation_msg_thr_destroy.3
 create mode 100644 doc/man/man3/slurm_checkpoint_task_complete.3
 create mode 100644 doc/man/man3/slurm_clear_trigger.3
 create mode 100644 doc/man/man3/slurm_free_job_alloc_info_response_msg.3
 create mode 100644 doc/man/man3/slurm_free_node_info_msg.3
 create mode 100644 doc/man/man3/slurm_free_partition_info_msg.3
 create mode 100644 doc/man/man3/slurm_free_slurmd_status.3
 create mode 100644 doc/man/man3/slurm_free_trigger_msg.3
 create mode 100644 doc/man/man3/slurm_get_checkpoint_file_path.3
 create mode 100644 doc/man/man3/slurm_get_triggers.3
 create mode 100644 doc/man/man3/slurm_job_step_layout_free.3
 create mode 100644 doc/man/man3/slurm_job_step_layout_get.3
 create mode 100644 doc/man/man3/slurm_load_slurmd_status.3
 create mode 100644 doc/man/man3/slurm_notify_job.3
 create mode 100644 doc/man/man3/slurm_ping.3
 create mode 100644 doc/man/man3/slurm_print_slurmd_status.3
 create mode 100644 doc/man/man3/slurm_read_hostfile.3
 create mode 100644 doc/man/man3/slurm_set_debug_level.3
 create mode 100644 doc/man/man3/slurm_set_trigger.3
 create mode 100644 doc/man/man3/slurm_signal_job.3
 create mode 100644 doc/man/man3/slurm_signal_job_step.3
 create mode 100644 doc/man/man3/slurm_slurmd_status.3
 create mode 100644 doc/man/man3/slurm_sprint_job_info.3
 create mode 100644 doc/man/man3/slurm_sprint_job_step_info.3
 create mode 100644 doc/man/man3/slurm_sprint_node_table.3
 create mode 100644 doc/man/man3/slurm_sprint_partition_info.3
 create mode 100644 doc/man/man3/slurm_step_ctx_create_no_alloc.3
 create mode 100644 doc/man/man3/slurm_step_ctx_daemon_per_node_hack.3
 create mode 100644 doc/man/man3/slurm_step_ctx_params_t_init.3
 create mode 100644 doc/man/man3/slurm_step_launch_fwd_signal.3
 create mode 100644 doc/man/man3/slurm_terminate_job.3
 create mode 100644 doc/man/man3/slurm_terminate_job_step.3
 create mode 100644 doc/man/man3/slurm_trigger.3

diff --git a/NEWS b/NEWS
index db172b0b1cc..21fbb217897 100644
--- a/NEWS
+++ b/NEWS
@@ -68,6 +68,10 @@ documents those changes that are of interest to users and admins.
     are created in SLURM tables for future use without a reboot of the SLURM
     daemons, but are not reported by any SLURM commands or APIs.
 
+* Changes in SLURM 1.3.10
+=========================
+
+
 * Changes in SLURM 1.3.9
 ========================
  -- Fix jobs being cancelled by ctrl-C to have correct cancelled state in 
diff --git a/auxdir/x_ac_databases.m4 b/auxdir/x_ac_databases.m4
index 776239b8e75..01a3d089e96 100644
--- a/auxdir/x_ac_databases.m4
+++ b/auxdir/x_ac_databases.m4
@@ -93,6 +93,7 @@ AC_DEFUN([X_AC_DATABASES],
             				AC_MSG_RESULT([MySQL (non-threaded) test program built properly.])
             				AC_SUBST(MYSQL_LIBS)
 					AC_SUBST(MYSQL_CFLAGS)
+					AC_DEFINE(MYSQL_NOT_THREAD_SAFE, 1, [Define to 1 if with non thread-safe code])
 					AC_DEFINE(HAVE_MYSQL, 1, [Define to 1 if using MySQL libaries])
 				else
 					MYSQL_CFLAGS=""
diff --git a/config.h.in b/config.h.in
index a1326369959..0c6cf45fef7 100644
--- a/config.h.in
+++ b/config.h.in
@@ -273,6 +273,9 @@
 /* Enable multiple slurmd on one node */
 #undef MULTIPLE_SLURMD
 
+/* Define to 1 if with non thread-safe code */
+#undef MYSQL_NOT_THREAD_SAFE
+
 /* Define to 1 if you are building a production release. */
 #undef NDEBUG
 
diff --git a/configure b/configure
index 35f19c33476..bd46c5d6a27 100755
--- a/configure
+++ b/configure
@@ -25633,6 +25633,11 @@ echo "${ECHO_T}MySQL (non-threaded) test program built properly." >&6; }
 
 
 
+cat >>confdefs.h <<\_ACEOF
+#define MYSQL_NOT_THREAD_SAFE 1
+_ACEOF
+
+
 cat >>confdefs.h <<\_ACEOF
 #define HAVE_MYSQL 1
 _ACEOF
diff --git a/doc/html/team.shtml b/doc/html/team.shtml
index a9f285f90f2..e4c8934f260 100644
--- a/doc/html/team.shtml
+++ b/doc/html/team.shtml
@@ -60,12 +60,13 @@ Networking, Italy)</li>
 <li>Federico Sacerdoti (D.E. Shaw)<li>
 <li>Jeff Squyres (LAM MPI)</li>
 <li>Prashanth Tamraparni (HP, India)</li>
-<li>Adam Todorski (Rensselaer Polytechnic Institute)</li
+<li>Adam Todorski (Rensselaer Polytechnic Institute)</li>
 <li>Kevin Tew (LLNL/Bringham Young University)</li>
+<li>Tim Wickberg (Rensselaer Polytechnic Institute)</li>
 <li>Jay Windley (Linux NetworX)</li>
 <li>Anne-Marie Wunderlin (Bull)</li>
 </ul>
 
-<p style="text-align:center;">Last modified 5 September 2008</p>
+<p style="text-align:center;">Last modified 10 October 2008</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
index ecc4cfe72cc..800536c2493 100644
--- a/doc/man/Makefile.am
+++ b/doc/man/Makefile.am
@@ -24,7 +24,11 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_hostlist_destroy.3 \
 	man3/slurm_hostlist_shift.3 \
 	man3/slurm_allocate_resources.3 \
+	man3/slurm_allocate_resources_blocking.3 \
 	man3/slurm_allocation_lookup.3 \
+	man3/slurm_allocation_lookup_lite.3 \
+	man3/slurm_allocation_msg_thr_create.3 \
+	man3/slurm_allocation_msg_thr_destroy.3 \
 	man3/slurm_api_version.3 \
 	man3/slurm_checkpoint_able.3 \
 	man3/slurm_checkpoint_complete.3 \
@@ -34,27 +38,38 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_checkpoint_error.3 \
 	man3/slurm_checkpoint_failed.3 \
 	man3/slurm_checkpoint_restart.3 \
+	man3/slurm_checkpoint_task_complete.3 \
 	man3/slurm_checkpoint_vacate.3 \
+	man3/slurm_clear_trigger.3 \
 	man3/slurm_complete_job.3 \
 	man3/slurm_complete_job_step.3 \
 	man3/slurm_confirm_allocation.3 \
 	man3/slurm_free_ctl_conf.3 \
 	man3/slurm_free_job_info_msg.3 \
+	man3/slurm_free_job_alloc_info_response_msg.3 \
 	man3/slurm_free_job_step_create_response_msg.3 \
 	man3/slurm_free_job_step_info_response_msg.3 \
 	man3/slurm_free_node_info.3 \
+	man3/slurm_free_node_info_msg.3 \
 	man3/slurm_free_partition_info.3 \
+	man3/slurm_free_partition_info_msg.3 \
 	man3/slurm_free_resource_allocation_response_msg.3 \
+	man3/slurm_free_slurmd_status.3 \
 	man3/slurm_free_submit_response_response_msg.3 \
+	man3/slurm_free_trigger_msg.3 \
+	man3/slurm_get_checkpoint_file_path.3 \
 	man3/slurm_get_end_time.3 \
 	man3/slurm_get_errno.3 \
 	man3/slurm_get_job_steps.3 \
 	man3/slurm_get_rem_time.3 \
 	man3/slurm_get_select_jobinfo.3 \
+	man3/slurm_get_triggers.3 \
 	man3/slurm_init_job_desc_msg.3 \
 	man3/slurm_init_part_desc_msg.3 \
 	man3/slurm_job_step_create.3 \
 	man3/slurm_job_step_launch_t_init.3 \
+	man3/slurm_job_step_layout_get.3 \
+	man3/slurm_job_step_layout_free.3 \
 	man3/slurm_job_will_run.3 \
 	man3/slurm_jobinfo_ctx_get.3 \
 	man3/slurm_kill_job.3 \
@@ -64,8 +79,11 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_load_jobs.3 \
 	man3/slurm_load_node.3 \
 	man3/slurm_load_partitions.3 \
+	man3/slurm_load_slurmd_status.3 \
+	man3/slurm_notify_job.3 \
 	man3/slurm_perror.3 \
 	man3/slurm_pid2jobid.3 \
+	man3/slurm_ping.3 \
 	man3/slurm_print_ctl_conf.3 \
 	man3/slurm_print_job_info.3 \
 	man3/slurm_print_job_info_msg.3 \
@@ -75,20 +93,38 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_print_node_table.3 \
 	man3/slurm_print_partition_info.3 \
 	man3/slurm_print_partition_info_msg.3 \
+	man3/slurm_print_slurmd_status.3 \
+	man3/slurm_read_hostfile.3 \
 	man3/slurm_reconfigure.3 \
 	man3/slurm_resume.3 \
 	man3/slurm_requeue.3 \
+	man3/slurm_set_debug_level.3 \
+	man3/slurm_set_trigger.3 \
 	man3/slurm_shutdown.3 \
+	man3/slurm_signal_job.3 \
+	man3/slurm_signal_job_step.3 \
+	man3/slurm_slurmd_status.3 \
+	man3/slurm_sprint_job_info.3 \
+	man3/slurm_sprint_job_step_info.3 \
+	man3/slurm_sprint_node_table.3 \
+	man3/slurm_sprint_partition_info.3 \
 	man3/slurm_step_ctx_create.3 \
+	man3/slurm_step_ctx_create_no_alloc.3 \
+	man3/slurm_step_ctx_daemon_per_node_hack.3 \
 	man3/slurm_step_ctx_destroy.3 \
+	man3/slurm_step_ctx_params_t_init.3 \
 	man3/slurm_step_ctx_get.3 \
 	man3/slurm_step_launch.3 \
+	man3/slurm_step_launch_fwd_signal.3 \
 	man3/slurm_step_launch_abort.3 \
 	man3/slurm_step_launch_wait_finish.3 \
 	man3/slurm_step_launch_wait_start.3 \
 	man3/slurm_strerror.3 \
 	man3/slurm_submit_batch_job.3 \
 	man3/slurm_suspend.3 \
+	man3/slurm_terminate_job.3 \
+	man3/slurm_terminate_job_step.3 \
+	man3/slurm_trigger.3 \
 	man3/slurm_update_job.3 \
 	man3/slurm_update_node.3 \
 	man3/slurm_update_partition.3
diff --git a/doc/man/Makefile.in b/doc/man/Makefile.in
index 7aaf763ccfe..5ae613a5ef9 100644
--- a/doc/man/Makefile.in
+++ b/doc/man/Makefile.in
@@ -265,7 +265,11 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_hostlist_destroy.3 \
 	man3/slurm_hostlist_shift.3 \
 	man3/slurm_allocate_resources.3 \
+	man3/slurm_allocate_resources_blocking.3 \
 	man3/slurm_allocation_lookup.3 \
+	man3/slurm_allocation_lookup_lite.3 \
+	man3/slurm_allocation_msg_thr_create.3 \
+	man3/slurm_allocation_msg_thr_destroy.3 \
 	man3/slurm_api_version.3 \
 	man3/slurm_checkpoint_able.3 \
 	man3/slurm_checkpoint_complete.3 \
@@ -275,27 +279,38 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_checkpoint_error.3 \
 	man3/slurm_checkpoint_failed.3 \
 	man3/slurm_checkpoint_restart.3 \
+	man3/slurm_checkpoint_task_complete.3 \
 	man3/slurm_checkpoint_vacate.3 \
+	man3/slurm_clear_trigger.3 \
 	man3/slurm_complete_job.3 \
 	man3/slurm_complete_job_step.3 \
 	man3/slurm_confirm_allocation.3 \
 	man3/slurm_free_ctl_conf.3 \
 	man3/slurm_free_job_info_msg.3 \
+	man3/slurm_free_job_alloc_info_response_msg.3 \
 	man3/slurm_free_job_step_create_response_msg.3 \
 	man3/slurm_free_job_step_info_response_msg.3 \
 	man3/slurm_free_node_info.3 \
+	man3/slurm_free_node_info_msg.3 \
 	man3/slurm_free_partition_info.3 \
+	man3/slurm_free_partition_info_msg.3 \
 	man3/slurm_free_resource_allocation_response_msg.3 \
+	man3/slurm_free_slurmd_status.3 \
 	man3/slurm_free_submit_response_response_msg.3 \
+	man3/slurm_free_trigger_msg.3 \
+	man3/slurm_get_checkpoint_file_path.3 \
 	man3/slurm_get_end_time.3 \
 	man3/slurm_get_errno.3 \
 	man3/slurm_get_job_steps.3 \
 	man3/slurm_get_rem_time.3 \
 	man3/slurm_get_select_jobinfo.3 \
+	man3/slurm_get_triggers.3 \
 	man3/slurm_init_job_desc_msg.3 \
 	man3/slurm_init_part_desc_msg.3 \
 	man3/slurm_job_step_create.3 \
 	man3/slurm_job_step_launch_t_init.3 \
+	man3/slurm_job_step_layout_get.3 \
+	man3/slurm_job_step_layout_free.3 \
 	man3/slurm_job_will_run.3 \
 	man3/slurm_jobinfo_ctx_get.3 \
 	man3/slurm_kill_job.3 \
@@ -305,8 +320,11 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_load_jobs.3 \
 	man3/slurm_load_node.3 \
 	man3/slurm_load_partitions.3 \
+	man3/slurm_load_slurmd_status.3 \
+	man3/slurm_notify_job.3 \
 	man3/slurm_perror.3 \
 	man3/slurm_pid2jobid.3 \
+	man3/slurm_ping.3 \
 	man3/slurm_print_ctl_conf.3 \
 	man3/slurm_print_job_info.3 \
 	man3/slurm_print_job_info_msg.3 \
@@ -316,20 +334,38 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_print_node_table.3 \
 	man3/slurm_print_partition_info.3 \
 	man3/slurm_print_partition_info_msg.3 \
+	man3/slurm_print_slurmd_status.3 \
+	man3/slurm_read_hostfile.3 \
 	man3/slurm_reconfigure.3 \
 	man3/slurm_resume.3 \
 	man3/slurm_requeue.3 \
+	man3/slurm_set_debug_level.3 \
+	man3/slurm_set_trigger.3 \
 	man3/slurm_shutdown.3 \
+	man3/slurm_signal_job.3 \
+	man3/slurm_signal_job_step.3 \
+	man3/slurm_slurmd_status.3 \
+	man3/slurm_sprint_job_info.3 \
+	man3/slurm_sprint_job_step_info.3 \
+	man3/slurm_sprint_node_table.3 \
+	man3/slurm_sprint_partition_info.3 \
 	man3/slurm_step_ctx_create.3 \
+	man3/slurm_step_ctx_create_no_alloc.3 \
+	man3/slurm_step_ctx_daemon_per_node_hack.3 \
 	man3/slurm_step_ctx_destroy.3 \
+	man3/slurm_step_ctx_params_t_init.3 \
 	man3/slurm_step_ctx_get.3 \
 	man3/slurm_step_launch.3 \
+	man3/slurm_step_launch_fwd_signal.3 \
 	man3/slurm_step_launch_abort.3 \
 	man3/slurm_step_launch_wait_finish.3 \
 	man3/slurm_step_launch_wait_start.3 \
 	man3/slurm_strerror.3 \
 	man3/slurm_submit_batch_job.3 \
 	man3/slurm_suspend.3 \
+	man3/slurm_terminate_job.3 \
+	man3/slurm_terminate_job_step.3 \
+	man3/slurm_trigger.3 \
 	man3/slurm_update_job.3 \
 	man3/slurm_update_node.3 \
 	man3/slurm_update_partition.3
diff --git a/doc/man/man1/sacctmgr.1 b/doc/man/man1/sacctmgr.1
index 47895482317..cc6669ec925 100644
--- a/doc/man/man1/sacctmgr.1
+++ b/doc/man/man1/sacctmgr.1
@@ -218,13 +218,19 @@ To clear a previously set value use the modify command with a new value of \-1.
 \fIGrpCPUMins\fP=<max cpu hours> 
 Maximum number of CPU hours running jobs are able to be allocated in aggregate for 
 this association and all association which are children of this association.
-To clear a previously set value use the modify command with a new value of \-1.
+To clear a previously set value use the modify command with a new
+value of \-1.  (NOTE: this limit is not currently enforced in SLURM.
+You can still set this, but have to wait for future versions of SLURM
+before it is enforced.)
 
 .TP
 \fIGrpCPUs\fP=<max cpus>
 Maximum number of CPUs running jobs are able to be allocated in aggregate for 
 this association and all association which are children of this association.
-To clear a previously set value use the modify command with a new value of \-1.
+To clear a previously set value use the modify command with a new
+value of \-1.  (NOTE: this limit is not currently enforced in SLURM.
+You can still set this, but have to wait for future versions of SLURM
+before it is enforced.)
 
 .TP
 \fIGrpJobs\fP=<max cpus>
@@ -256,14 +262,20 @@ To clear a previously set value use the modify command with a new value of \-1.
 Maximum number of CPU minutes each job is able to use in this account.
 This is overridden if set directly on a user. 
 Default is the cluster's limit.
-To clear a previously set value use the modify command with a new value of \-1.
+To clear a previously set value use the modify command with a new
+value of \-1.  (NOTE: this limit is not currently enforced in SLURM.
+You can still set this, but have to wait for future versions of SLURM
+before it is enforced.)
 
 .TP
 \fIMaxCPUs\fP=<max cpus>
 Maximum number of CPUs each job is able to use in this account.
 This is overridden if set directly on a user. 
 Default is the cluster's limit.
-To clear a previously set value use the modify command with a new value of \-1.
+To clear a previously set value use the modify command with a new
+value of \-1.  (NOTE: this limit is not currently enforced in SLURM.
+You can still set this, but have to wait for future versions of SLURM
+before it is enforced.)
 
 .TP
 \fIMaxJobs\fP=<max jobs>
@@ -315,11 +327,15 @@ Name of SLURM partition these limits apply to.
 
 .TP
 \fIQosLevel\fP<operator><comma separated list of qos names>
-Specify Quality of Service that jobs are to run at for this account.
-Now consisting of Normal, Standby, Expedite, and Exempt.
-This is overridden if set directly on a user. 
-Setting an account's QosLevel to '' (two single quotes with nothing 
-between them) restores it's default setting.
+(For use with MOAB only.)
+Specify the default Quality of Service's that jobs are able to run at
+for this account.  To get a list of valid QOS's use 'sacctmgr list qos'. 
+This value will override its parent's value and push down to its
+children as the new default.  Setting a QosLevel to '' (two single
+quotes with nothing between them) restores its default setting.  You
+can also use the operator += and \-= to add or remove certain QOS's
+from a QOS list.
+
 Valid <operator> values include:
 .RS
 .TP 5
@@ -346,13 +362,19 @@ To clear a previously set value use the modify command with a new value of \-1.
 \fIGrpCPUMins\fP=<max cpu hours> 
 Maximum number of CPU hours running jobs are able to be allocated in aggregate for 
 this association and all association which are children of this association.
-To clear a previously set value use the modify command with a new value of \-1.
+To clear a previously set value use the modify command with a new
+value of \-1.  (NOTE: this limit is not currently enforced in SLURM.
+You can still set this, but have to wait for future versions of SLURM
+before it is enforced.)
 
 .TP
 \fIGrpCPUs\fP=<max cpus>
 Maximum number of CPUs running jobs are able to be allocated in aggregate for 
 this association and all association which are children of this association.
-To clear a previously set value use the modify command with a new value of \-1.
+To clear a previously set value use the modify command with a new
+value of \-1.  (NOTE: this limit is not currently enforced in SLURM.
+You can still set this, but have to wait for future versions of SLURM
+before it is enforced.)
 
 .TP
 \fIGrpJobs\fP=<max cpus>
@@ -384,7 +406,20 @@ To clear a previously set value use the modify command with a new value of \-1.
 Maximum number of CPU minutes each job is able to use in this account.
 This is overridden if set directly on an account or user. 
 Default is no limit.
-To clear a previously set value use the modify command with a new value of \-1.
+To clear a previously set value use the modify command with a new
+value of \-1.  (NOTE: this limit is not currently enforced in SLURM.
+You can still set this, but have to wait for future versions of SLURM
+before it is enforced.)
+ 
+.TP
+\fIMaxCPUs\fP=<max cpus>
+Maximum number of cpus each job is able to use in this account.
+This is overridden if set directly on an account or user. 
+Default is no limit.
+To clear a previously set value use the modify command with a new
+value of \-1.  (NOTE: this limit is not currently enforced in SLURM.
+You can still set this, but have to wait for future versions of SLURM
+before it is enforced.)
 
 .TP
 \fIMaxJobs\fP=<max jobs>
@@ -425,11 +460,13 @@ configuration file for some Slurm\-managed cluster.
 
 .TP
 \fIQosLevel\fP<operator><comma separated list of qos names>
-Specify Quality of Service that jobs are to run at for this account.
-Now consisting of Normal, Standby, Expedite, and Exempt.
-This is overridden if set directly on an account user. 
-Setting an account's QosLevel to '' (two single quotes with nothing 
-between them) restores it's default setting.
+(For use with MOAB only.)
+Specify the default Quality of Service's that jobs are able to run at
+for this cluster.  To get a list of valid QOS's use 'sacctmgr list qos'. 
+This value is overridden if a child has a QOS value directly set.
+Setting a QosLevel to '' (two single quotes with nothing between them)
+restores its default setting.  You can also use the operator += and
+\-= to add or remove certain QOS's from a QOS list.
 Valid <operator> values include:
 .RS
 .TP 5
@@ -492,13 +529,19 @@ To clear a previously set value use the modify command with a new value of \-1.
 .TP
 \fIMaxCPUMins\fP=<max cpu minutes> 
 Maximum number of CPU minutes each job is able to use for this user.
-To clear a previously set value use the modify command with a new value of \-1.
+To clear a previously set value use the modify command with a new
+value of \-1.  (NOTE: this limit is not currently enforced in SLURM.
+You can still set this, but have to wait for future versions of SLURM
+before it is enforced.)
 
 .TP
 \fIMaxCPUs\fP=<max cpus>
 Maximum number of CPUs each job is able to use for this user.
 Default is the account's limit.
-To clear a previously set value use the modify command with a new value of \-1.
+To clear a previously set value use the modify command with a new
+value of \-1.  (NOTE: this limit is not currently enforced in SLURM.
+You can still set this, but have to wait for future versions of SLURM
+before it is enforced.)
 
 .TP
 \fIMaxJobs\fP=<max jobs>
@@ -533,12 +576,13 @@ Name of SLURM partition these limits apply to.
 
 .TP
 \fIQosLevel\fP<operator><comma separated list of qos names>
-Specify Quality of Service that jobs are to run at for this account.
-Now consisting of Normal, Standby, Expedite, and Exempt.
-This is overridden if set directly on an account user. 
-Setting an account's QosLevel to '' (two single quotes with nothing 
-between them) restores it's default setting.
-Valid <operator> values include:
+(For use with MOAB only.)
+Specify the default Quality of Service's that jobs are able to run at
+for this user.  To get a list of valid QOS's use 'sacctmgr list qos'. 
+This value will override its parent's value.
+Setting a QosLevel to '' (two single quotes with nothing between them)
+restores its default setting.  You can also use the operator += and
+\-= to add or remove certain QOS's from a QOS list.
 .RS
 .TP 5
 \fB=\fR
@@ -591,71 +635,180 @@ is always a default for any cluster and does not need to be defined.
 
 To edit/create a file start with a cluster line for the new cluster
 
-\fBCluster\ \-\ cluster_name\fP
+\fBCluster\ \-\ cluster_name:MaxNodesPerJob=15\fP
+
+Anything included on this line will be the defaults for all
+associations on this cluster.  These options are as follows...
+.TP
+GrpCPUMins=  
+Maximum number of CPU hours running jobs are able to
+be allocated in aggregate for this association and all association
+which are children of this association. (NOTE: this limit is not
+currently enforced in SLURM. You can still set this, but have to wait
+for future versions of SLURM before it is enforced.)
+.TP
+GrpCPUs= 
+Maximum number of CPUs running jobs are able to be
+allocated in aggregate for this association and all association which
+are children of this association. (NOTE: this limit is not currently
+enforced in SLURM. You can still set this, but have to wait for future
+versions of SLURM before it is enforced.)
+.TP
+GrpJobs= 
+Maximum number of running jobs in aggregate for this
+association and all association which are children of this association.
+.TP
+GrpNodes= 
+Maximum number of nodes running jobs are able to be
+allocated in aggregate for this association and all association which
+are children of this association.
+.TP
+GrpSubmitJobs= 
+Maximum number of jobs which can be in a pending or
+running state at any time in aggregate for this association and all
+association which are children of this association. 
+.TP
+GrpWall= 
+Maximum wall clock time running jobs are able to be
+allocated in aggregate for this association and all association which
+are children of this association. 
+.TP
+FairShare= 
+To be used with a scheduler like MOAB to determine priority.
+.TP
+MaxJobs= 
+Maximum number of jobs the children of this account can run.
+.TP
+MaxNodesPerJob= 
+Maximum number of nodes per job the children of this account can run.
+.TP
+MaxProcSecondsPerJob= 
+Maximum cpu seconds children of this accounts jobs can run.
+.TP
+MaxWallDurationPerJob= 
+Maximum time (not related to job size) children of this accounts jobs can run.
+.TP
+QOS= 
+Comma separated list of Quality of Service names (Defined in sacctmgr).
+.TP
 
 Followed by Accounts you want in this fashion...
 
-\fBAccount\ \-\ cs:MaxNodesPerJob=5:MaxJobs=4:MaxProcSecondsPerJob=20:FairShare=399:MaxWallDurationPerJob=40:Description='Computer Science':Organization='LC'\fP
+\fBParent\ \-\ root\fP (Defined by default)
+.br
+\fBAccount\ \-\ cs\fP:MaxNodesPerJob=5:MaxJobs=4:MaxProcSecondsPerJob=20:FairShare=399:MaxWallDurationPerJob=40:Description='Computer Science':Organization='LC'
+.br
+\fBParent\ \-\ cs\fP
+.br
+\fBAccount\ \-\ test\fP:MaxNodesPerJob=1:MaxJobs=1:MaxProcSecondsPerJob=1:FairShare=1:MaxWallDurationPerJob=1:Description='Test Account':Organization='Test'
 
+.TP
 Any of the options after a ':' can be left out and they can be in any order.
 If you want to add any sub accounts just list the Parent THAT HAS ALREADY 
 BEEN CREATED before the account line in this fashion...
-
+.TP
 All account options are
-.br
-Description= \- a brief description of the account
-.br
-FairShare= \- to be used with a scheduler like MOAB to determine priority
-.br
-MaxJobs= \- maximum number of jobs the children of this account can run
-.br
-MaxNodesPerJob= \- maximum number of nodes per job the children of this
-account can run
-.br
-MaxProcSecondsPerJob= \- maximum cpu seconds children of this accounts
-jobs can run
-.br
-MaxWallDurationPerJob= \- maximum time (not related to job size)
-children of this accounts jobs can run
-.br
-Organization= \- Name of organization that owns this account
-.br
-QOS= \- Comma separated list of Quality of Service names (Defined in sacctmgr)
-.br
-
-\fBParent\ \-\ cs
-.br
-Account\ \-\ test:MaxNodesPerJob=1:MaxJobs=1:MaxProcSecondsPerJob=1:FairShare=1:MaxWallDurationPerJob=1:Description='Test Account':Organization='Test'\fP
+.TP
+Description= 
+A brief description of the account.
+.TP
+GrpCPUMins=  
+Maximum number of CPU hours running jobs are able to
+be allocated in aggregate for this association and all association
+which are children of this association. (NOTE: this limit is not
+currently enforced in SLURM. You can still set this, but have to wait
+for future versions of SLURM before it is enforced.)
+.TP
+GrpCPUs= 
+Maximum number of CPUs running jobs are able to be
+allocated in aggregate for this association and all association which
+are children of this association. (NOTE: this limit is not currently
+enforced in SLURM. You can still set this, but have to wait for future
+versions of SLURM before it is enforced.)
+.TP
+GrpJobs= 
+Maximum number of running jobs in aggregate for this
+association and all association which are children of this association.
+.TP
+GrpNodes= 
+Maximum number of nodes running jobs are able to be
+allocated in aggregate for this association and all association which
+are children of this association.
+.TP
+GrpSubmitJobs= 
+Maximum number of jobs which can be in a pending or
+running state at any time in aggregate for this association and all
+association which are children of this association. 
+.TP
+GrpWall= 
+Maximum wall clock time running jobs are able to be
+allocated in aggregate for this association and all association which
+are children of this association. 
+.TP
+FairShare= 
+To be used with a scheduler like MOAB to determine priority.
+.TP
+MaxJobs= 
+Maximum number of jobs the children of this account can run.
+.TP
+MaxNodesPerJob= 
+Maximum number of nodes per job the children of this account can run.
+.TP
+MaxProcSecondsPerJob= 
+Maximum cpu seconds children of this accounts jobs can run.
+.TP
+MaxWallDurationPerJob= 
+Maximum time (not related to job size) children of this accounts jobs can run.
+.TP
+Organization= 
+Name of organization that owns this account.
+.TP
+QOS(=,+=,\-=) 
+Comma separated list of Quality of Service names (Defined in sacctmgr).
+.TP
 
+.TP
 To add users to a account add a line like this after a Parent \- line
+\fBParent\ \-\ test\fP
+.br
+\fBUser\ \-\ adam\fP:MaxNodesPerJob=2:MaxJobs=3:MaxProcSecondsPerJob=4:FairShare=1:MaxWallDurationPerJob=1:AdminLevel=Operator:Coordinator='test'
 
-\fBUser\ \-\ lipari:MaxNodesPerJob=2:MaxJobs=3:MaxProcSecondsPerJob=4:FairShare=1:MaxWallDurationPerJob=1:AdminLevel=Operator:Coordinator='test'\fP
-
+.TP
 All user options are
+.TP
+AdminLevel=
+Type of admin this user is (Administrator, Operator)
 .br
-AdminLevel= \- Type of admin this user is (Administrator, Operator)
 \fBMust be defined on the first occurrence of the user.\fP
+.TP
+Coordinator=
+Comma separated list of accounts this user is coordinator over
 .br
-Coordinator= \- Comma separated list of accounts this user is
-coordinator over \fBMust be defined on the first occurrence of the user.\fP
-.br
-DefaultAccount= \- system wide default account name
 \fBMust be defined on the first occurrence of the user.\fP
+.TP
+DefaultAccount=
+system wide default account name
 .br
-FairShare= \- to be used with a scheduler like MOAB to determine priority
-.br
-MaxJobs= \- maximum number of jobs this user can run
-.br
-MaxNodesPerJob= \- maximum number of nodes per job this user can run
-.br
-MaxProcSecondsPerJob= \- maximum cpu seconds this user can run per job
-.br
-MaxWallDurationPerJob= \- maximum time (not related to job size) this
-user can run
-.br
-QOS= \- Comma separated list of Quality of Service names (Defined in sacctmgr)
 \fBMust be defined on the first occurrence of the user.\fP
-.br
+.TP
+FairShare= 
+To be used with a scheduler like MOAB to determine priority.
+.TP
+MaxJobs= 
+Maximum number of jobs this user can run.
+.TP
+MaxNodesPerJob= 
+Maximum number of nodes per job this user can run.
+.TP
+MaxProcSecondsPerJob= 
+Maximum cpu seconds this user can run per job.
+.TP
+MaxWallDurationPerJob= 
+Maximum time (not related to job size) this user can run.
+.TP
+QOS(=,+=,\-=) 
+Comma separated list of Quality of Service names (Defined in sacctmgr).
+.RE
 
 
 .SH "EXAMPLES"
@@ -671,13 +824,65 @@ QOS= \- Comma separated list of Quality of Service names (Defined in sacctmgr)
 .br
 > sacctmgr create user name=adam cluster=tux account=physics fairshare=10
 .br
-> sacctmgr modify user with name=adam cluster=tux account=physics set
+> sacctmgr modify user name=adam cluster=tux account=physics set
   maxjobs=2 maxtime=30:00
 .br
 > sacctmgr dump cluster=tux tux_data_file
 .br
 > sacctmgr load tux_data_file
 .br
+
+.br
+When modifying an object, placing the key words 'set' and the
+optional 'where' is critical to perform correctly; below are examples to
+produce correct results.  As a rule of thumb, anything you put in front
+of the set will be used as a quantifier.  If you want to put a
+quantifier after the key word 'set' you should use the key
+word 'where'.
+.br
+
+.br
+wrong> sacctmgr modify user name=adam set fairshare=10 cluster=tux
+.br
+
+.br
+This will produce an error as the above line reads modify user adam
+set fairshare=10 and cluster=tux.  
+.br
+
+.br
+right> sacctmgr modify user name=adam cluster=tux set fairshare=10
+.br
+right> sacctmgr modify user name=adam set fairshare=10 where cluster=tux 
+.br
+
+.br
+(For use with MOAB only)
+When changing qos for something only use the '=' operator when wanting
+to explicitly set the qos to something.  In most cases you will want
+to use the '+=' or '-=' operator to either add to or remove from the
+existing qos already in place.
+.br
+
+.br
+If a user already has qos of normal,standby for a parent or it was
+explicitly set you should use qos+=expedite to add this to the list in
+this fashion.
+.br
+
+.br
+> sacctmgr modify user name=adam set qos+=expedite
+.br
+
+.br
+If you are looking to add the qos expedite to only a certain
+account and/or cluster you can do that by specifying them in the
+sacctmgr line.
+.br
+
+.br
+> sacctmgr modify user name=adam acct=this cluster=tux set qos+=expedite
+.br
 .ec
 
 .SH "COPYING"
diff --git a/doc/man/man1/salloc.1 b/doc/man/man1/salloc.1
index b444ca61998..c4451cfaa7a 100644
--- a/doc/man/man1/salloc.1
+++ b/doc/man/man1/salloc.1
@@ -100,6 +100,11 @@ The \fIlist\fR of constraints may include multiple features separated
 by ampersand (AND) and/or vertical bar (OR) operators.
 For example: \fB\-\-constraint="opteron&video"\fR or 
 \fB\-\-constraint="fast|faster"\fR.
+In the first example, only nodes having both the feature "opteron" AND
+the feature "video" will be used.
+There is no mechanism to specify that you want one node with feature
+"opteron" and another node with feature "video" in that case that no
+node has both features.
 If only one of a set of possible options should be used for all allocated 
 nodes, then use the OR operator and enclose the options within square brackets. 
 For example: "\fB\-\-constraint="[rack1|rack2|rack3|rack4]"\fR might 
diff --git a/doc/man/man1/sbatch.1 b/doc/man/man1/sbatch.1
index e8283f90c6f..45290f9ce7f 100644
--- a/doc/man/man1/sbatch.1
+++ b/doc/man/man1/sbatch.1
@@ -90,6 +90,11 @@ The \fIlist\fR of constraints may include multiple features separated
 by ampersand (AND) and/or vertical bar (OR) operators.
 For example: \fB\-\-constraint="opteron&video"\fR or 
 \fB\-\-constraint="fast|faster"\fR.
+In the first example, only nodes having both the feature "opteron" AND
+the feature "video" will be used.
+There is no mechanism to specify that you want one node with feature
+"opteron" and another node with feature "video" in that case that no
+node has both features.
 If only one of a set of possible options should be used for all allocated 
 nodes, then use the OR operator and enclose the options within square brackets. 
 For example: "\fB\-\-constraint="[rack1|rack2|rack3|rack4]"\fR might 
diff --git a/doc/man/man1/sreport.1 b/doc/man/man1/sreport.1
index 9e532c9c4db..aa2ef4857a4 100644
--- a/doc/man/man1/sreport.1
+++ b/doc/man/man1/sreport.1
@@ -116,7 +116,7 @@ This is an independent command with no options meant for use in interactive mode
 .TP
 \fBversion\fP
 Display the version number of sreport being executed.
-     -q or --quiet: equivalent to \"quiet\" command                        \n\
+     \-q or \-\-quiet: equivalent to \"quiet\" command                        \n\
 
 .TP
 \fB!!\fP
@@ -128,9 +128,48 @@ Repeat the last command executed.
 
 .TP
 Various reports are as follows...
-     cluster - Utilization
-     job     - Sizes
-     user    - TopUsage
+     cluster \- AccountUtilizationByUser, UserUtilizationByAccount, Utilization
+     job     \- Sizes
+     user    \- TopUsage
+
+.TP
+
+.TP
+REPORT DESCRIPTION
+.RS
+.TP
+.B cluster AccountUtilizationByUser 
+This report will display account utilization as it appears on the
+hierarchical tree.  Starting with the specified account or the
+root account by default this report will list the underlying
+usage with a sum on each level.  Use the 'tree' option to span
+the tree for better visibility.
+.TP 
+.B cluster UserUtilizationByAccount
+This report will display users by account in order of utilization without
+grouping multiple accounts by user into one, but displaying them
+on separate lines.
+.TP
+.B cluster Utilization
+This report will display total usage divided by Allocated, Down,
+Idle, and reserved time for selected clusters.  Reserved time
+refers to time that a job was waiting for resources after the job
+had become eligible.  If the value is not of importance for you
+the number should be grouped with idle time.
+
+.TP
+.B job Sizes
+This report will display the amount of time used for job ranges
+specified by the 'grouping=' option.  Only a single level in the tree
+is displayed defaulting to the root dir.  If you specify other
+accounts with the 'account=' option you will receive those accounts'
+sub accounts.
+
+.TP
+.B user TopUsage
+Displays the top users on a cluster.  Use the group option to group
+accounts together.  The default is to have a different line for each
+user account combination.  
 
 .TP
 Each report type has various options...
@@ -158,6 +197,10 @@ CLUSTER
 .TP
 .B Names=<OPT>
 List of clusters to include in report.  Default is local cluster.
+.TP
+.B Tree
+When used with the AccountUtilizationByUser report will span the
+accounts as they appear in the hierarchy.
 .RE
 
 .TP
@@ -182,6 +225,10 @@ List of jobs/steps to include in report.  Default is all.
 .B Partitions=<OPT>
 List of partitions jobs ran on to include in report.  Default is all.
 .TP
+.B PrintJobCount
+When used with the Sizes report will print number of jobs ran instead
+of time used.  
+.TP
 .B Users=<OPT>
 List of users jobs to include in report.  Default is all.
 .RE
diff --git a/doc/man/man1/srun.1 b/doc/man/man1/srun.1
index c3a4228f487..19c7845d6b6 100644
--- a/doc/man/man1/srun.1
+++ b/doc/man/man1/srun.1
@@ -98,6 +98,11 @@ The \fIlist\fR of constraints may include multiple features separated
 by ampersand (AND) and/or vertical bar (OR) operators.
 For example: \fB\-\-constraint="opteron&video"\fR or 
 \fB\-\-constraint="fast|faster"\fR.
+In the first example, only nodes having both the feature "opteron" AND
+the feature "video" will be used.
+There is no mechanism to specify that you want one node with feature
+"opteron" and another node with feature "video" in that case that no
+node has both features.
 If only one of a set of possible options should be used for all allocated 
 nodes, then use the OR operator and enclose the options within square brackets. 
 For example: "\fB\-\-constraint="[rack1|rack2|rack3|rack4]"\fR might 
diff --git a/doc/man/man3/slurm_allocate_resources.3 b/doc/man/man3/slurm_allocate_resources.3
index e74f053d21e..d157071c98f 100644
--- a/doc/man/man3/slurm_allocate_resources.3
+++ b/doc/man/man3/slurm_allocate_resources.3
@@ -1,9 +1,11 @@
 .TH "Slurm API" "3" "April 2006" "Morris Jette" "Slurm job initiation functions"
 .SH "NAME"
-slurm_allocate_resources, 
-slurm_allocation_lookup, slurm_confirm_allocation, 
+slurm_allocate_resources, slurm_allocate_resources_blocking,
+slurm_allocation_msg_thr_create, slurm_allocation_msg_thr_destroy,
+slurm_allocation_lookup, slurm_allocation_lookup_lite,
+slurm_confirm_allocation, 
 slurm_free_submit_response_response_msg, slurm_init_job_desc_msg, 
-slurm_job_will_run, slurm_submit_batch_job
+slurm_job_will_run, slurm_read_hostfile, slurm_submit_batch_job
 \- Slurm job initiation functions
 .SH "SYNTAX"
 .LP 
@@ -17,6 +19,28 @@ int \fBslurm_allocate_resources\fR (
 .br 
 );
 .LP 
+resource_allocation_response_msg_t *\fBslurm_allocate_resources_blocking\fR (
+.br 
+	job_desc_msg_t *\fIjob_desc_msg_ptr\fP,
+.br 
+	time_t \fItimeout\fP, void \fI(*pending_callback)(uint32_t job_id)\fP
+.br 
+);
+.LP 
+allocation_msg_thread_t *\fBslurm_allocation_msg_thr_create\fR (
+.br 
+	uint16_t *\fIport\fP,
+.br 
+	slurm_allocation_callbacks_t *\fIcallbacks\fP
+.br 
+);
+.LP 
+void *\fBslurm_allocation_msg_thr_destroy\fR (
+.br 
+	allocation_msg_thread_t *\fIslurm_alloc_msg_thr_ptr\fP
+.br 
+);
+.LP 
 int \fBslurm_allocation_lookup\fR {
 .br
 	uint32_t \fIjobid\fP,
@@ -25,6 +49,14 @@ int \fBslurm_allocation_lookup\fR {
 .br
 );
 .LP 
+int \fBslurm_allocation_lookup_lite\fR {
+.br
+	uint32_t \fIjobid\fP,
+.br
+	resource_allocation_response_msg_t **\fIslurm_alloc_msg_pptr\fP
+.br
+);
+.LP 
 int \fBslurm_confirm_allocation\fR (
 .br 
 	old_job_alloc_msg_t *\fIold_job_desc_msg_ptr\fP,
@@ -57,6 +89,12 @@ int \fBslurm_job_will_run\fR (
 .br 
 );
 .LP
+int \fBslurm_read_hostfile\fR (
+.br 
+	char *\fIfilename\fP, int \fIn\fP
+.br 
+);
+.LP
 int \fBslurm_submit_batch_job\fR (
 .br 
 	job_desc_msg_t *\fIjob_desc_msg_ptr\fP,
@@ -70,6 +108,10 @@ int \fBslurm_submit_batch_job\fR (
 \fIjob_desc_msg_ptr\fP
 Specifies the pointer to a job request specification. See slurm.h for full details 
 on the data structure's contents. 
+.TP
+\fIcallbacks\fP
+Specifies the pointer to an allocation callbacks structure.  See
+slurm.h for full details on the data structure's contents.
 .TP 
 \fIold_job_desc_msg_ptr\fP
 Specifies the pointer to a description of an existing job. See slurm.h for 
@@ -83,13 +125,21 @@ structure's contents.
 .TP 
 \fIslurm_alloc_msg_ptr\fP
 Specifies the pointer to the structure to be created and filled in by the function 
-\fIslurm_allocate_resources\fP, \fIslurm_allocation_lookup\fP, 
+\fIslurm_allocate_resources\fP,
+\fIslurm_allocate_resources_blocking\fP,
+\fIslurm_allocation_lookup\fP, \fIslurm_allocation_lookup_lite\fP, 
 \fIslurm_confirm_allocation\fP or \fIslurm_job_will_run\fP.
 .TP 
+\fIslurm_alloc_msg_thr_ptr\fP
+Specifies the pointer to the structure created and returned by the
+function \fIslurm_allocation_msg_thr_create\fP.  Must be destroyed
+with function \fIslurm_allocation_msg_thr_destroy\fP.
+.TP 
 \fIslurm_submit_msg_pptr\fP
 Specifies the double pointer to the structure to be created and filled with a description 
-of the created job: job ID, etc. See slurm.h for full details on the data structure's contents. 
-.TP 
+of the created job: job ID, etc. See slurm.h for full details on the
+data structure's contents. 
+.TP
 \fIslurm_submit_msg_ptr\fP
 Specifies the pointer to the structure to be created and filled in by the function \fIslurm_submit_batch_job\fP.
 .SH "DESCRIPTION"
@@ -100,16 +150,43 @@ count or time allocation are outside of the partition's limits then a job
 entry will be created, a warning indication will be placed in the \fIerror_code\fP field of the response message, and the job will be left 
 queued until the partition's limits are changed.
 Always release the response message when no longer required using 
-the function \fBslurm_free_resource_allocation_response_msg\fR.
-.LP 
+the function \fBslurm_free_resource_allocation_response_msg\fR.  This
+function only makes the request once.  If the allocation is not
+available immediately the node_cnt variable in the resp will be 0.  If
+you want a function that will block until either an error is received
+or an allocation is granted you can use the
+\fIslurm_allocate_resources_blocking\fP function described below. 
+.LP
+\fBslurm_allocate_resources_blocking\fR Request a resource allocation for a
+job.  This call will block until the allocation is granted, an error
+occurs, or the specified timeout limit is reached.  The \fIpending_callback\fP
+parameter will be called if the allocation is not available
+immediately and the immediate flag is not set in the request.  This can
+be used to get the jobid of the job while waiting for the allocation
+to become available.  On failure NULL is returned and errno is set.
+.LP
+\fBslurm_allocation_msg_thr_create\fR Startup a message handler
+talking with the controller dealing with messages from the controller
+during an allocation. Callback functions are declared in the
+\fIcallbacks\fP parameter and will be called when a corresponding
+message is received from the controller.  This message thread is
+needed to receive messages from the controller about node failure in
+an allocation and other important messages.  Although technically not
+required, it could be very helpful to inform about problems with the
+allocation. 
+.LP
+\fBslurm_allocation_msg_thr_destroy\fR Shutdown the message handler
+ talking with the controller dealing with messages from the controller during
+ an allocation. 
+.LP
 \fBslurm_confirm_allocation\fR Return detailed information on a specific 
 existing job allocation. \fBOBSOLETE FUNCTION: Use slurm_allocation_lookup
 instead.\fR This function may only be successfully executed by the job's 
 owner or user root.
 .LP 
 \fBslurm_free_resource_allocation_response_msg\fR Release the storage generated in response 
-to a call of the function \fBslurm_allocate_resources\fR, or
-\fBslurm_allocation_lookup\fR.
+to a call of the function \fBslurm_allocate_resources\fR, 
+\fBslurm_allocation_lookup\fR, or \fBslurm_allocation_lookup_lite\fR.
 .LP 
 \fBslurm_free_submit_response_msg\fR Release the storage generated in response 
 to a call of the function \fBslurm_submit_batch_job\fR.
@@ -119,6 +196,13 @@ Execute this function before issuing a request to submit or modify a job.
 .LP 
 \fBslurm_job_will_run\fR Determine if the supplied job description could be executed immediately. 
 .LP 
+\fBslurm_read_hostfile\fR Read a SLURM hostfile specified by
+"filename".  "filename" must contain a list of SLURM NodeNames, one
+per line.  Reads up to "n" number of hostnames from the file. Returns
+a string representing a hostlist ranged string of the contents
+of the file.  This is a helper function, it does not contact any SLURM
+daemons.   
+.LP 
 \fBslurm_submit_batch_job\fR Submit a job for later execution. Note that if 
 the job's requested node count or time allocation are outside of the partition's limits then a job entry will be created, a warning indication will be placed in the \fIerror_code\fP field of the response message, and the job will be left queued until the partition's limits are changed and resources are available.  Always release the response message when no 
 longer required using the function \fBslurm_free_submit_response_msg\fR.
@@ -186,10 +270,14 @@ the partition's time limit.
 .LP
 \fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with 
 SLURM controller.
-.SH "EXAMPLE"
+.SH "NON-BLOCKING EXAMPLE"
 .LP 
 #include <stdio.h>
 .br
+#include <stdlib.h>
+.br
+#include <signal.h>
+.br
 #include <slurm/slurm.h>
 .br
 #include <slurm/slurm_errno.h>
@@ -202,16 +290,20 @@ int main (int argc, char *argv[])
 .br 
 	resource_allocation_response_msg_t* slurm_alloc_msg_ptr ;
 .LP 
-	slurm_init_job_desc_msg( &job_mesg );
+	slurm_init_job_desc_msg( &job_desc_msg );
 .br 
-	job_mesg. name = ("job01\0");
+	job_desc_msg. name = ("job01\0");
 .br 
-	job_mesg. min_memory = 1024;
+	job_desc_msg. job_min_memory = 1024;
 .br 
-	job_mesg. time_limit = 200;
+	job_desc_msg. time_limit = 200;
 .br 
-	job_mesg. num_nodes = 400;
+	job_desc_msg. min_nodes = 400;
 .br 
+	job_desc_msg. user_id = getuid();
+.br
+	job_desc_msg. group_id = getgid();
+.br
 	if (slurm_allocate_resources(&job_desc_msg,
 .br
 	                             &slurm_alloc_msg_ptr)) {
@@ -228,9 +320,77 @@ int main (int argc, char *argv[])
 .br
 	        slurm_alloc_msg_ptr\->job_id );
 .br 
-	if (slurm_job_kill(slurm_alloc_msg_ptr\->
+	if (slurm_kill_job(slurm_alloc_msg_ptr\->job_id, SIGKILL, 0)) {
+.br 
+		printf ("kill errno %d\\n", slurm_get_errno());
+.br 
+		exit (1);
+.br 
+	}
+.br
+	printf ("canceled job_id %u\\n", 
+.br
+	        slurm_alloc_msg_ptr\->job_id );
+.br 
+	slurm_free_resource_allocation_response_msg(
+.br
+			slurm_alloc_msg_ptr);
+.br 
+	exit (0);
+.br 
+}
+
+.SH "BLOCKING EXAMPLE"
+.LP 
+#include <stdio.h>
+.br
+#include <stdlib.h>
 .br
-	                     job_id, SIGKILL)) {
+#include <signal.h>
+.br
+#include <slurm/slurm.h>
+.br
+#include <slurm/slurm_errno.h>
+.LP 
+int main (int argc, char *argv[])
+.br 
+{
+.br 
+	job_desc_msg_t job_desc_msg;
+.br 
+	resource_allocation_response_msg_t* slurm_alloc_msg_ptr ;
+.LP 
+	slurm_init_job_desc_msg( &job_desc_msg );
+.br 
+	job_desc_msg. name = ("job01\0");
+.br 
+	job_desc_msg. job_min_memory = 1024;
+.br 
+	job_desc_msg. time_limit = 200;
+.br 
+	job_desc_msg. min_nodes = 400;
+.br 
+	job_desc_msg. user_id = getuid();
+.br
+	job_desc_msg. group_id = getgid();
+.br
+	if (!(slurm_alloc_msg_ptr = 
+.br
+	      slurm_allocate_resources_blocking(&job_desc_msg, 0, NULL))) {
+.br
+		slurm_perror ("slurm_allocate_resources_blocking error");
+.br
+		exit (1);
+.br
+	}
+.br 
+	printf ("Allocated nodes %s to job_id %u\\n", 
+.br 
+	        slurm_alloc_msg_ptr\->node_list, 
+.br
+	        slurm_alloc_msg_ptr\->job_id );
+.br 
+	if (slurm_kill_job(slurm_alloc_msg_ptr\->job_id, SIGKILL, 0)) {
 .br 
 		printf ("kill errno %d\\n", slurm_get_errno());
 .br 
diff --git a/doc/man/man3/slurm_allocate_resources_blocking.3 b/doc/man/man3/slurm_allocate_resources_blocking.3
new file mode 100644
index 00000000000..6534eeb96c7
--- /dev/null
+++ b/doc/man/man3/slurm_allocate_resources_blocking.3
@@ -0,0 +1 @@
+.so man3/slurm_allocate_resources.3
diff --git a/doc/man/man3/slurm_allocation_lookup_lite.3 b/doc/man/man3/slurm_allocation_lookup_lite.3
new file mode 100644
index 00000000000..6534eeb96c7
--- /dev/null
+++ b/doc/man/man3/slurm_allocation_lookup_lite.3
@@ -0,0 +1 @@
+.so man3/slurm_allocate_resources.3
diff --git a/doc/man/man3/slurm_allocation_msg_thr_create.3 b/doc/man/man3/slurm_allocation_msg_thr_create.3
new file mode 100644
index 00000000000..6534eeb96c7
--- /dev/null
+++ b/doc/man/man3/slurm_allocation_msg_thr_create.3
@@ -0,0 +1 @@
+.so man3/slurm_allocate_resources.3
diff --git a/doc/man/man3/slurm_allocation_msg_thr_destroy.3 b/doc/man/man3/slurm_allocation_msg_thr_destroy.3
new file mode 100644
index 00000000000..6534eeb96c7
--- /dev/null
+++ b/doc/man/man3/slurm_allocation_msg_thr_destroy.3
@@ -0,0 +1 @@
+.so man3/slurm_allocate_resources.3
diff --git a/doc/man/man3/slurm_checkpoint_task_complete.3 b/doc/man/man3/slurm_checkpoint_task_complete.3
new file mode 100644
index 00000000000..32120a6bbb2
--- /dev/null
+++ b/doc/man/man3/slurm_checkpoint_task_complete.3
@@ -0,0 +1 @@
+.so man3/slurm_checkpoint_error.3
diff --git a/doc/man/man3/slurm_clear_trigger.3 b/doc/man/man3/slurm_clear_trigger.3
new file mode 100644
index 00000000000..2fd720318c8
--- /dev/null
+++ b/doc/man/man3/slurm_clear_trigger.3
@@ -0,0 +1 @@
+.so man3/slurm_trigger.3
diff --git a/doc/man/man3/slurm_free_job_alloc_info_response_msg.3 b/doc/man/man3/slurm_free_job_alloc_info_response_msg.3
new file mode 100644
index 00000000000..836ffa79b47
--- /dev/null
+++ b/doc/man/man3/slurm_free_job_alloc_info_response_msg.3
@@ -0,0 +1 @@
+.so man3/slurm_free_job_info_msg.3
diff --git a/doc/man/man3/slurm_free_job_info_msg.3 b/doc/man/man3/slurm_free_job_info_msg.3
index c31031800d4..ce1b8dabd5b 100644
--- a/doc/man/man3/slurm_free_job_info_msg.3
+++ b/doc/man/man3/slurm_free_job_info_msg.3
@@ -1,7 +1,7 @@
 .TH "Slurm API" "3" "September 2006" "Morris Jette" "Slurm job information reporting functions"
 .SH "NAME"
-slurm_free_job_info_msg, slurm_get_end_time,
-slurm_get_rem_time, slurm_get_select_jobinfo,
+slurm_free_job_alloc_info_response_msg, slurm_free_job_info_msg, 
+slurm_get_end_time, slurm_get_rem_time, slurm_get_select_jobinfo,
 slurm_load_jobs, slurm_pid2jobid, 
 slurm_print_job_info, slurm_print_job_info_msg
 \- Slurm job information reporting functions
@@ -19,6 +19,12 @@ ISLURM_GET_REM_TIME, ISLURM_GET_REM_TIME2
 .br
 #include <sys/types.h>
 .LP
+void \fBslurm_free_job_alloc_info_response_msg\fR (
+.br 
+	job_alloc_info_response_msg_t *\fIjob_alloc_info_msg_ptr\fP
+.br 
+);
+.LP 
 void \fBslurm_free_job_info_msg\fR (
 .br 
 	job_info_msg_t *\fIjob_info_msg_ptr\fP
@@ -180,6 +186,9 @@ greater than the last time changes where made to that information, new
 information is not returned.  Otherwise all the configuration. job, node, 
 or partition records are returned.
 .SH "DESCRIPTION"
+.LP
+\fBslurm_free_job_alloc_info_response_msg\fR Free the job allocation
+information response message.
 .LP 
 \fBslurm_free_job_info_msg\fR Release the storage generated by the 
 \fBslurm_load_jobs\fR function.
diff --git a/doc/man/man3/slurm_free_node_info_msg.3 b/doc/man/man3/slurm_free_node_info_msg.3
new file mode 100644
index 00000000000..63979eec2ec
--- /dev/null
+++ b/doc/man/man3/slurm_free_node_info_msg.3
@@ -0,0 +1 @@
+.so man3/slurm_free_node_info.3
diff --git a/doc/man/man3/slurm_free_partition_info_msg.3 b/doc/man/man3/slurm_free_partition_info_msg.3
new file mode 100644
index 00000000000..0e99ece3646
--- /dev/null
+++ b/doc/man/man3/slurm_free_partition_info_msg.3
@@ -0,0 +1 @@
+.so man3/slurm_free_partition_info.3
diff --git a/doc/man/man3/slurm_free_slurmd_status.3 b/doc/man/man3/slurm_free_slurmd_status.3
new file mode 100644
index 00000000000..d7153f13864
--- /dev/null
+++ b/doc/man/man3/slurm_free_slurmd_status.3
@@ -0,0 +1 @@
+.so man3/slurm_slurmd_status.3
diff --git a/doc/man/man3/slurm_free_trigger_msg.3 b/doc/man/man3/slurm_free_trigger_msg.3
new file mode 100644
index 00000000000..2fd720318c8
--- /dev/null
+++ b/doc/man/man3/slurm_free_trigger_msg.3
@@ -0,0 +1 @@
+.so man3/slurm_trigger.3
diff --git a/doc/man/man3/slurm_get_checkpoint_file_path.3 b/doc/man/man3/slurm_get_checkpoint_file_path.3
new file mode 100644
index 00000000000..32120a6bbb2
--- /dev/null
+++ b/doc/man/man3/slurm_get_checkpoint_file_path.3
@@ -0,0 +1 @@
+.so man3/slurm_checkpoint_error.3
diff --git a/doc/man/man3/slurm_get_triggers.3 b/doc/man/man3/slurm_get_triggers.3
new file mode 100644
index 00000000000..2fd720318c8
--- /dev/null
+++ b/doc/man/man3/slurm_get_triggers.3
@@ -0,0 +1 @@
+.so man3/slurm_trigger.3
diff --git a/doc/man/man3/slurm_job_step_layout_free.3 b/doc/man/man3/slurm_job_step_layout_free.3
new file mode 100644
index 00000000000..f600ff5c164
--- /dev/null
+++ b/doc/man/man3/slurm_job_step_layout_free.3
@@ -0,0 +1 @@
+.so man3/slurm_free_job_step_info_response_msg.3
diff --git a/doc/man/man3/slurm_job_step_layout_get.3 b/doc/man/man3/slurm_job_step_layout_get.3
new file mode 100644
index 00000000000..f600ff5c164
--- /dev/null
+++ b/doc/man/man3/slurm_job_step_layout_get.3
@@ -0,0 +1 @@
+.so man3/slurm_free_job_step_info_response_msg.3
diff --git a/doc/man/man3/slurm_kill_job.3 b/doc/man/man3/slurm_kill_job.3
index 0f6685bcfea..e07d6f7db2e 100644
--- a/doc/man/man3/slurm_kill_job.3
+++ b/doc/man/man3/slurm_kill_job.3
@@ -1,6 +1,8 @@
 .TH "Slurm API" "3" "November 2003" "Morris Jette" "Slurm job signal calls"
 .SH "NAME"
-slurm_kill_job, slurm_kill_job_step \- Slurm job signal calls
+slurm_kill_job, slurm_kill_job_step,
+slurm_signal_job, slurm_signal_job_step,
+slurm_terminate_job, slurm_terminate_job_step \- Slurm job signal calls
 .SH "SYNTAX"
 .LP 
 #include <slurm/slurm.h>
@@ -24,6 +26,38 @@ int \fBslurm_kill_job_step\fR (
 	uint16_t \fIsignal\fP
 .br 
 );
+.LP
+int \fBslurm_signal_job\fR (
+.br 
+	uint32_t \fIjob_id\fP,
+.br 
+	uint16_t \fIsignal\fP
+.br 
+);
+.LP
+int \fBslurm_signal_job_step\fR (
+.br 
+	uint32_t \fIjob_id\fP,
+.br 
+	uint32_t \fIjob_step_id\fP,
+.br 
+	uint16_t \fIsignal\fP
+.br 
+);
+.LP
+int \fBslurm_terminate_job\fR (
+.br 
+	uint32_t \fIjob_id\fP
+.br 
+);
+.LP
+int \fBslurm_terminate_job_step\fR (
+.br 
+	uint32_t \fIjob_id\fP,
+.br 
+	uint32_t \fIjob_step_id\fP
+.br 
+);
 .SH "ARGUMENTS"
 .LP 
 \fIbatch_flag\fP
@@ -46,6 +80,18 @@ This function may only be successfully executed by the job's owner or user root.
 .LP 
 \fBslurm_kill_job_step\fR Request that a signal be sent to a specific job step. 
 This function may only be successfully executed by the job's owner or user root.
+\fBslurm_signal_job\fR Request that the specified signal be sent to all
+steps of an existing job. 
+\fBslurm_signal_job_step\fR Request that the specified signal be sent to
+an existing job step. 
+\fBslurm_terminate_job\fR Request that all steps of an
+existing job be terminated by sending a REQUEST_TERMINATE_JOB rpc to all slurmd in
+the job allocation, and then calling slurm_complete_job(). 
+\fBslurm_terminate_job_step\fR Request that a job step be terminated by
+sending a REQUEST_TERMINATE_TASKS rpc to all slurmd of a job step, and
+then calling slurm_complete_job_step() after verifying that all nodes in
+the job step no longer have running tasks from the job step.  (May
+take over 35 seconds to return.) 
 .SH "RETURN VALUE"
 .LP
 On success, zero is returned. On error, \-1 is returned, and Slurm error code is set appropriately.
diff --git a/doc/man/man3/slurm_load_slurmd_status.3 b/doc/man/man3/slurm_load_slurmd_status.3
new file mode 100644
index 00000000000..d7153f13864
--- /dev/null
+++ b/doc/man/man3/slurm_load_slurmd_status.3
@@ -0,0 +1 @@
+.so man3/slurm_slurmd_status.3
diff --git a/doc/man/man3/slurm_notify_job.3 b/doc/man/man3/slurm_notify_job.3
new file mode 100644
index 00000000000..836ffa79b47
--- /dev/null
+++ b/doc/man/man3/slurm_notify_job.3
@@ -0,0 +1 @@
+.so man3/slurm_free_job_info_msg.3
diff --git a/doc/man/man3/slurm_ping.3 b/doc/man/man3/slurm_ping.3
new file mode 100644
index 00000000000..8c2ed98140d
--- /dev/null
+++ b/doc/man/man3/slurm_ping.3
@@ -0,0 +1 @@
+.so man3/slurm_reconfigure.3
diff --git a/doc/man/man3/slurm_print_slurmd_status.3 b/doc/man/man3/slurm_print_slurmd_status.3
new file mode 100644
index 00000000000..d7153f13864
--- /dev/null
+++ b/doc/man/man3/slurm_print_slurmd_status.3
@@ -0,0 +1 @@
+.so man3/slurm_slurmd_status.3
diff --git a/doc/man/man3/slurm_read_hostfile.3 b/doc/man/man3/slurm_read_hostfile.3
new file mode 100644
index 00000000000..6534eeb96c7
--- /dev/null
+++ b/doc/man/man3/slurm_read_hostfile.3
@@ -0,0 +1 @@
+.so man3/slurm_allocate_resources.3
diff --git a/doc/man/man3/slurm_set_debug_level.3 b/doc/man/man3/slurm_set_debug_level.3
new file mode 100644
index 00000000000..8c2ed98140d
--- /dev/null
+++ b/doc/man/man3/slurm_set_debug_level.3
@@ -0,0 +1 @@
+.so man3/slurm_reconfigure.3
diff --git a/doc/man/man3/slurm_set_trigger.3 b/doc/man/man3/slurm_set_trigger.3
new file mode 100644
index 00000000000..2fd720318c8
--- /dev/null
+++ b/doc/man/man3/slurm_set_trigger.3
@@ -0,0 +1 @@
+.so man3/slurm_trigger.3
diff --git a/doc/man/man3/slurm_signal_job.3 b/doc/man/man3/slurm_signal_job.3
new file mode 100644
index 00000000000..90c80c4ee62
--- /dev/null
+++ b/doc/man/man3/slurm_signal_job.3
@@ -0,0 +1 @@
+.so man3/slurm_kill_job.3
diff --git a/doc/man/man3/slurm_signal_job_step.3 b/doc/man/man3/slurm_signal_job_step.3
new file mode 100644
index 00000000000..90c80c4ee62
--- /dev/null
+++ b/doc/man/man3/slurm_signal_job_step.3
@@ -0,0 +1 @@
+.so man3/slurm_kill_job.3
diff --git a/doc/man/man3/slurm_slurmd_status.3 b/doc/man/man3/slurm_slurmd_status.3
new file mode 100644
index 00000000000..87c4badc1f8
--- /dev/null
+++ b/doc/man/man3/slurm_slurmd_status.3
@@ -0,0 +1,67 @@
+.TH "Slurm API" "3" "Oct 2008" "Danny Auble" "Slurmd status functions"
+
+.SH "NAME"
+
+slurm_free_slurmd_status, slurm_load_slurmd_status, slurm_print_slurmd_status
+
+.SH "SYNTAX"
+.LP 
+#include <slurm/slurm.h>
+.LP
+.LP
+void \fBslurm_free_slurmd_status\fR (
+.br
+	slurmd_status_t* \fIslurmd_status_ptr\fP 
+.br
+);
+.LP
+int \fBslurm_load_slurmd_status\fR (
+.br
+	slurmd_status_t** \fIslurmd_status_ptr\fP 
+.br
+);
+.LP
+void \fBslurm_print_slurmd_status\fR (
+.br
+	FILE *\fIout\fP,
+.br
+	slurmd_status_t* \fIslurmd_status_pptr\fP 
+.br
+);
+
+.SH "ARGUMENTS"
+.LP 
+.TP
+\fIslurmd_status_ptr\fP 
+Slurmd status pointer.  Created by \fBslurm_load_slurmd_status\fR,
+used in subsequent function calls, and destroyed by
+\fBslurm_free_slurmd_status\fR.
+
+.SH "DESCRIPTION"
+.LP
+\fBslurm_free_slurmd_status\fR free slurmd state information.
+.LP 
+\fBslurm_load_slurmd_status\fR issue RPC to get the status of slurmd
+daemon on this machine.
+.LP
+\fBslurm_print_slurmd_status\fR output the contents of slurmd status
+message as loaded using slurm_load_slurmd_status.
+
+.SH "COPYING"
+Copyright (C) 2006-2007 The Regents of the University of California.
+Copyright (C) 2008 Lawrence Livermore National Security.
+Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+LLNL\-CODE\-402394.
+.LP
+This file is part of SLURM, a resource management program.
+For details, see <https://computing.llnl.gov/linux/slurm/>.
+.LP
+SLURM is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2 of the License, or (at your option)
+any later version.
+.LP
+SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+details.
diff --git a/doc/man/man3/slurm_sprint_job_info.3 b/doc/man/man3/slurm_sprint_job_info.3
new file mode 100644
index 00000000000..836ffa79b47
--- /dev/null
+++ b/doc/man/man3/slurm_sprint_job_info.3
@@ -0,0 +1 @@
+.so man3/slurm_free_job_info_msg.3
diff --git a/doc/man/man3/slurm_sprint_job_step_info.3 b/doc/man/man3/slurm_sprint_job_step_info.3
new file mode 100644
index 00000000000..f600ff5c164
--- /dev/null
+++ b/doc/man/man3/slurm_sprint_job_step_info.3
@@ -0,0 +1 @@
+.so man3/slurm_free_job_step_info_response_msg.3
diff --git a/doc/man/man3/slurm_sprint_node_table.3 b/doc/man/man3/slurm_sprint_node_table.3
new file mode 100644
index 00000000000..63979eec2ec
--- /dev/null
+++ b/doc/man/man3/slurm_sprint_node_table.3
@@ -0,0 +1 @@
+.so man3/slurm_free_node_info.3
diff --git a/doc/man/man3/slurm_sprint_partition_info.3 b/doc/man/man3/slurm_sprint_partition_info.3
new file mode 100644
index 00000000000..0e99ece3646
--- /dev/null
+++ b/doc/man/man3/slurm_sprint_partition_info.3
@@ -0,0 +1 @@
+.so man3/slurm_free_partition_info.3
diff --git a/doc/man/man3/slurm_step_ctx_create.3 b/doc/man/man3/slurm_step_ctx_create.3
index c949a221f20..54a96c2998e 100644
--- a/doc/man/man3/slurm_step_ctx_create.3
+++ b/doc/man/man3/slurm_step_ctx_create.3
@@ -1,7 +1,9 @@
 .TH "Slurm API" "3" "March 2007" "Morris Jette" "Slurm job step context functions"
 
 .SH "NAME"
-slurm_step_ctx_create, slurm_step_ctx_get, slurm_jobinfo_ctx_get,
+slurm_step_ctx_create, slurm_step_ctx_create_no_alloc,
+slurm_step_ctx_daemon_per_node_hack, slurm_step_ctx_get,
+slurm_step_ctx_params_t_init, slurm_jobinfo_ctx_get,
 slurm_spawn_kill, slurm_step_ctx_destroy \- Slurm task spawn functions
 
 .SH "SYNTAX"
@@ -11,13 +13,25 @@ slurm_spawn_kill, slurm_step_ctx_destroy \- Slurm task spawn functions
 .LP
 slurm_step_ctx \fBslurm_step_ctx_create\fR (
 .br
-	job_step_create_request_msg_t *\fIstep_req\fP 
+	slurm_step_ctx_params_t *\fIstep_req\fP 
+.br
+);
+.LP
+slurm_step_ctx \fBslurm_step_ctx_create_no_alloc\fR (
+.br
+	slurm_step_ctx_params_t *\fIstep_req\fP 
+.br
+);
+.LP
+int \fBslurm_step_ctx_daemon_per_node_hack\fR (
+.br
+	slurm_step_ctx_t *\fIctx\fP
 .br
 );
 .LP
 int \fBslurm_step_ctx_get\fR (
 .br
-	slurm_step_ctx \fIctx\fP,
+	slurm_step_ctx_t *\fIctx\fP,
 .br
 	int \fIctx_key\fP,
 .br
@@ -35,6 +49,12 @@ int \fBslurm_jobinfo_ctx_get\fR (
 .br
 );
 .LP
+void \fBslurm_step_ctx_params_t_init\fR (
+.br
+	slurm_step_ctx_params_t *\fIstep_req\fP 
+.br
+);
+.LP
 int \fBslurm_spawn\fR {
 .br
 	slurm_step_ctx \fIctx\fP,
@@ -64,7 +84,8 @@ Specifies the pointer to the structure with job step request specification. See
 slurm.h for full details on the data structure's contents.
 .TP
 \fIctx\fP
-Job step context. Created by \fBslurm_step_ctx_create\fR, used in subsequent
+Job step context. Created by \fBslurm_step_ctx_create\fR, or
+\fBslurm_step_ctx_create_no_alloc\fR, used in subsequent
 function calls, and destroyed by \fBslurm_step_ctx_destroy\fR.
 .TP
 \fIctx_key\fP
@@ -99,11 +120,22 @@ finished. NOTE: this function creates a slurm job step. Call \fBslurm_spawn\fR
 in a timely fashion to avoid having job step credentials time out. If
 \fBslurm_spawn\fR is not used, explicitly cancel the job step. 
 .LP
+\fBslurm_step_ctx_create_no_alloc\fR Same as above, only no
+allocation is made. To avoid memory leaks call
+\fBslurm_step_ctx_destroy\fR when the use of this context is finished.
+.LP
+\fBslurm_step_ctx_daemon_per_node_hack\fR Hack the step context to run
+a single process per node, regardless of the settings selected at
+slurm_step_ctx_create time. 
+.LP
 \fBslurm_step_ctx_get\fR Get values from a job step context.
 \fIctx_key\fP identifies the fields to be gathered from the job step context.
 Subsequent arguments to this function are dependent upon the value
 of \fIctx_key\fP. See the \fBCONTEXT KEYS\fR section for details.
 .LP
+\fBslurm_step_ctx_params_t_init\fR This initializes parameters in the
+structure that you will pass to slurm_step_ctx_create().
+.LP
 \fBslurm_spawn\fR Spawn tasks based upon a job step context
 and establish communications with the tasks using the socket 
 file descriptors specified.
diff --git a/doc/man/man3/slurm_step_ctx_create_no_alloc.3 b/doc/man/man3/slurm_step_ctx_create_no_alloc.3
new file mode 100644
index 00000000000..6c5422fcb1a
--- /dev/null
+++ b/doc/man/man3/slurm_step_ctx_create_no_alloc.3
@@ -0,0 +1 @@
+.so man3/slurm_step_ctx_create.3
diff --git a/doc/man/man3/slurm_step_ctx_daemon_per_node_hack.3 b/doc/man/man3/slurm_step_ctx_daemon_per_node_hack.3
new file mode 100644
index 00000000000..6c5422fcb1a
--- /dev/null
+++ b/doc/man/man3/slurm_step_ctx_daemon_per_node_hack.3
@@ -0,0 +1 @@
+.so man3/slurm_step_ctx_create.3
diff --git a/doc/man/man3/slurm_step_ctx_params_t_init.3 b/doc/man/man3/slurm_step_ctx_params_t_init.3
new file mode 100644
index 00000000000..6c5422fcb1a
--- /dev/null
+++ b/doc/man/man3/slurm_step_ctx_params_t_init.3
@@ -0,0 +1 @@
+.so man3/slurm_step_ctx_create.3
diff --git a/doc/man/man3/slurm_step_launch.3 b/doc/man/man3/slurm_step_launch.3
index 77eedfb4f07..fb5c77345c3 100644
--- a/doc/man/man3/slurm_step_launch.3
+++ b/doc/man/man3/slurm_step_launch.3
@@ -2,7 +2,8 @@
 
 .SH "NAME"
 
-slurm_step_launch_params_t_init, slurm_step_launch, slurm_step_launch_wait_start,
+slurm_step_launch_params_t_init, slurm_step_launch,
+slurm_step_launch_fwd_signal, slurm_step_launch_wait_start,
 slurm_step_launch_wait_finish, slurm_step_launch_abort \- Slurm job step launch functions
 
 .SH "SYNTAX"
@@ -26,6 +27,14 @@ int \fBslurm_step_launch\fR (
 .br
 );
 .LP
+void \fBslurm_step_launch_fwd_signal\fR (
+.br
+	slurm_step_ctx \fIctx\fP,
+.br
+	int \fIsigno\fP
+.br
+);
+.LP
 int \fBslurm_step_launch_wait_start\fR (
 .br
 	slurm_step_ctx \fIctx\fP
@@ -66,6 +75,9 @@ default values.  This function will NOT allocate any new memory.
 .LP
 \fBslurm_step_launch\fR Launch a parallel job step.
 .LP
+\fBslurm_step_launch_fwd_signal\fR Forward a signal to all those nodes
+with running tasks.
+.LP
 \fBslurm_step_launch_wait_start\fR Block until all tasks have started.
 .LP
 \fBslurm_step_launch_wait_finish\fR Block until all tasks have finished 
diff --git a/doc/man/man3/slurm_step_launch_fwd_signal.3 b/doc/man/man3/slurm_step_launch_fwd_signal.3
new file mode 100644
index 00000000000..b54973d6787
--- /dev/null
+++ b/doc/man/man3/slurm_step_launch_fwd_signal.3
@@ -0,0 +1 @@
+.so man3/slurm_step_launch.3
diff --git a/doc/man/man3/slurm_terminate_job.3 b/doc/man/man3/slurm_terminate_job.3
new file mode 100644
index 00000000000..90c80c4ee62
--- /dev/null
+++ b/doc/man/man3/slurm_terminate_job.3
@@ -0,0 +1 @@
+.so man3/slurm_kill_job.3
diff --git a/doc/man/man3/slurm_terminate_job_step.3 b/doc/man/man3/slurm_terminate_job_step.3
new file mode 100644
index 00000000000..90c80c4ee62
--- /dev/null
+++ b/doc/man/man3/slurm_terminate_job_step.3
@@ -0,0 +1 @@
+.so man3/slurm_kill_job.3
diff --git a/doc/man/man3/slurm_trigger.3 b/doc/man/man3/slurm_trigger.3
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/src/api/pmi_server.c b/src/api/pmi_server.c
index 6b0a73e8904..730b6f44a27 100644
--- a/src/api/pmi_server.c
+++ b/src/api/pmi_server.c
@@ -168,8 +168,8 @@ static void *_msg_thread(void *x)
 
 	slurm_mutex_lock(&agent_mutex);
 	agent_cnt--;
-	slurm_mutex_unlock(&agent_mutex);
 	pthread_cond_signal(&agent_cond);
+	slurm_mutex_unlock(&agent_mutex);
 	xfree(x);
 	return NULL;
 }
diff --git a/src/api/step_launch.c b/src/api/step_launch.c
index 4e187505d9b..049dca2d281 100644
--- a/src/api/step_launch.c
+++ b/src/api/step_launch.c
@@ -858,6 +858,10 @@ _node_fail_handler(struct step_launch_state *sls, slurm_msg_t *fail_msg)
 	if (!sls->no_kill) {
 		info("Cancelling job step %u.%u", nf->job_id, nf->step_id);
 		slurm_kill_job_step(nf->job_id, nf->step_id, SIGKILL);
+		/* In an ideal world, we close the socket to this node and
+		 * normally terminate the remaining tasks. In practice this
+		 * is very difficult. The exercise is left to the reader. */
+		exit(1);
 	}
 
 	fail_nodes = hostset_create(nf->nodelist);
diff --git a/src/common/assoc_mgr.c b/src/common/assoc_mgr.c
index aafe757d127..1e4461899e8 100644
--- a/src/common/assoc_mgr.c
+++ b/src/common/assoc_mgr.c
@@ -130,7 +130,12 @@ static int _local_update_assoc_qos_list(acct_association_rec_t *assoc,
 				list_flush(assoc->qos_list);
 			list_append(assoc->qos_list, xstrdup(new_qos+1));
 			flushed = 1;
-		} 
+		} else if(new_qos[0]) {
+			if(!flushed)
+				list_flush(assoc->qos_list);
+			list_append(assoc->qos_list, xstrdup(new_qos));
+			flushed = 1;			
+		}
 	}
 	list_iterator_destroy(curr_qos_itr);
 	list_iterator_destroy(new_qos_itr);
diff --git a/src/common/jobacct_common.c b/src/common/jobacct_common.c
index 516f25d3c6b..14226e7ab3c 100644
--- a/src/common/jobacct_common.c
+++ b/src/common/jobacct_common.c
@@ -231,7 +231,7 @@ extern void pack_jobacct_job_rec(void *object, uint16_t rpc_version, Buf buffer)
 	_pack_sacct(&job->sacct, buffer);
 	pack32(job->show_full, buffer);
 	pack_time(job->start, buffer);
-	pack16(job->state, buffer);
+	pack16((uint16_t)job->state, buffer);
 	if(job->steps)
 		count = list_count(job->steps);
 	pack32(count, buffer);
@@ -262,6 +262,7 @@ extern int unpack_jobacct_job_rec(void **job, uint16_t rpc_version, Buf buffer)
 	jobacct_step_rec_t *step = NULL;
 	uint32_t count = 0;
 	uint32_t uint32_tmp;
+	uint16_t uint16_tmp;
 
 	*job = job_ptr;
 
@@ -273,21 +274,24 @@ extern int unpack_jobacct_job_rec(void **job, uint16_t rpc_version, Buf buffer)
 	safe_unpack32(&job_ptr->elapsed, buffer);
 	safe_unpack_time(&job_ptr->eligible, buffer);
 	safe_unpack_time(&job_ptr->end, buffer);
-	safe_unpack32((uint32_t *)&job_ptr->exitcode, buffer);
+	safe_unpack32(&uint32_tmp, buffer);
+	job_ptr->exitcode = (int32_t)uint32_tmp;
 	safe_unpack32(&job_ptr->gid, buffer);
 	safe_unpack32(&job_ptr->jobid, buffer);
 	safe_unpackstr_xmalloc(&job_ptr->jobname, &uint32_tmp, buffer);
 	safe_unpack32(&job_ptr->lft, buffer);
 	safe_unpackstr_xmalloc(&job_ptr->partition, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&job_ptr->nodes, &uint32_tmp, buffer);
-	safe_unpack32((uint32_t *)&job_ptr->priority, buffer);
+	safe_unpack32(&uint32_tmp, buffer);
+	job_ptr->priority = (int32_t)uint32_tmp;
 	safe_unpack16(&job_ptr->qos, buffer);
 	safe_unpack32(&job_ptr->req_cpus, buffer);
 	safe_unpack32(&job_ptr->requid, buffer);
 	_pack_sacct(&job_ptr->sacct, buffer);
 	safe_unpack32(&job_ptr->show_full, buffer);
 	safe_unpack_time(&job_ptr->start, buffer);
-	safe_unpack16((uint16_t *)&job_ptr->state, buffer);
+	safe_unpack16(&uint16_tmp, buffer);
+	job_ptr->state = uint16_tmp;
 	safe_unpack32(&count, buffer);
 
 	job_ptr->steps = list_create(destroy_jobacct_step_rec);
@@ -345,20 +349,23 @@ extern int unpack_jobacct_step_rec(jobacct_step_rec_t **step,
 				   uint16_t rpc_version, Buf buffer)
 {
 	uint32_t uint32_tmp;
+	uint16_t uint16_tmp;
 	jobacct_step_rec_t *step_ptr = xmalloc(sizeof(jobacct_step_rec_t));
 
 	*step = step_ptr;
 
 	safe_unpack32(&step_ptr->elapsed, buffer);
 	safe_unpack_time(&step_ptr->end, buffer);
-	safe_unpack32((uint32_t *)&step_ptr->exitcode, buffer);
+	safe_unpack32(&uint32_tmp, buffer);
+	step_ptr->exitcode = (int32_t)uint32_tmp;
 	safe_unpack32(&step_ptr->jobid, buffer);
 	safe_unpack32(&step_ptr->ncpus, buffer);
         safe_unpackstr_xmalloc(&step_ptr->nodes, &uint32_tmp, buffer);
 	safe_unpack32(&step_ptr->requid, buffer);
 	_unpack_sacct(&step_ptr->sacct, buffer);
 	safe_unpack_time(&step_ptr->start, buffer);
-	safe_unpack16((uint16_t *)&step_ptr->state, buffer);
+	safe_unpack16(&uint16_tmp, buffer);
+	step_ptr->state = uint16_tmp;
 	safe_unpack32(&step_ptr->stepid, buffer);	/* job's step number */
 	safe_unpackstr_xmalloc(&step_ptr->stepname, &uint32_tmp, buffer);
 	safe_unpack32(&step_ptr->suspended, buffer);
diff --git a/src/common/log.c b/src/common/log.c
index 545bea8274d..0c1d8141b12 100644
--- a/src/common/log.c
+++ b/src/common/log.c
@@ -362,7 +362,7 @@ static char *vxstrfmt(const char *fmt, va_list ap)
 	size_t      len = (size_t) 0;
 	char        tmp[LINEBUFSIZE];
 	int         unprocessed = 0;
-
+	int         long_long = 0;
 
 	while (*fmt != '\0') {
 
@@ -436,24 +436,76 @@ static char *vxstrfmt(const char *fmt, va_list ap)
 					xstrcat(buf, "%u");
 				break;
 			case 'l':
+				if((unprocessed == 0) && (*(p+1) == 'l')) {
+					long_long = 1;
+					p++;
+				}
+				
 				if ((unprocessed == 0) && (*(p+1) == 'u')) {
-					snprintf(tmp, sizeof(tmp), "%lu",
-						va_arg(ap, long unsigned));
+					if(long_long) {
+						snprintf(tmp, sizeof(tmp),
+							"%llu", 
+							 va_arg(ap,
+								long long unsigned));
+						long_long = 0;
+					} else 
+						snprintf(tmp, sizeof(tmp),
+							 "%lu",
+							 va_arg(ap,
+								long unsigned));
 					xstrcat(buf, tmp);
 					p++;
 				} else if ((unprocessed==0) && (*(p+1)=='d')) {
-					snprintf(tmp, sizeof(tmp), "%ld",
-						va_arg(ap, long int));
+					if(long_long) {
+						snprintf(tmp, sizeof(tmp),
+							"%lld", 
+							 va_arg(ap,
+								long long int));
+						long_long = 0;
+					} else
+						snprintf(tmp, sizeof(tmp),
+							 "%ld",
+							 va_arg(ap, long int));
+					xstrcat(buf, tmp);
+					p++;
+				} else if ((unprocessed==0) && (*(p+1)=='f')) {
+					if(long_long) {
+						xstrcat(buf, "%llf");
+						long_long = 0;
+					} else 
+						snprintf(tmp, sizeof(tmp),
+							 "%lf",
+							 va_arg(ap, double));
 					xstrcat(buf, tmp);
 					p++;
 				} else if ((unprocessed==0) && (*(p+1)=='x')) {
-					snprintf(tmp, sizeof(tmp), "%lx",
-						va_arg(ap, long int));
+					if(long_long) {
+						snprintf(tmp, sizeof(tmp),
+							 "%llx", 
+							 va_arg(ap,
+								long long int));
+						long_long = 0;
+					} else
+						snprintf(tmp, sizeof(tmp),
+							 "%lx",
+							 va_arg(ap, long int));
 					xstrcat(buf, tmp);
 					p++;
+				} else if(long_long) {
+					xstrcat(buf, "%ll");
+					long_long = 0;
 				} else
 					xstrcat(buf, "%l");
 				break; 
+			case 'L':
+				if ((unprocessed==0) && (*(p+1)=='f')) {
+					snprintf(tmp, sizeof(tmp), "%Lf", 
+						 va_arg(ap, long double));
+					xstrcat(buf, tmp);
+					p++;
+				} else
+					xstrcat(buf, "%L");
+				break;
 			default:	/* try to handle the rest  */
 				xstrcatchar(buf, '%');
 				xstrcatchar(buf, *p);
diff --git a/src/common/node_select.c b/src/common/node_select.c
index c8964aa00ff..c20ec1a62c9 100644
--- a/src/common/node_select.c
+++ b/src/common/node_select.c
@@ -648,7 +648,8 @@ static int _unpack_node_info(bg_info_record_t *bg_info_record, Buf buffer)
 	char *bp_inx_str;
 	
 	safe_unpackstr_xmalloc(&(bg_info_record->nodes), &uint32_tmp, buffer);
-	safe_unpackstr_xmalloc(&(bg_info_record->ionodes), &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&(bg_info_record->ionodes), 
+			       &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&bg_info_record->owner_name,
 			       &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&bg_info_record->bg_block_id,
diff --git a/src/common/slurm_accounting_storage.c b/src/common/slurm_accounting_storage.c
index 2c4ee965935..8ce4ae86294 100644
--- a/src/common/slurm_accounting_storage.c
+++ b/src/common/slurm_accounting_storage.c
@@ -2434,6 +2434,7 @@ extern void pack_acct_cluster_cond(void *in, uint16_t rpc_version, Buf buffer)
 		}
 		list_iterator_destroy(itr);
 	}
+	count = NO_VAL;
 
 	pack32(object->usage_end, buffer);
 	pack32(object->usage_start, buffer);
@@ -2967,7 +2968,7 @@ extern int unpack_acct_association_cond(void **object,
 		   NO_VAL
 		*/
 		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
+		if(count && count != NO_VAL) {
 			object_ptr->fairshare_list = 
 				list_create(slurm_destroy_char);
 			list_append(object_ptr->fairshare_list,
@@ -2975,7 +2976,7 @@ extern int unpack_acct_association_cond(void **object,
 		}
 
 		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
+		if(count && count != NO_VAL) {
 			object_ptr->id_list = list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, 
@@ -2985,7 +2986,7 @@ extern int unpack_acct_association_cond(void **object,
 		}
 	
 		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
+		if(count && count != NO_VAL) {
 			object_ptr->max_cpu_mins_pj_list = 
 				list_create(slurm_destroy_char);
 			list_append(object_ptr->max_cpu_mins_pj_list,
@@ -2993,7 +2994,7 @@ extern int unpack_acct_association_cond(void **object,
 		}
 
 		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
+		if(count && count != NO_VAL) {
 			object_ptr->max_jobs_list = 
 				list_create(slurm_destroy_char);
 			list_append(object_ptr->max_jobs_list,
@@ -3001,7 +3002,7 @@ extern int unpack_acct_association_cond(void **object,
 		}
 
 		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
+		if(count && count != NO_VAL) {
 			object_ptr->max_nodes_pj_list = 
 				list_create(slurm_destroy_char);
 			list_append(object_ptr->max_nodes_pj_list,
@@ -3009,7 +3010,7 @@ extern int unpack_acct_association_cond(void **object,
 		}
 
 		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
+		if(count && count != NO_VAL) {
 			object_ptr->max_wall_pj_list = 
 				list_create(slurm_destroy_char);
 			list_append(object_ptr->max_wall_pj_list,
diff --git a/src/common/slurmdbd_defs.c b/src/common/slurmdbd_defs.c
index fc45eb6b14f..804a894d761 100644
--- a/src/common/slurmdbd_defs.c
+++ b/src/common/slurmdbd_defs.c
@@ -75,7 +75,7 @@
 #define DBD_MAGIC		0xDEAD3219
 #define MAX_AGENT_QUEUE		10000
 #define MAX_DBD_MSG_LEN		16384
-#define SLURMDBD_TIMEOUT	60	/* Seconds SlurmDBD for response */
+#define SLURMDBD_TIMEOUT	300	/* Seconds to wait for SlurmDBD response */
 
 bool running_cache = 0;
 
@@ -127,11 +127,9 @@ static int    _tot_wait (struct timeval *start_time);
 extern int slurm_open_slurmdbd_conn(char *auth_info, bool make_agent, 
 				    bool rollback)
 {
-	slurm_mutex_lock(&agent_lock);
-	if (make_agent && ((agent_tid == 0) || (agent_list == NULL)))
-		_create_agent();
-	slurm_mutex_unlock(&agent_lock);
-
+	/* we need to set this up before we make the agent or we will
+	   get a threading issue.
+	*/
 	slurm_mutex_lock(&slurmdbd_lock);
 	xfree(slurmdbd_auth_info);
 	if (auth_info)
@@ -142,6 +140,12 @@ extern int slurm_open_slurmdbd_conn(char *auth_info, bool make_agent,
 	if (slurmdbd_fd < 0)
 		_open_slurmdbd_fd();
 	slurm_mutex_unlock(&slurmdbd_lock);
+
+	slurm_mutex_lock(&agent_lock);
+	if (make_agent && ((agent_tid == 0) || (agent_list == NULL)))
+		_create_agent();
+	slurm_mutex_unlock(&agent_lock);
+
 	if (slurmdbd_fd < 0)
 		return SLURM_ERROR;
 	else
@@ -1482,7 +1486,7 @@ static void *_agent(void *x)
 
 		slurm_mutex_lock(&slurmdbd_lock);
 		if ((slurmdbd_fd < 0) && 
-		    (difftime(time(NULL), fail_time) >= 10)) {
+		    (difftime(time(NULL), fail_time) >= 10)) {			
 			/* The connection to Slurm DBD is not open */
 			_open_slurmdbd_fd();
 			if (slurmdbd_fd < 0)
diff --git a/src/database/mysql_common.c b/src/database/mysql_common.c
index 35847b0a213..96107e7a54b 100644
--- a/src/database/mysql_common.c
+++ b/src/database/mysql_common.c
@@ -43,7 +43,9 @@
 #include "src/common/timers.h"
 #include "src/common/slurm_protocol_api.h"
 
+#ifdef MYSQL_NOT_THREAD_SAFE
 pthread_mutex_t mysql_lock = PTHREAD_MUTEX_INITIALIZER;
+#endif
 
 #ifdef HAVE_MYSQL
 
@@ -184,7 +186,9 @@ static int _create_db(char *db_name, mysql_db_info_t *db_info)
 	char create_line[50];
 	MYSQL *mysql_db = NULL;
 
-//	slurm_mutex_lock(&mysql_lock);
+#ifdef MYSQL_NOT_THREAD_SAFE
+	slurm_mutex_lock(&mysql_lock);
+#endif
 	if(!(mysql_db = mysql_init(mysql_db)))
 		fatal("mysql_init failed: %s", mysql_error(mysql_db));
 	
@@ -203,12 +207,16 @@ static int _create_db(char *db_name, mysql_db_info_t *db_info)
 		     "user = %s pass = %s port = %u",
 		     db_info->host, db_info->user,
 		     db_info->pass, db_info->port);
+#ifdef MYSQL_NOT_THREAD_SAFE
 		slurm_mutex_unlock(&mysql_lock);
+#endif
 		fatal("mysql_real_connect failed: %d %s\n",
 		      mysql_errno(mysql_db),
 		      mysql_error(mysql_db));
 	}
-//	slurm_mutex_unlock(&mysql_lock);
+#ifdef MYSQL_NOT_THREAD_SAFE
+	slurm_mutex_unlock(&mysql_lock);
+#endif
 	return SLURM_SUCCESS;
 }
 
@@ -292,8 +300,9 @@ extern int mysql_db_query(MYSQL *mysql_db, char *query)
 {
 	if(!mysql_db)
 		fatal("You haven't inited this storage yet.");
+#ifdef MYSQL_NOT_THREAD_SAFE
 	slurm_mutex_lock(&mysql_lock);
-
+#endif
 	/* clear out the old results so we don't get a 2014 error */
 	_clear_results(mysql_db);		
 //try_again:
@@ -306,11 +315,15 @@ extern int mysql_db_query(MYSQL *mysql_db, char *query)
 		      mysql_errno(mysql_db),
 		      mysql_error(mysql_db), query);
 		errno = mysql_errno(mysql_db);
+#ifdef MYSQL_NOT_THREAD_SAFE
 		slurm_mutex_unlock(&mysql_lock);
+#endif
 		return SLURM_ERROR;
 	}
-	slurm_mutex_unlock(&mysql_lock);
 
+#ifdef MYSQL_NOT_THREAD_SAFE
+	slurm_mutex_unlock(&mysql_lock);
+#endif
 	return SLURM_SUCCESS;
 }
 
@@ -323,8 +336,9 @@ extern int mysql_db_ping(MYSQL *mysql_db)
 
 extern int mysql_db_commit(MYSQL *mysql_db)
 {
-	//slurm_mutex_lock(&mysql_lock);
-
+#ifdef MYSQL_NOT_THREAD_SAFE
+	slurm_mutex_lock(&mysql_lock);
+#endif
 	/* clear out the old results so we don't get a 2014 error */
 	_clear_results(mysql_db);		
 	if(mysql_commit(mysql_db)) {
@@ -332,18 +346,22 @@ extern int mysql_db_commit(MYSQL *mysql_db)
 		      mysql_errno(mysql_db),
 		      mysql_error(mysql_db));
 		errno = mysql_errno(mysql_db);
-		//slurm_mutex_unlock(&mysql_lock);
+#ifdef MYSQL_NOT_THREAD_SAFE
+		slurm_mutex_unlock(&mysql_lock);
+#endif
 		return SLURM_ERROR;
 	}
-	//slurm_mutex_unlock(&mysql_lock);
-
+#ifdef MYSQL_NOT_THREAD_SAFE
+	slurm_mutex_unlock(&mysql_lock);
+#endif
 	return SLURM_SUCCESS;
 }
 
 extern int mysql_db_rollback(MYSQL *mysql_db)
 {
-	//slurm_mutex_lock(&mysql_lock);
-
+#ifdef MYSQL_NOT_THREAD_SAFE
+	slurm_mutex_lock(&mysql_lock);
+#endif
 	/* clear out the old results so we don't get a 2014 error */
 	_clear_results(mysql_db);		
 	if(mysql_rollback(mysql_db)) {
@@ -351,12 +369,15 @@ extern int mysql_db_rollback(MYSQL *mysql_db)
 		      mysql_errno(mysql_db),
 		      mysql_error(mysql_db));
 		errno = mysql_errno(mysql_db);
-		//slurm_mutex_unlock(&mysql_lock);
+#ifdef MYSQL_NOT_THREAD_SAFE
+		slurm_mutex_unlock(&mysql_lock);
+#endif
 		return SLURM_ERROR;
 	}
 	//mysql_db_query(mysql_db, "unlock tables;");
-	//slurm_mutex_unlock(&mysql_lock);
-
+#ifdef MYSQL_NOT_THREAD_SAFE
+	slurm_mutex_unlock(&mysql_lock);
+#endif
 	return SLURM_SUCCESS;
 
 }
diff --git a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
index d1753a3912f..6f061655758 100644
--- a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
+++ b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
@@ -923,7 +923,8 @@ static int _setup_association_cond_limits(acct_association_cond_t *assoc_cond,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(*extra, " || ");
-			xstrfmtcat(*extra, "%s.parent_acct=%s", prefix, object);
+			xstrfmtcat(*extra, "%s.parent_acct=\"%s\"",
+				   prefix, object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -995,7 +996,7 @@ static int _addto_update_list(List update_list, acct_update_type_t type,
  */
 static int _move_account(mysql_conn_t *mysql_conn, uint32_t lft, uint32_t rgt,
 			 char *cluster,
-			 char *id, char *parent)
+			 char *id, char *parent, time_t now)
 {
 	int rc = SLURM_SUCCESS;
 	MYSQL_RES *result = NULL;
@@ -1036,41 +1037,43 @@ static int _move_account(mysql_conn_t *mysql_conn, uint32_t lft, uint32_t rgt,
 	/* every thing below needs to be a %d not a %u because we are
 	   looking for -1 */
 	xstrfmtcat(query,
-		   "update %s set deleted = deleted + 2, "
+		   "update %s set mod_time=%d, deleted = deleted + 2, "
 		   "lft = lft + %d, rgt = rgt + %d "
 		   "WHERE lft BETWEEN %d AND %d;",
-		   assoc_table, diff, diff, lft, rgt);
+		   assoc_table, now, diff, diff, lft, rgt);
 
 	xstrfmtcat(query,
-		   "UPDATE %s SET rgt = rgt + %d WHERE "
+		   "UPDATE %s SET mod_time=%d, rgt = rgt + %d WHERE "
 		   "rgt > %d && deleted < 2;"
-		   "UPDATE %s SET lft = lft + %d WHERE "
+		   "UPDATE %s SET mod_time=%d, lft = lft + %d WHERE "
 		   "lft > %d && deleted < 2;",
-		   assoc_table, width,
+		   assoc_table, now, width,
 		   par_left,
-		   assoc_table, width,
+		   assoc_table, now, width,
 		   par_left);
 
 	xstrfmtcat(query,
-		   "UPDATE %s SET rgt = rgt - %d WHERE "
+		   "UPDATE %s SET mod_time=%d, rgt = rgt - %d WHERE "
 		   "(%d < 0 && rgt > %d && deleted < 2) "
 		   "|| (%d > 0 && rgt > %d);"
-		   "UPDATE %s SET lft = lft - %d WHERE "
+		   "UPDATE %s SET mod_time=%d, lft = lft - %d WHERE "
 		   "(%d < 0 && lft > %d && deleted < 2) "
 		   "|| (%d > 0 && lft > %d);",
-		   assoc_table, width,
+		   assoc_table, now, width,
 		   diff, rgt,
 		   diff, lft,
-		   assoc_table, width,
+		   assoc_table, now, width,
 		   diff, rgt,
 		   diff, lft);
 
 	xstrfmtcat(query,
-		   "update %s set deleted = deleted - 2 WHERE deleted > 1;",
-		   assoc_table);
+		   "update %s set mod_time=%d, "
+		   "deleted = deleted - 2 WHERE deleted > 1;",
+		   assoc_table, now);
 	xstrfmtcat(query,
-		   "update %s set parent_acct=\"%s\" where id = %s;",
-		   assoc_table, parent, id);
+		   "update %s set mod_time=%d, "
+		   "parent_acct=\"%s\" where id = %s;",
+		   assoc_table, now, parent, id);
 	debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 	rc = mysql_db_query(mysql_conn->db_conn, query);
 	xfree(query);
@@ -1086,15 +1089,13 @@ static int _move_account(mysql_conn_t *mysql_conn, uint32_t lft, uint32_t rgt,
 static int _move_parent(mysql_conn_t *mysql_conn, uid_t uid,
 			uint32_t lft, uint32_t rgt,
 			char *cluster,
-			char *id, char *old_parent, char *new_parent)
+			char *id, char *old_parent, char *new_parent,
+			time_t now)
 {
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
 	char *query = NULL;
 	int rc = SLURM_SUCCESS;
-	List assoc_list = NULL;
-	ListIterator itr = NULL;
-	acct_association_rec_t *assoc = NULL;
 		
 	/* first we need to see if we are going to make a child of this
 	 * account the new parent.  If so we need to move that child to this
@@ -1117,7 +1118,7 @@ static int _move_parent(mysql_conn_t *mysql_conn, uid_t uid,
 		debug4("%s(%s) %s,%s is a child of %s",
 		       new_parent, row[0], row[1], row[2], id);
 		rc = _move_account(mysql_conn, atoi(row[1]), atoi(row[2]),
-				   cluster, row[0], old_parent);
+				   cluster, row[0], old_parent, now);
 	}
 
 	mysql_free_result(result);
@@ -1142,7 +1143,7 @@ static int _move_parent(mysql_conn_t *mysql_conn, uid_t uid,
 
 	if((row = mysql_fetch_row(result))) {
 		rc = _move_account(mysql_conn, atoi(row[0]), atoi(row[1]),
-				   cluster, id, new_parent);
+				   cluster, id, new_parent, now);
 	} else {
 		error("can't find parent? we were able to a second ago.");
 		rc = SLURM_ERROR;
@@ -1152,26 +1153,6 @@ static int _move_parent(mysql_conn_t *mysql_conn, uid_t uid,
 	if(rc == SLURM_ERROR) 
 		return rc;
 	
-	/* now we need to send the update of the new parents and
-	 * limits, so just to be safe, send the whole tree
-	 */
-	assoc_list = acct_storage_p_get_associations(mysql_conn, uid, NULL);
-	/* NOTE: you can not use list_pop, or list_push
-	   anywhere either, since mysql is
-	   exporting something of the same type as a macro,
-	   which messes everything up (my_list.h is the bad boy).
-	   So we are just going to delete each item as it
-	   comes out since we are moving it to the update_list.
-	*/
-	itr = list_iterator_create(assoc_list);
-	while((assoc = list_next(itr))) {
-		if(_addto_update_list(mysql_conn->update_list, 
-				      ACCT_MODIFY_ASSOC,
-				      assoc) == SLURM_SUCCESS) 
-			list_remove(itr);
-	}
-	list_iterator_destroy(itr);
-	list_destroy(assoc_list);
 	return rc;
 }
 
@@ -1255,7 +1236,7 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 			       acct_association_rec_t *assoc,
 			       char *acct,
 			       uint32_t lft, uint32_t rgt,
-			       List ret_list)
+			       List ret_list, int moved_parent)
 {
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
@@ -1427,7 +1408,7 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 						    row[ASSOC_ACCT],
 						    atoi(row[ASSOC_LFT]),
 						    atoi(row[ASSOC_RGT]),
-						    ret_list);
+						    ret_list, moved_parent);
 				destroy_acct_association_rec(mod_assoc);
 				continue;
 			}
@@ -1448,15 +1429,19 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 			}
 			
 			list_append(ret_list, object);
-			
-			if(_addto_update_list(mysql_conn->update_list, 
-					      ACCT_MODIFY_ASSOC,
-					      mod_assoc) != SLURM_SUCCESS) 
-				error("couldn't add to the update list");
-		} else {
-			info("assoc %s is not modified", row[ASSOC_ID]);
-			xfree(mod_assoc);
-		}
+
+			if(moved_parent)
+				destroy_acct_association_rec(mod_assoc);
+			else
+				if(_addto_update_list(mysql_conn->update_list, 
+						      ACCT_MODIFY_ASSOC,
+						      mod_assoc)
+				   != SLURM_SUCCESS) 
+					error("couldn't add to "
+					      "the update list");
+		} else 
+			destroy_acct_association_rec(mod_assoc);
+		
 	}
 	mysql_free_result(result);
 
@@ -3131,7 +3116,7 @@ extern int acct_storage_p_add_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 
 		/* Add user root by default to run from the root
 		 * association.  This gets popped off so we need to
-		 * readd it every time here. 
+		 * re-add it every time here. 
 		 */
 		assoc = xmalloc(sizeof(acct_association_rec_t));
 		init_acct_association_rec(assoc);
@@ -3184,6 +3169,7 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 	int assoc_id = 0;
 	int incr = 0, my_left = 0;
 	int affect_rows = 0;
+	int moved_parent = 0;
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
 	char *old_parent = NULL, *old_cluster = NULL;
@@ -3233,7 +3219,8 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 		xstrcat(cols, "creation_time, mod_time, cluster, acct");
 		xstrfmtcat(vals, "%d, %d, \"%s\", \"%s\"", 
 			   now, now, object->cluster, object->acct); 
-		xstrfmtcat(update, "where id>=0 && cluster=\"%s\" && acct=\"%s\"",
+		xstrfmtcat(update, 
+			   "where cluster=\"%s\" && acct=\"%s\"",
 			   object->cluster, object->acct); 
 
 		xstrfmtcat(extra, ", mod_time=%d", now);
@@ -3434,9 +3421,10 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 						object->cluster,
 						row[MASSOC_ID],
 						row[MASSOC_PACCT],
-						object->parent_acct)
+						object->parent_acct, now)
 				   == SLURM_ERROR)
 					continue;
+				moved_parent = 1;
 			}
 
 
@@ -3472,7 +3460,7 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 		}
 
 		object->id = assoc_id;
-
+		
 		if(_addto_update_list(mysql_conn->update_list, ACCT_ADD_ASSOC,
 				      object) == SLURM_SUCCESS) {
 			list_remove(itr);
@@ -3527,6 +3515,10 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 	}
 
 end_it:
+
+	xfree(old_parent);
+	xfree(old_cluster);
+
 	if(rc != SLURM_ERROR) {
 		if(txn_query) {
 			xstrcat(txn_query, ";");
@@ -3538,6 +3530,45 @@ end_it:
 				rc = SLURM_SUCCESS;
 			}
 		}
+		if(moved_parent) {
+			List assoc_list = NULL;
+			ListIterator itr = NULL;
+			acct_association_rec_t *assoc = NULL;
+			//acct_association_cond_t assoc_cond;
+			/* now we need to send the update of the new parents and
+			 * limits, so just to be safe, send the whole
+			 * tree because we could have some limits that
+			 * were affected but not noticed.
+			 */
+			/* we can probably just look at the mod time now but
+			 * we will have to wait for the next revision number
+			 * since you can't query on mod time here and I don't
+			 * want to rewrite code to make it happen
+			 */
+			//bzero(&assoc_cond, sizeof(acct_association_cond_t));
+			
+			if(!(assoc_list = 
+			     acct_storage_p_get_associations(mysql_conn,
+							     uid, NULL)))
+				return rc;
+			/* NOTE: you can not use list_pop, or list_push
+			   anywhere either, since mysql is
+			   exporting something of the same type as a macro,
+			   which messes everything up (my_list.h is
+			   the bad boy).
+			   So we are just going to delete each item as it
+			   comes out since we are moving it to the update_list.
+			*/
+			itr = list_iterator_create(assoc_list);
+			while((assoc = list_next(itr))) {
+				if(_addto_update_list(mysql_conn->update_list, 
+						      ACCT_MODIFY_ASSOC,
+						      assoc) == SLURM_SUCCESS) 
+					list_remove(itr);
+			}
+			list_iterator_destroy(itr);
+			list_destroy(assoc_list);
+		}
 	} else {
 		xfree(txn_query);
 		if(mysql_conn->rollback) {
@@ -3545,9 +3576,6 @@ end_it:
 		}
 		list_flush(mysql_conn->update_list);
 	}
-
-	xfree(old_parent);
-	xfree(old_cluster);
 					
 	return rc;
 #else
@@ -3616,7 +3644,9 @@ extern int acct_storage_p_add_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 			xfree(vals);
 			continue;
 		}
-
+		/* FIX ME: we have to edit all the other qos's to set
+		   their preemptee or preemptor based on what is here.
+		*/
 		/* we always have a ', ' as the first 2 chars */
 		tmp_extra = _fix_double_quotes(extra+2);
 
@@ -4131,6 +4161,7 @@ extern List acct_storage_p_modify_associations(
 	acct_user_rec_t user;
 	char *tmp_char1=NULL, *tmp_char2=NULL;
 	int set_qos_vals = 0;
+	int moved_parent = 0;
 
 	char *massoc_req_inx[] = {
 		"id",
@@ -4360,9 +4391,11 @@ extern List acct_storage_p_modify_associations(
 						row[MASSOC_CLUSTER],
 						row[MASSOC_ID],
 						row[MASSOC_PACCT],
-						assoc->parent_acct)
+						assoc->parent_acct,
+						now)
 				   == SLURM_ERROR)
 					break;
+				moved_parent = 1;
 			}
 			account_type = 1;
 		}
@@ -4466,7 +4499,8 @@ extern List acct_storage_p_modify_associations(
 					    row[MASSOC_ACCT],
 					    atoi(row[MASSOC_LFT]),
 					    atoi(row[MASSOC_RGT]),
-					    ret_list);
+					    ret_list,
+					    moved_parent);
 		}
 	}
 	mysql_free_result(result);
@@ -4512,7 +4546,47 @@ extern List acct_storage_p_modify_associations(
 			goto end_it;
 		}
 	}
+	if(moved_parent) {
+		List local_assoc_list = NULL;
+		ListIterator local_itr = NULL;
+		acct_association_rec_t *local_assoc = NULL;
+		//acct_association_cond_t local_assoc_cond;
+		/* now we need to send the update of the new parents and
+		 * limits, so just to be safe, send the whole
+		 * tree because we could have some limits that
+		 * were affected but not noticed.
+		 */
+		/* we can probably just look at the mod time now but
+		 * we will have to wait for the next revision number
+		 * since you can't query on mod time here and I don't
+		 * want to rewrite code to make it happen
+		 */
 
+		//bzero(&local_assoc_cond, sizeof(acct_association_cond_t));
+		
+		if(!(local_assoc_list = 
+		     acct_storage_p_get_associations(mysql_conn,
+						     uid, NULL)))
+			return ret_list;
+		/* NOTE: you cannot use list_pop or list_push
+		   anywhere either, since mysql is
+		   exporting something of the same type as a macro,
+		   which messes everything up (my_list.h is
+		   the bad boy).
+		   So we are just going to delete each item as it
+		   comes out since we are moving it to the update_list.
+		*/
+		local_itr = list_iterator_create(local_assoc_list);
+		while((local_assoc = list_next(local_itr))) {
+			if(_addto_update_list(mysql_conn->update_list, 
+					      ACCT_MODIFY_ASSOC,
+					      local_assoc) == SLURM_SUCCESS) 
+				list_remove(local_itr);
+		}
+		list_iterator_destroy(local_itr);
+		list_destroy(local_assoc_list);		
+	}
+	
 end_it:
 	xfree(name_char);
 	xfree(vals);
@@ -6578,7 +6652,7 @@ extern List acct_storage_p_get_associations(mysql_conn_t *mysql_conn,
 	};
 
 	if(!assoc_cond) {
-		xstrcat(extra, "where deleted=0");
+		xstrcat(extra, " where deleted=0");
 		goto empty;
 	}
 
@@ -6847,10 +6921,13 @@ empty:
 				else 
 					parent_qos = NULL;
 
+				xfree(parent_delta_qos);
 				if(row2[ASSOC2_REQ_DELTA_QOS][0])
 					xstrcat(parent_delta_qos, 
 						row2[ASSOC2_REQ_DELTA_QOS]);
-			
+				else
+					parent_delta_qos = NULL;
+
 				if(row2[ASSOC2_REQ_MSJ])
 					parent_msj = atoi(row2[ASSOC2_REQ_MSJ]);
 				else
@@ -6977,6 +7054,7 @@ empty:
 
 	list_destroy(delta_qos_list);
 
+	xfree(parent_delta_qos);
 	xfree(parent_qos);
 
 	return assoc_list;
diff --git a/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c b/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c
index 01209bd206e..9344e7f6a4f 100644
--- a/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c
+++ b/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c
@@ -97,6 +97,7 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 		"t1.qos",
 		"t2.user",
 		"t2.cluster",
+		"t2.acct",
 		"t2.lft"
 	};
 
@@ -144,7 +145,7 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 		JOB_REQ_PARTITION,
 		JOB_REQ_BLOCKID,
 		JOB_REQ_CLUSTER1,
-		JOB_REQ_ACCOUNT,
+		JOB_REQ_ACCOUNT1,
 		JOB_REQ_ELIGIBLE,
 		JOB_REQ_SUBMIT,
 		JOB_REQ_START,
@@ -162,6 +163,7 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 		JOB_REQ_QOS,
 		JOB_REQ_USER_NAME,
 		JOB_REQ_CLUSTER,
+		JOB_REQ_ACCOUNT,
 		JOB_REQ_LFT,
 		JOB_REQ_COUNT		
 	};
@@ -499,8 +501,10 @@ no_cond:
 		if(row[JOB_REQ_LFT])
 			job->lft = atoi(row[JOB_REQ_LFT]);
 
-		if(row[JOB_REQ_ACCOUNT])
+		if(row[JOB_REQ_ACCOUNT] && row[JOB_REQ_ACCOUNT][0])
 			job->account = xstrdup(row[JOB_REQ_ACCOUNT]);
+		else if(row[JOB_REQ_ACCOUNT1] && row[JOB_REQ_ACCOUNT1][0])
+			job->account = xstrdup(row[JOB_REQ_ACCOUNT1]);
 
 		if(row[JOB_REQ_BLOCKID])
 			job->blockid = xstrdup(row[JOB_REQ_BLOCKID]);
diff --git a/src/plugins/accounting_storage/mysql/mysql_rollup.c b/src/plugins/accounting_storage/mysql/mysql_rollup.c
index fd4ce39a0f8..669ddeb8234 100644
--- a/src/plugins/accounting_storage/mysql/mysql_rollup.c
+++ b/src/plugins/accounting_storage/mysql/mysql_rollup.c
@@ -288,7 +288,7 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 				       job_str, job_table, assoc_table,
 				       curr_end, curr_start, curr_start);
 
-		debug3("%d query\n%s", mysql_conn->conn, query);
+		debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 		if(!(result = mysql_db_query_ret(
 			     mysql_conn->db_conn, query, 0))) {
 			xfree(query);
@@ -342,7 +342,8 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 					curr_end, curr_start,
 					row[JOB_REQ_DB_INX]);
 				
-				debug4("%d query\n%s", mysql_conn->conn, query);
+				debug4("%d(%d) query\n%s",
+				       mysql_conn->conn, __LINE__, query);
 				if(!(result2 = mysql_db_query_ret(
 					     mysql_conn->db_conn,
 					     query, 0))) {
@@ -369,8 +370,7 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 					
 					seconds -= (local_end - local_start);
 				}
-				mysql_free_result(result2);			
-
+				mysql_free_result(result2);
 			}
 			if(seconds < 1) {
 				debug4("This job (%u) was suspended "
@@ -451,13 +451,13 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			 * commit field
 			 */
 			
-			if(c_usage->i_cpu < 0) {
+			if((int64_t)c_usage->i_cpu < 0) {
 /* 				info("got %d %d %d", c_usage->r_cpu, */
 /* 				     c_usage->i_cpu, c_usage->o_cpu); */
 				c_usage->r_cpu += c_usage->i_cpu;
 				c_usage->o_cpu -= c_usage->i_cpu;
 				c_usage->i_cpu = 0;
-				if(c_usage->r_cpu < 0)
+				if((int64_t)c_usage->r_cpu < 0)
 					c_usage->r_cpu = 0;
 			}
 			
@@ -510,7 +510,8 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 				   "over_cpu_secs=VALUES(over_cpu_secs), "
 				   "resv_cpu_secs=VALUES(resv_cpu_secs)",
 				   now);
-			debug3("%d query\n%s", mysql_conn->conn, query);
+			debug3("%d(%d) query\n%s",
+			       mysql_conn->conn, __LINE__, query);
 			rc = mysql_db_query(mysql_conn->db_conn, query);
 			xfree(query);
 			if(rc != SLURM_SUCCESS) {
@@ -548,7 +549,8 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 				   "alloc_cpu_secs=VALUES(alloc_cpu_secs)",
 				   now);
 					   	
-			debug3("%d query\n%s", mysql_conn->conn, query);
+			debug3("%d(%d) query\n%s",
+			       mysql_conn->conn, __LINE__, query);
 			rc = mysql_db_query(mysql_conn->db_conn, query);
 			xfree(query);
 			if(rc != SLURM_SUCCESS) {
@@ -633,7 +635,7 @@ extern int mysql_daily_rollup(mysql_conn_t *mysql_conn,
 			   cluster_day_table, now, now, curr_start,
 			   cluster_hour_table,
 			   curr_end, curr_start, now);
-		debug3("%d query\n%s", mysql_conn->conn, query);
+		debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 		rc = mysql_db_query(mysql_conn->db_conn, query);
 		xfree(query);
 		if(rc != SLURM_SUCCESS) {
@@ -729,7 +731,7 @@ extern int mysql_monthly_rollup(mysql_conn_t *mysql_conn,
 			   cluster_month_table, now, now, curr_start,
 			   cluster_day_table,
 			   curr_end, curr_start, now);
-		debug3("%d query\n%s", mysql_conn->conn, query);
+		debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 		rc = mysql_db_query(mysql_conn->db_conn, query);
 		xfree(query);
 		if(rc != SLURM_SUCCESS) {
diff --git a/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c b/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c
index 90d78b19204..38916d9b23d 100644
--- a/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c
+++ b/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c
@@ -112,9 +112,8 @@ extern int init ( void )
 			      plugin_name);
 		xfree(cluster_name);
 		slurmdbd_auth_info = slurm_get_accounting_storage_pass();
-		if(!slurmdbd_auth_info)			
-			verbose("%s loaded AuthInfo=%s",
-				plugin_name, slurmdbd_auth_info);
+		verbose("%s loaded with AuthInfo=%s",
+			plugin_name, slurmdbd_auth_info);
 		first = 0;
 	} else {
 		debug4("%s loaded", plugin_name);
diff --git a/src/plugins/jobcomp/script/jobcomp_script.c b/src/plugins/jobcomp/script/jobcomp_script.c
index 9d5259ed2ef..ee7a7770e41 100644
--- a/src/plugins/jobcomp/script/jobcomp_script.c
+++ b/src/plugins/jobcomp/script/jobcomp_script.c
@@ -64,6 +64,7 @@
 #include "src/common/slurm_protocol_defs.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
+#include "src/common/node_select.h"
 #include "src/common/list.h"
 #include "src/slurmctld/slurmctld.h"
 
@@ -158,6 +159,15 @@ struct jobcomp_info {
 	char *partition;
 	char *jobstate;
 	char *account;
+#ifdef HAVE_BG
+	char *connect_type;
+	char *reboot;
+	char *rotate;
+	char *maxprocs;
+	char *geometry;
+	char *block_start;
+	char *blockid;
+#endif
 };
 
 static struct jobcomp_info * _jobcomp_info_create (struct job_record *job)
@@ -188,7 +198,22 @@ static struct jobcomp_info * _jobcomp_info_create (struct job_record *job)
 	j->nprocs = job->total_procs;
 	j->nnodes = job->node_cnt;
 	j->account = job->account ? xstrdup (job->account) : NULL;
-
+#ifdef HAVE_BG
+	j->connect_type = select_g_xstrdup_jobinfo(job->select_jobinfo,
+						   SELECT_PRINT_CONNECTION);
+	j->reboot = select_g_xstrdup_jobinfo(job->select_jobinfo,
+					     SELECT_PRINT_REBOOT);
+	j->rotate = select_g_xstrdup_jobinfo(job->select_jobinfo,
+					     SELECT_PRINT_ROTATE);
+	j->maxprocs = select_g_xstrdup_jobinfo(job->select_jobinfo,
+					       SELECT_PRINT_MAX_PROCS);
+	j->geometry = select_g_xstrdup_jobinfo(job->select_jobinfo,
+					       SELECT_PRINT_GEOMETRY);
+	j->block_start = select_g_xstrdup_jobinfo(job->select_jobinfo,
+						  SELECT_PRINT_START);
+	j->blockid = select_g_xstrdup_jobinfo(job->select_jobinfo,
+					      SELECT_PRINT_BG_ID);
+#endif
 	return (j);
 }
 
@@ -201,6 +226,15 @@ static void _jobcomp_info_destroy (struct jobcomp_info *j)
 	xfree (j->nodes);
 	xfree (j->jobstate);
 	xfree (j->account);
+#ifdef HAVE_BG
+	xfree (j->connect_type);
+	xfree (j->reboot);
+	xfree (j->rotate);
+	xfree (j->maxprocs);
+	xfree (j->geometry);
+	xfree (j->block_start);
+	xfree (j->blockid);
+#endif
 	xfree (j);
 }
 
@@ -305,6 +339,16 @@ static char ** _create_environment (struct jobcomp_info *job)
 	_env_append (&env, "JOBSTATE",  job->jobstate);
 	_env_append (&env, "PARTITION", job->partition);
 	
+#ifdef HAVE_BG
+	_env_append (&env, "CONNECT_TYPE", job->connect_type);
+	_env_append (&env, "REBOOT",       job->reboot);
+	_env_append (&env, "ROTATE",       job->rotate);
+	_env_append (&env, "MAXPROCS",     job->maxprocs);
+	_env_append (&env, "GEOMETRY",     job->geometry);
+	_env_append (&env, "BLOCK_START",  job->block_start);
+	_env_append (&env, "BLOCKID",      job->blockid);
+#endif
+
 	if (job->limit == INFINITE)
 		_env_append (&env, "LIMIT", "UNLIMITED");
 	else 
@@ -317,6 +361,7 @@ static char ** _create_environment (struct jobcomp_info *job)
 #else
 	_env_append (&env, "PATH", "/bin:/usr/bin");
 #endif
+
 	return (env);
 }
 
diff --git a/src/plugins/sched/backfill/backfill.c b/src/plugins/sched/backfill/backfill.c
index dc5e601af31..a9bd6b0f500 100644
--- a/src/plugins/sched/backfill/backfill.c
+++ b/src/plugins/sched/backfill/backfill.c
@@ -217,7 +217,7 @@ static void _attempt_backfill(void)
 {
 	bool filter_root = false;
 	struct job_queue *job_queue = NULL;
-	int i, j,job_queue_size, node_space_recs = 0;
+	int i, j,job_queue_size, node_space_recs;
 	struct job_record *job_ptr;
 	struct part_record *part_ptr;
 	uint32_t end_time, end_reserve, time_limit;
@@ -238,9 +238,9 @@ static void _attempt_backfill(void)
 
 	node_space[0].begin_time = now;
 	node_space[0].end_time = now + BACKFILL_WINDOW;
-	node_space[0].avail_bitmap = bit_alloc(node_record_count);
-	bit_or(node_space[0].avail_bitmap, avail_node_bitmap);
+	node_space[0].avail_bitmap = bit_copy(avail_node_bitmap);
 	node_space[0].next = 0;
+	node_space_recs = 1;
 #if __DEBUG
 	_dump_node_space_table(node_space);
 #endif
@@ -248,6 +248,10 @@ static void _attempt_backfill(void)
 	for (i = 0; i < job_queue_size; i++) {
 		job_ptr = job_queue[i].job_ptr;
 		part_ptr = job_ptr->part_ptr;
+#if __DEBUG
+		info("backfill test for job %u", job_ptr->job_id);
+#endif
+
 		if (part_ptr == NULL) {
 			part_ptr = find_part_record(job_ptr->partition);
 			xassert(part_ptr);
@@ -299,7 +303,6 @@ static void _attempt_backfill(void)
 				time_limit = MIN(job_ptr->time_limit,
 						 part_ptr->max_time);
 		}
-		/* Permit a bit of extra time for job clean-up */
 		end_time = (time_limit * 60) + now;
 
 		/* Identify usable nodes for this job */
@@ -307,10 +310,11 @@ static void _attempt_backfill(void)
 		avail_bitmap = bit_copy(part_ptr->node_bitmap);
 		bit_and(avail_bitmap, up_node_bitmap);
 		for (j=0; ; ) {
-			if (node_space[j].end_time <= end_time) {
+			if (node_space[j].begin_time <= end_time) {
 				bit_and(avail_bitmap, 
 					node_space[j].avail_bitmap);
-			}
+			} else
+				break;
 			if ((j = node_space[j].next) == 0)
 				break;
 		}
@@ -327,19 +331,23 @@ static void _attempt_backfill(void)
 				    avail_bitmap)))
 			continue;	/* required nodes missing */
 		if (bit_set_count(avail_bitmap) < min_nodes)
-			continue;	/* no nodes remain */
+			continue;	/* insufficient nodes remain */
 
 		/* Try to schedule the job. First on dedicated nodes
 		 * then on shared nodes (if so configured). */
 		orig_shared = job_ptr->details->shared;
 		job_ptr->details->shared = 0;
 		tmp_bitmap = bit_copy(avail_bitmap);
-		j = select_nodes(job_ptr, true, &avail_bitmap); 
+		j = select_g_job_test(job_ptr, avail_bitmap, min_nodes,
+				      max_nodes, req_nodes,
+				      SELECT_MODE_WILL_RUN);
 		job_ptr->details->shared = orig_shared;
 		if ((j != SLURM_SUCCESS) && (orig_shared != 0)) {
 			FREE_NULL_BITMAP(avail_bitmap);
 			avail_bitmap= tmp_bitmap;
-			j = select_nodes(job_ptr, true, &avail_bitmap);
+			j = select_g_job_test(job_ptr, avail_bitmap, min_nodes,
+					      max_nodes, req_nodes,
+					      SELECT_MODE_WILL_RUN);
 		} else
 			FREE_NULL_BITMAP(tmp_bitmap);
 		if (j != SLURM_SUCCESS)
@@ -487,50 +495,10 @@ static void _add_reservation(uint32_t start_time, uint32_t end_reserve,
 			break;
 	}
 
-#if 0
-	/* This records end of reservation so we maintain a full map
-	 * of when jobs start and end. Since we only care about starting 
-	 * jobs right now, the end of reservation time is not very useful
-	 * unless we want to track expected job initiation time, which 
-	 * would necessitate additional logic. */
-	for (j=0; ; ) {
-		if ((node_space[j].begin_time < end_reserve) &&
-		    (node_space[j].end_time   > end_reserve)) {
-			/* insert end entry record */
-			i = *node_space_recs;
-			node_space[i].begin_time = node_space[j].begin_time;
-			node_space[j].begin_time = end_reserve;
-			node_space[i].end_time = end_reserve;
-			node_space[i].avail_bitmap = 
-				bit_copy(node_space[j].avail_bitmap);
-			node_space[i].next = j;
-			node_space[previous].next = i;
-			(*node_space_recs)++;
-			break;
-		}
-		if (node_space[j].end_time == end_reserve) {
-			/* no need to insert end entry record */
-			break;
-		}
-		previous = j;
-		if ((j = node_space[j].next) == 0)
-			break;
-	}
-
-	for (j=0; ; ) {
-		if ((node_space[j].begin_time >= start_time) &&
-		    (node_space[j].end_time   <= end_reserve)) {
-			bit_and(node_space[j].avail_bitmap, res_bitmap);
-		}
-		if ((j = node_space[j].next) == 0)
-			break;
-	}
-#else
 	for (j=0; ; ) {
 		if (node_space[j].begin_time >= start_time)
 			bit_and(node_space[j].avail_bitmap, res_bitmap);
 		if ((j = node_space[j].next) == 0)
 			break;
 	}
-#endif
 }
diff --git a/src/plugins/sched/wiki2/job_modify.c b/src/plugins/sched/wiki2/job_modify.c
index 714a38976b9..a7f20323266 100644
--- a/src/plugins/sched/wiki2/job_modify.c
+++ b/src/plugins/sched/wiki2/job_modify.c
@@ -235,7 +235,7 @@ host_fini:	if (rc) {
 /* Modify a job:
  *	CMD=MODIFYJOB ARG=<jobid> PARTITION=<name> NODES=<number>
  *		DEPEND=afterany:<jobid> TIMELIMT=<seconds> BANK=<name>
- *		MINSTARTTIME=<uts> RFEATURES=<features>
+ *		JOBNAME=<name> MINSTARTTIME=<uts> RFEATURES=<features>
  * RET 0 on success, -1 on failure */
 extern int	job_modify_wiki(char *cmd_ptr, int *err_code, char **err_msg)
 {
diff --git a/src/plugins/select/bluegene/plugin/bg_job_place.c b/src/plugins/select/bluegene/plugin/bg_job_place.c
index 1635f46e694..5790705c8d0 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_place.c
+++ b/src/plugins/select/bluegene/plugin/bg_job_place.c
@@ -82,7 +82,7 @@ static bg_record_t *_find_matching_block(List block_list,
 					 bitstr_t* slurm_block_bitmap,
 					 ba_request_t *request,
 					 uint32_t max_procs,
-					 int allow, int check_image,
+					 int *allow, int check_image,
 					 int overlap_check,
 					 List overlapped_list,
 					 bool test_only);
@@ -364,7 +364,7 @@ static bg_record_t *_find_matching_block(List block_list,
 					 bitstr_t* slurm_block_bitmap,
 					 ba_request_t *request,
 					 uint32_t max_procs,
-					 int allow, int check_image,
+					 int *allow, int check_image,
 					 int overlap_check,
 					 List overlapped_list,
 					 bool test_only)
@@ -453,22 +453,22 @@ static bg_record_t *_find_matching_block(List block_list,
 			if(request->blrtsimage &&
 			   strcasecmp(request->blrtsimage,
 				      bg_record->blrtsimage)) {
-				allow = 1;
+				*allow = 1;
 				continue;
 			} else if(request->linuximage &&
 			   strcasecmp(request->linuximage,
 				      bg_record->linuximage)) {
-				allow = 1;
+				*allow = 1;
 				continue;
 			} else if(request->mloaderimage &&
 			   strcasecmp(request->mloaderimage, 
 				      bg_record->mloaderimage)) {
-				allow = 1;
+				*allow = 1;
 				continue;
 			} else if(request->ramdiskimage &&
 			   strcasecmp(request->ramdiskimage,
 				      bg_record->ramdiskimage)) {
-				allow = 1;
+				*allow = 1;
 				continue;
 			}			
 		}
@@ -981,7 +981,7 @@ static int _find_best_block_match(List block_list,
 						 slurm_block_bitmap,
 						 &request,
 						 max_procs,
-						 allow, check_image,
+						 &allow, check_image,
 						 overlap_check, 
 						 overlapped_list,
 						 test_only);
diff --git a/src/plugins/select/bluegene/plugin/bg_record_functions.h b/src/plugins/select/bluegene/plugin/bg_record_functions.h
index 06b858962f7..d168814d15e 100644
--- a/src/plugins/select/bluegene/plugin/bg_record_functions.h
+++ b/src/plugins/select/bluegene/plugin/bg_record_functions.h
@@ -72,20 +72,11 @@ typedef struct bg_record {
 					   being modified or not at
 					   job launch usually */
 	uid_t user_uid;   		/* Owner of block uid	*/
-	uint16_t state;         	/* really rm_partition_state_t
-					 * but needs to be uint16_t
-					 * for packing purposes.
-					 * Current state of the block */
+	rm_partition_state_t state;     /* Current state of the block */
 	int start[BA_SYSTEM_DIMENSIONS];/* start node */
 	uint16_t geo[BA_SYSTEM_DIMENSIONS];  /* geometry */
-	uint16_t conn_type;     	/* really rm_connection_type_t
-					 * but needs to be uint16_t
-					 * for packing purposes. Mesh
-					 * or Torus or NAV */
-	uint16_t node_use;	        /* really rm_partition_mode_t
-					 * but needs to be uint16_t
-					 * for packing purposes.
-					 * either COPROCESSOR or VIRTUAL */
+	rm_connection_type_t conn_type;  /* MESH or Torus or NAV */
+	rm_partition_mode_t node_use;	 /* either COPROCESSOR or VIRTUAL */
 	rm_partition_t *bg_block;       /* structure to hold info from db2 */
 	List bg_block_list;             /* node list of blocks in block */
 	int bp_count;                   /* size */
diff --git a/src/plugins/select/bluegene/plugin/bluegene.c b/src/plugins/select/bluegene/plugin/bluegene.c
index 95624e3b0b1..8c2b9410510 100644
--- a/src/plugins/select/bluegene/plugin/bluegene.c
+++ b/src/plugins/select/bluegene/plugin/bluegene.c
@@ -541,7 +541,7 @@ extern int bg_free_block(bg_record_t *bg_record)
 		}
 		
 		slurm_mutex_lock(&block_state_mutex);			
-		if (bg_record->state != (uint16_t)NO_VAL
+		if (bg_record->state != NO_VAL
 		    && bg_record->state != RM_PARTITION_FREE 
 		    && bg_record->state != RM_PARTITION_DEALLOCATING) {
 #ifdef HAVE_BG_FILES
diff --git a/src/sacct/print.c b/src/sacct/print.c
index f94a8e3da0c..58fdd010c11 100644
--- a/src/sacct/print.c
+++ b/src/sacct/print.c
@@ -257,19 +257,19 @@ void print_job(type_t type, void *object)
 
 	switch(type) {
 	case HEADLINE:
-		printf("%-8s", "Job");
+		printf("%-12s", "Job");
 		break;
 	case UNDERSCORE:
-		printf("%-8s", "--------");
+		printf("%-12s", "------------");
 		break;
 	case JOB:
-		printf("%-8u", job->jobid);
+		printf("%-12u", job->jobid);
 		break;
 	case JOBSTEP:
-		printf("%-8s", " ");
+		printf("%-12s", " ");
 		break;
 	default:
-		printf("%-8s", "n/a");
+		printf("%-12s", "n/a");
 		break;
 	} 
 }
diff --git a/src/sacctmgr/account_functions.c b/src/sacctmgr/account_functions.c
index c1c75fdef05..eb656fd7fca 100644
--- a/src/sacctmgr/account_functions.c
+++ b/src/sacctmgr/account_functions.c
@@ -73,6 +73,13 @@ static int _set_cond(int *start, int argc, char *argv[],
 		} else if (!end && 
 			   !strncasecmp (argv[i], "WithCoordinators", 5)) {
 			acct_cond->with_coords = 1;
+		} else if (!end && 
+			   !strncasecmp (argv[i], "WithRawQOS", 5)) {
+			assoc_cond->with_raw_qos = 1;
+		} else if (!end && !strncasecmp (argv[i], "WOPInfo", 4)) {
+			assoc_cond->without_parent_info = 1;
+		} else if (!end && !strncasecmp (argv[i], "WOPLimits", 4)) {
+			assoc_cond->without_parent_limits = 1;
 		} else if (!end && 
 			   !strncasecmp (argv[i], "WithSubAccounts", 5)) {
 			assoc_cond->with_sub_accts = 1;
@@ -1263,6 +1270,10 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 							 field_count));
 						break;
 					default:
+						field->print_routine(
+							field, NULL,
+							(curr_inx == 
+							 field_count));
 						break;
 					}
 					curr_inx++;
@@ -1333,6 +1344,9 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 							 field_count));
 					break;
 				default:
+					field->print_routine(
+						field, NULL,
+						(curr_inx == field_count));
 					break;
 				}
 				curr_inx++;
diff --git a/src/sacctmgr/association_functions.c b/src/sacctmgr/association_functions.c
index 9dd3af80dea..fcbedb90ecb 100644
--- a/src/sacctmgr/association_functions.c
+++ b/src/sacctmgr/association_functions.c
@@ -53,6 +53,9 @@ static int _set_cond(int *start, int argc, char *argv[],
 			tree_display = 1;
 		} else if (!end && !strncasecmp (argv[i], "WithDeleted", 5)) {
 			assoc_cond->with_deleted = 1;
+		} else if (!end && 
+			   !strncasecmp (argv[i], "WithRawQOS", 5)) {
+			assoc_cond->with_raw_qos = 1;
 		} else if (!end && 
 			   !strncasecmp (argv[i], "WithSubAccounts", 5)) {
 			assoc_cond->with_sub_accts = 1;
@@ -785,6 +788,9 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 						     (curr_inx == field_count));
 				break;
 			default:
+				field->print_routine(
+					field, NULL,
+					(curr_inx == field_count));
 				break;
 			}
 			curr_inx++;
diff --git a/src/sacctmgr/cluster_functions.c b/src/sacctmgr/cluster_functions.c
index eeb9f074637..74a99cdaebc 100644
--- a/src/sacctmgr/cluster_functions.c
+++ b/src/sacctmgr/cluster_functions.c
@@ -652,6 +652,9 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 					(curr_inx == field_count));
 				break;
 			default:
+				field->print_routine(
+					field, NULL,
+					(curr_inx == field_count));
 				break;
 			}
 			curr_inx++;
diff --git a/src/sacctmgr/common.c b/src/sacctmgr/common.c
index aea230c894c..7d9e693bb25 100644
--- a/src/sacctmgr/common.c
+++ b/src/sacctmgr/common.c
@@ -995,7 +995,8 @@ extern void sacctmgr_print_assoc_limits(acct_association_rec_t *assoc)
 	if(assoc->grp_cpu_mins == INFINITE)
 		printf("  GrpCPUMins    = NONE\n");
 	else if(assoc->grp_cpu_mins != NO_VAL) 
-		printf("  GrpCPUMins    = %llu\n", assoc->grp_cpu_mins);
+		printf("  GrpCPUMins    = %llu\n", 
+		       (long long unsigned)assoc->grp_cpu_mins);
 		
 	if(assoc->grp_cpus == INFINITE)
 		printf("  GrpCPUs       = NONE\n");
@@ -1030,7 +1031,8 @@ extern void sacctmgr_print_assoc_limits(acct_association_rec_t *assoc)
 	if(assoc->max_cpu_mins_pj == INFINITE)
 		printf("  MaxCPUMins    = NONE\n");
 	else if(assoc->max_cpu_mins_pj != NO_VAL) 
-		printf("  MaxCPUMins    = %llu\n", assoc->max_cpu_mins_pj);
+		printf("  MaxCPUMins    = %llu\n", 
+		       (long long unsigned)assoc->max_cpu_mins_pj);
 		
 	if(assoc->max_cpus_pj == INFINITE)
 		printf("  MaxCPUs       = NONE\n");
@@ -1067,7 +1069,7 @@ extern void sacctmgr_print_assoc_limits(acct_association_rec_t *assoc)
 		char *temp_char = get_qos_complete_str(qos_list,
 						       assoc->qos_list);
 		if(temp_char) {		
-			printf("  Qos           = %s\n", temp_char);
+			printf("  QOS           = %s\n", temp_char);
 			xfree(temp_char);
 		}
 		if(qos_list)
@@ -1090,7 +1092,8 @@ extern void sacctmgr_print_qos_limits(acct_qos_rec_t *qos)
 	if(qos->grp_cpu_mins == INFINITE)
 		printf("  GrpCPUMins     = NONE\n");
 	else if(qos->grp_cpu_mins != NO_VAL) 
-		printf("  GrpCPUMins     = %llu\n", qos->grp_cpu_mins);
+		printf("  GrpCPUMins     = %llu\n", 
+		       (long long unsigned)qos->grp_cpu_mins);
 		
 	if(qos->grp_cpus == INFINITE)
 		printf("  GrpCPUs        = NONE\n");
@@ -1125,7 +1128,8 @@ extern void sacctmgr_print_qos_limits(acct_qos_rec_t *qos)
 	if(qos->max_cpu_mins_pu == INFINITE)
 		printf("  MaxCPUMins     = NONE\n");
 	else if(qos->max_cpu_mins_pu != NO_VAL) 
-		printf("  MaxCPUMins     = %llu\n", qos->max_cpu_mins_pu);
+		printf("  MaxCPUMins     = %llu\n", 
+		       (long long unsigned)qos->max_cpu_mins_pu);
 		
 	if(qos->max_cpus_pu == INFINITE)
 		printf("  MaxCPUs        = NONE\n");
diff --git a/src/sacctmgr/file_functions.c b/src/sacctmgr/file_functions.c
index 974a6eb37fb..173a4daa4ad 100644
--- a/src/sacctmgr/file_functions.c
+++ b/src/sacctmgr/file_functions.c
@@ -649,7 +649,7 @@ static List _set_up_print_fields(List format_list)
 	return print_fields_list;
 }
 
-static int _print_out_assoc(List assoc_list, bool user)
+static int _print_out_assoc(List assoc_list, bool user, bool add)
 {
 	List format_list = NULL;
 	List print_fields_list = NULL;
@@ -664,12 +664,12 @@ static int _print_out_assoc(List assoc_list, bool user)
 	format_list = list_create(slurm_destroy_char);
 	if(user)
 		slurm_addto_char_list(format_list,
-				      "User,Account,F,GrpCH,GrpC,"
+				      "User,Account,F,GrpCPUM,GrpCPUs,"
 				      "GrpJ,GrpN,GrpS,GrpW,MaxCPUM,MaxCPUs,"
 				      "MaxJ,MaxS,MaxN,MaxW,QOS");
 	else 
 		slurm_addto_char_list(format_list,
-				      "Account,Parent,F,GrpCH,GrpC,"
+				      "Account,Parent,F,GrpCPUM,GrpCPUs,"
 				      "GrpJ,GrpN,GrpS,GrpW,MaxCPUM,MaxCPUs,"
 				      "MaxJ,MaxS,MaxN,MaxW,QOS");
 	
@@ -762,6 +762,8 @@ static int _print_out_assoc(List assoc_list, bool user)
 						     assoc->user);
 				break;
 			default:
+				field->print_routine(
+					field, NULL);
 				break;
 			}
 		}
@@ -771,8 +773,10 @@ static int _print_out_assoc(List assoc_list, bool user)
 	list_iterator_destroy(itr);
 	list_iterator_destroy(itr2);
 	list_destroy(print_fields_list);
-	rc = acct_storage_g_add_associations(db_conn, my_uid, assoc_list);
-	printf("---------------------------------------------------\n\n");
+	if(add)
+		rc = acct_storage_g_add_associations(db_conn, 
+						     my_uid, assoc_list);
+	printf("--------------------------------------------------------------\n\n");
 
 	return rc;
 }
@@ -1484,6 +1488,70 @@ static acct_account_rec_t *_set_acct_up(sacctmgr_file_opts_t *file_opts,
 	return acct;
 }
 
+static acct_association_rec_t *_set_assoc_up(sacctmgr_file_opts_t *file_opts,
+					     sacctmgr_mod_type_t mod_type,
+					     char *cluster, char *parent)
+{
+	acct_association_rec_t *assoc = NULL;
+
+	if(!cluster) {
+		error("No cluster name was given for _set_assoc_up");
+		return NULL;
+	}
+
+	if(!parent && (mod_type != MOD_CLUSTER)) {
+		error("No parent was given for _set_assoc_up");
+		return NULL;
+	}
+
+	assoc = xmalloc(sizeof(acct_association_rec_t));
+	init_acct_association_rec(assoc);
+
+	switch(mod_type) {
+	case MOD_CLUSTER:
+		assoc->acct = xstrdup(parent);
+		assoc->cluster = xstrdup(cluster);
+		break;
+	case MOD_ACCT:
+		assoc->acct = xstrdup(file_opts->name);
+		assoc->cluster = xstrdup(cluster);
+		assoc->parent_acct = xstrdup(parent);
+		break;
+	case MOD_USER:
+		assoc->acct = xstrdup(parent);
+		assoc->cluster = xstrdup(cluster);
+		assoc->partition = xstrdup(file_opts->part);
+		assoc->user = xstrdup(file_opts->name);
+		break;
+	default:
+		error("Unknown mod type for _set_assoc_up %d", mod_type);
+		destroy_acct_association_rec(assoc);
+		assoc = NULL;
+		break;
+	}
+
+	
+	assoc->fairshare = file_opts->fairshare;
+	
+	assoc->grp_cpu_mins = file_opts->grp_cpu_mins;
+	assoc->grp_cpus = file_opts->grp_cpus;
+	assoc->grp_jobs = file_opts->grp_jobs;
+	assoc->grp_nodes = file_opts->grp_nodes;
+	assoc->grp_submit_jobs = file_opts->grp_submit_jobs;
+	assoc->grp_wall = file_opts->grp_wall;
+	
+	assoc->max_jobs = file_opts->max_jobs;
+	assoc->max_nodes_pj = file_opts->max_nodes_pj;
+	assoc->max_wall_pj = file_opts->max_wall_pj;
+	assoc->max_cpu_mins_pj = file_opts->max_cpu_mins_pj;
+
+	if(file_opts->qos_list && list_count(file_opts->qos_list)) 
+		assoc->qos_list = copy_char_list(file_opts->qos_list);
+
+
+	return assoc;
+}
+
 static int _print_file_sacctmgr_assoc_childern(FILE *fd, 
 					       List sacctmgr_assoc_list,
 					       List user_list,
@@ -1921,21 +1989,23 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 
 			if(!(cluster = sacctmgr_find_cluster_from_list(
 				     curr_cluster_list, cluster_name))) {
+				List temp_assoc_list = list_create(NULL);
 				List cluster_list =
 					list_create(destroy_acct_cluster_rec);
+
 				cluster = xmalloc(sizeof(acct_cluster_rec_t));
 				list_append(cluster_list, cluster);
 				cluster->name = xstrdup(cluster_name);
-				cluster->root_assoc->fairshare =
-					file_opts->fairshare;		
-				cluster->root_assoc->max_cpu_mins_pj = 
-					file_opts->max_cpu_mins_pj;
-				cluster->root_assoc->max_jobs = file_opts->max_jobs;
-				cluster->root_assoc->max_nodes_pj = 
-					file_opts->max_nodes_pj;
-				cluster->root_assoc->max_wall_pj = 
-					file_opts->max_wall_pj;
+				cluster->root_assoc = _set_assoc_up(
+					file_opts, MOD_CLUSTER,
+					cluster_name, "root");
+				list_append(temp_assoc_list,
+					    cluster->root_assoc);
+				
+				rc = _print_out_assoc(temp_assoc_list, 0, 0);
+				list_destroy(temp_assoc_list);
 				notice_thread_init();
+				
 				rc = acct_storage_g_add_clusters(
 					db_conn, my_uid, cluster_list);
 				notice_thread_fini();
@@ -2038,18 +2108,9 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 				/* don't add anything to the
 				   curr_acct_list */
 
-				assoc = xmalloc(sizeof(acct_association_rec_t));
-				assoc->acct = xstrdup(file_opts->name);
-				assoc->cluster = xstrdup(cluster_name);
-				assoc->parent_acct = xstrdup(parent);
-				assoc->fairshare = file_opts->fairshare;
-				assoc->max_jobs = file_opts->max_jobs;
-				assoc->max_nodes_pj =
-					file_opts->max_nodes_pj;
-				assoc->max_wall_pj =
-					file_opts->max_wall_pj;
-				assoc->max_cpu_mins_pj = 
-					file_opts->max_cpu_mins_pj;
+				assoc = _set_assoc_up(file_opts, MOD_ACCT,
+						      cluster_name, parent);
+
 				list_append(acct_assoc_list, assoc);
 				/* don't add anything to the
 				   curr_assoc_list */
@@ -2074,18 +2135,10 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 				} else {
 					debug2("already modified this account");
 				}
-				assoc = xmalloc(sizeof(acct_association_rec_t));
-				assoc->acct = xstrdup(file_opts->name);
-				assoc->cluster = xstrdup(cluster_name);
-				assoc->parent_acct = xstrdup(parent);
-				assoc->fairshare = file_opts->fairshare;
-				assoc->max_jobs = file_opts->max_jobs;
-				assoc->max_nodes_pj =
-					file_opts->max_nodes_pj;
-				assoc->max_wall_pj =
-					file_opts->max_wall_pj;
-				assoc->max_cpu_mins_pj = 
-					file_opts->max_cpu_mins_pj;
+
+				assoc = _set_assoc_up(file_opts, MOD_ACCT,
+						      cluster_name, parent);
+
 				list_append(acct_assoc_list, assoc);
 				/* don't add anything to the
 				   curr_assoc_list */
@@ -2112,6 +2165,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 				if(!assoc2) {
 					assoc2 = xmalloc(
 						sizeof(acct_association_rec_t));
+					init_acct_association_rec(assoc2);
 					list_append(mod_assoc_list, assoc2);
 					assoc2->cluster = xstrdup(cluster_name);
 					assoc2->acct = xstrdup(file_opts->name);
@@ -2144,20 +2198,9 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 				/* don't add anything to the
 				   curr_user_list */
 
-				assoc = xmalloc(sizeof(acct_association_rec_t));
-				assoc->acct = xstrdup(parent);
-				assoc->cluster = xstrdup(cluster_name);
-				assoc->fairshare = file_opts->fairshare;
-				assoc->max_jobs = file_opts->max_jobs;
-				assoc->max_nodes_pj =
-					file_opts->max_nodes_pj;
-				assoc->max_wall_pj =
-					file_opts->max_wall_pj;
-				assoc->max_cpu_mins_pj = 
-					file_opts->max_cpu_mins_pj;
-				assoc->partition = xstrdup(file_opts->part);
-				assoc->user = xstrdup(file_opts->name);
-				
+				assoc = _set_assoc_up(file_opts, MOD_USER,
+						      cluster_name, parent);
+
 				list_append(user_assoc_list, assoc);
 				/* don't add anything to the
 				   curr_assoc_list */
@@ -2194,20 +2237,9 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 					debug2("already modified this user");
 				}
 			new_association:
-				assoc = xmalloc(sizeof(acct_association_rec_t));
-				assoc->acct = xstrdup(parent);
-				assoc->cluster = xstrdup(cluster_name);
-				assoc->fairshare = file_opts->fairshare;
-				assoc->max_jobs = file_opts->max_jobs;
-				assoc->max_nodes_pj =
-					file_opts->max_nodes_pj;
-				assoc->max_wall_pj =
-					file_opts->max_wall_pj;
-				assoc->max_cpu_mins_pj = 
-					file_opts->max_cpu_mins_pj;
-				assoc->partition = xstrdup(file_opts->part);
-				assoc->user = xstrdup(file_opts->name);
-				
+				assoc = _set_assoc_up(file_opts, MOD_USER,
+						      cluster_name, parent);
+
 				list_append(user_assoc_list, assoc);
 				/* don't add anything to the
 				   curr_assoc_list */
@@ -2234,6 +2266,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 				if(!assoc2) {
 					assoc2 = xmalloc(
 						sizeof(acct_association_rec_t));
+					init_acct_association_rec(assoc2);
 					list_append(mod_assoc_list, assoc2);
 					assoc2->cluster = xstrdup(cluster_name);
 					assoc2->acct = xstrdup(parent);
@@ -2291,6 +2324,8 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 						field, acct->organization);
 					break;
 				default:
+					field->print_routine(
+						field, NULL);
 					break;
 				}
 			}
@@ -2308,7 +2343,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 	
 	if(rc == SLURM_SUCCESS && list_count(acct_assoc_list)) {
 		printf("Account Associations\n");
-		_print_out_assoc(acct_assoc_list, 0);
+		rc = _print_out_assoc(acct_assoc_list, 0, 1);
 		set = 1;
 	}
 	if(rc == SLURM_SUCCESS && list_count(user_list)) {
@@ -2347,6 +2382,8 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 						field, user->name);
 					break;
 				default:
+					field->print_routine(
+						field, NULL);
 					break;
 				}
 			}
@@ -2365,7 +2402,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 	
 	if(rc == SLURM_SUCCESS && list_count(user_assoc_list)) {
 		printf("User Associations\n");
-		_print_out_assoc(user_assoc_list, 1);
+		rc = _print_out_assoc(user_assoc_list, 1, 1);
 		set = 1;
 	}
 	END_TIMER2("add cluster");
diff --git a/src/sacctmgr/qos_functions.c b/src/sacctmgr/qos_functions.c
index 001345d4af5..7177594da14 100644
--- a/src/sacctmgr/qos_functions.c
+++ b/src/sacctmgr/qos_functions.c
@@ -674,6 +674,9 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 					(curr_inx == field_count));
 				break;
 			default:
+				field->print_routine(
+					field, NULL,
+					(curr_inx == field_count));
 				break;
 			}
 			curr_inx++;
diff --git a/src/sacctmgr/sacctmgr.c b/src/sacctmgr/sacctmgr.c
index 55161061e53..87cb5ab1ac5 100644
--- a/src/sacctmgr/sacctmgr.c
+++ b/src/sacctmgr/sacctmgr.c
@@ -671,14 +671,23 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
      associations             when using show/list will list the           \n\
                               associations associated with the entity.     \n\
      delete <ENTITY> <SPECS>  delete the specified entity(s)               \n\
-     dump <CLUSTER> <FILE>    dump database information of the             \n\
+     dump <CLUSTER> [<FILE>]  dump database information of the             \n\
                               specified cluster to the flat file.          \n\
+                              Will default to clustername.cfg if no file   \n\
+                              is given.                                    \n\
      exit                     terminate sacctmgr                           \n\
      help                     print this description of use.               \n\
      list <ENTITY> [<SPECS>]  display info of identified entity, default   \n\
                               is display all.                              \n\
-     load <FILE>              read in the file to update the database      \n\
-                              with the file contents.                      \n\
+     load <FILE> [<SPECS>]    read in the file to update the database      \n\
+                              with the file contents. <SPECS> here consist \n\
+                              of 'cluster=', and 'clean'.  The 'cluster='  \n\
+                              will override the cluster name given in the  \n\
+                              file.  The 'clean' option will remove what is\n\
+                              already in the system for this cluster and   \n\
+                              replace it with the file.  If the clean option\n\
+                              is not given only new additions or           \n\
+                              modifications will be done, no deletions.    \n\
      modify <ENTITY> <SPECS>  modify entity                                \n\
      oneliner                 report output one record per line.           \n\
      parsable                 output will be | delimited with an ending '|'\n\
@@ -718,8 +727,8 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
                             WithSubAccounts, WithDeleted, WOPInfo,         \n\
                             and WOPLimits                                  \n\
                                                                            \n\
-       list cluster       - Names= Format=                                 \n\
-       add cluster        - Fairshare=, GrpCPUMins=, GrpCPUs=, GrpJobs=,  \n\
+       list cluster       - Format=, Names=                                \n\
+       add cluster        - Fairshare=, GrpCPUMins=, GrpCPUs=, GrpJobs=,   \n\
                             GrpNodes=, GrpSubmitJob=, GrpWall=, MaxCPUMins=\n\
                             MaxJobs=, MaxNodes=, MaxWall=, and Name=       \n\
        modify cluster     - (set options) Fairshare=, GrpCPUMins=,         \n\
@@ -732,8 +741,12 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
        add coordinator    - Accounts=, and Names=                          \n\
        delete coordinator - Accounts=, and Names=                          \n\
                                                                            \n\
-       list qos           - Descriptions=, Ids=, Names=, and WithDeleted   \n\
-       add qos            - Description=, and Names=                       \n\
+       list qos           - Descriptions=, Format=, Ids=, Names=,          \n\
+                            and WithDeleted                                \n\
+       add qos            - Description=, GrpCPUMins=, GrpCPUs=, GrpJobs=, \n\
+                            GrpNodes=, GrpSubmitJob=, GrpWall=, JobFlags=, \n\
+                            MaxCPUMins=, MaxJobs=, MaxNodes=, MaxWall=,    \n\
+                            Preemptee=, Preemptor=, Priority=, and Names=  \n\
        delete qos         - Descriptions=, Ids=, and Names=                \n\
                                                                            \n\
        list transactions  - Actor=, EndTime,                               \n\
diff --git a/src/sacctmgr/txn_functions.c b/src/sacctmgr/txn_functions.c
index 059dedffd2d..47d69a11f88 100644
--- a/src/sacctmgr/txn_functions.c
+++ b/src/sacctmgr/txn_functions.c
@@ -132,6 +132,7 @@ extern int sacctmgr_list_txn(int argc, char *argv[])
 	ListIterator itr = NULL;
 	ListIterator itr2 = NULL;
 	char *object = NULL;
+	int field_count = 0;
 
 	print_field_t *field = NULL;
 
@@ -250,47 +251,62 @@ extern int sacctmgr_list_txn(int argc, char *argv[])
 	itr2 = list_iterator_create(print_fields_list);
 	print_fields_header(print_fields_list);
 
+	field_count = list_count(print_fields_list);
+
 	while((txn = list_next(itr))) {
+		int curr_inx = 1;
 		while((field = list_next(itr2))) {
 			switch(field->type) {
 			case PRINT_ACCT:
-				field->print_routine(field, txn->accts);
+				field->print_routine(field, txn->accts,
+						     (curr_inx == field_count));
 				break;
 			case PRINT_ACTION:
 				field->print_routine(
 					field, 
 					slurmdbd_msg_type_2_str(txn->action,
-								0));
+								0),
+					(curr_inx == field_count));
 				break;
 			case PRINT_ACTOR:
 				field->print_routine(field,
-						     txn->actor_name);
+						     txn->actor_name,
+						     (curr_inx == field_count));
 				break;
 			case PRINT_CLUSTER:
-				field->print_routine(field, txn->clusters);
+				field->print_routine(field, txn->clusters,
+						     (curr_inx == field_count));
 				break;
 			case PRINT_ID:
 				field->print_routine(field,
-						     txn->id);
+						     txn->id,
+						     (curr_inx == field_count));
 				break;
 			case PRINT_INFO:
 				field->print_routine(field, 
-						     txn->set_info);
+						     txn->set_info,
+						     (curr_inx == field_count));
 				break;
 			case PRINT_TS:
 				field->print_routine(field,
-						     txn->timestamp);
+						     txn->timestamp,
+						     (curr_inx == field_count));
 				break;
 			case PRINT_USER:
-				field->print_routine(field, txn->users);
+				field->print_routine(field, txn->users,
+						     (curr_inx == field_count));
 				break;
 			case PRINT_WHERE:
 				field->print_routine(field, 
-						     txn->where_query);
+						     txn->where_query,
+						     (curr_inx == field_count));
 				break;
 			default:
-				break;
+				field->print_routine(field, NULL,
+						     (curr_inx == field_count));
+					break;
 			}
+			curr_inx++;
 		}
 		list_iterator_reset(itr2);
 		printf("\n");
diff --git a/src/sacctmgr/user_functions.c b/src/sacctmgr/user_functions.c
index 4a5b9cd9bce..5a7b961130b 100644
--- a/src/sacctmgr/user_functions.c
+++ b/src/sacctmgr/user_functions.c
@@ -79,6 +79,13 @@ static int _set_cond(int *start, int argc, char *argv[],
 		} else if (!end && 
 			   !strncasecmp (argv[i], "WithCoordinators", 5)) {
 			user_cond->with_coords = 1;
+		} else if (!end && 
+			   !strncasecmp (argv[i], "WithRawQOS", 5)) {
+			assoc_cond->with_raw_qos = 1;
+		} else if (!end && !strncasecmp (argv[i], "WOPInfo", 4)) {
+			assoc_cond->without_parent_info = 1;
+		} else if (!end && !strncasecmp (argv[i], "WOPLimits", 4)) {
+			assoc_cond->without_parent_limits = 1;
 		} else if(!end && !strncasecmp(argv[i], "where", 5)) {
 			continue;
 		} else if(!end
@@ -1607,6 +1614,10 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 							 field_count));
 						break;
 					default:
+						field->print_routine(
+							field, NULL,
+							(curr_inx ==
+							 field_count));
 						break;
 					}
 					curr_inx++;
@@ -1677,6 +1688,9 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 						(curr_inx == field_count));
 					break;
 				default:
+					field->print_routine(
+						field, NULL,
+						(curr_inx == field_count));
 					break;
 				}
 			curr_inx++;
diff --git a/src/salloc/salloc.c b/src/salloc/salloc.c
index c1a48258353..dbe87b40595 100644
--- a/src/salloc/salloc.c
+++ b/src/salloc/salloc.c
@@ -191,15 +191,6 @@ int main(int argc, char *argv[])
 		slurm_allocation_msg_thr_destroy(msg_thr);
 		exit(1);
 	}
-	after = time(NULL);
-
-	xsignal(SIGHUP, _exit_on_signal);
-	xsignal(SIGINT, _ignore_signal);
-	xsignal(SIGQUIT, _ignore_signal);
-	xsignal(SIGPIPE, _ignore_signal);
-	xsignal(SIGTERM, _ignore_signal);
-	xsignal(SIGUSR1, _ignore_signal);
-	xsignal(SIGUSR2, _ignore_signal);
 
 	/*
 	 * Allocation granted!
@@ -212,6 +203,16 @@ int main(int argc, char *argv[])
 	}
 
 #endif
+	after = time(NULL);
+
+	xsignal(SIGHUP, _exit_on_signal);
+	xsignal(SIGINT, _ignore_signal);
+	xsignal(SIGQUIT, _ignore_signal);
+	xsignal(SIGPIPE, _ignore_signal);
+	xsignal(SIGTERM, _ignore_signal);
+	xsignal(SIGUSR1, _ignore_signal);
+	xsignal(SIGUSR2, _ignore_signal);
+
 	if (opt.bell == BELL_ALWAYS
 	    || (opt.bell == BELL_AFTER_DELAY
 		&& ((after - before) > DEFAULT_BELL_DELAY))) {
diff --git a/src/slurmctld/agent.c b/src/slurmctld/agent.c
index e12ccddbf5c..39b86779a67 100644
--- a/src/slurmctld/agent.c
+++ b/src/slurmctld/agent.c
@@ -987,8 +987,8 @@ cleanup:
 						 thread_ptr->start_time);
 	/* Signal completion so another thread can replace us */
 	(*threads_active_ptr)--;
-	slurm_mutex_unlock(thread_mutex_ptr);
 	pthread_cond_signal(thread_cond_ptr);
+	slurm_mutex_unlock(thread_mutex_ptr);
 	return (void *) NULL;
 }
 
diff --git a/src/slurmctld/controller.c b/src/slurmctld/controller.c
index 1b6706e7146..f38830abc29 100644
--- a/src/slurmctld/controller.c
+++ b/src/slurmctld/controller.c
@@ -354,16 +354,12 @@ int main(int argc, char *argv[])
 	/*
 	 * Initialize plugins.
 	 */
-	if ( slurm_select_init() != SLURM_SUCCESS )
+	if (slurm_select_init() != SLURM_SUCCESS )
 		fatal( "failed to initialize node selection plugin" );
-	if ( checkpoint_init(slurmctld_conf.checkpoint_type) != 
-			SLURM_SUCCESS )
+	if (checkpoint_init(slurmctld_conf.checkpoint_type) != SLURM_SUCCESS )
 		fatal( "failed to initialize checkpoint plugin" );
-	if (slurm_select_init() != SLURM_SUCCESS )
-		fatal( "failed to initialize node selection plugin");
 	if (slurm_acct_storage_init(NULL) != SLURM_SUCCESS )
 		fatal( "failed to initialize accounting_storage plugin");
-
 	if (slurm_jobacct_gather_init() != SLURM_SUCCESS )
 		fatal( "failed to initialize jobacct_gather plugin");
 
diff --git a/src/slurmdbd/proc_req.c b/src/slurmdbd/proc_req.c
index 3b3ba8d5c0e..aaff5238186 100644
--- a/src/slurmdbd/proc_req.c
+++ b/src/slurmdbd/proc_req.c
@@ -1159,7 +1159,9 @@ static int _init_conn(slurmdbd_conn_t *slurmdbd_conn,
 	}
 	*uid = init_msg->uid;
 	
-	debug("DBD_INIT: VERSION:%u UID:%u", init_msg->version, init_msg->uid);
+	debug("DBD_INIT: VERSION:%u UID:%u IP:%s CONN:%u",
+	      init_msg->version, init_msg->uid, 
+	      slurmdbd_conn->ip, slurmdbd_conn->newsockfd);
 	slurmdbd_conn->db_conn = acct_storage_g_get_connection(
 		false, slurmdbd_conn->newsockfd, init_msg->rollback);
 	slurmdbd_conn->rpc_version = init_msg->version;
diff --git a/src/sreport/cluster_reports.c b/src/sreport/cluster_reports.c
index d1a745b5566..2797f630a75 100644
--- a/src/sreport/cluster_reports.c
+++ b/src/sreport/cluster_reports.c
@@ -75,6 +75,7 @@ static int _set_assoc_cond(int *start, int argc, char *argv[],
 	int set = 0;
 	int end = 0;
 	int local_cluster_flag = all_clusters_flag;
+	time_t start_time, end_time;
 	
 	if(!assoc_cond) {
 		error("We need an acct_association_cond to call this");
@@ -140,8 +141,15 @@ static int _set_assoc_cond(int *start, int argc, char *argv[],
 			list_append(assoc_cond->cluster_list, temp);
 	}
 
-	set_start_end_time((time_t *)&assoc_cond->usage_start,
-			   (time_t *)&assoc_cond->usage_end);
+	/* This needs to be done on some systems to make sure
+	   assoc_cond isn't corrupted.  This has happened on some 64
+	   bit machines and this is here to be on the safe side.
+	*/
+	start_time = assoc_cond->usage_start;
+	end_time = assoc_cond->usage_end;
+	set_start_end_time(&start_time, &end_time);
+	assoc_cond->usage_start = start_time;
+	assoc_cond->usage_end = end_time;
 
 	return set;
 }
@@ -154,6 +162,15 @@ static int _set_cluster_cond(int *start, int argc, char *argv[],
 	int set = 0;
 	int end = 0;
 	int local_cluster_flag = all_clusters_flag;
+	time_t start_time, end_time;
+
+	if(!cluster_cond) {
+		error("We need an acct_cluster_cond to call this");
+		return SLURM_ERROR;
+	}
+
+	cluster_cond->with_deleted = 1;
+	cluster_cond->with_usage = 1;
 
 	if(!cluster_cond->cluster_list)
 		cluster_cond->cluster_list = list_create(slurm_destroy_char);
@@ -166,7 +183,6 @@ static int _set_cluster_cond(int *start, int argc, char *argv[],
 			continue;
 		} else if(!end && !strncasecmp(argv[i], "all_clusters", 1)) {
 			local_cluster_flag = 1;
-			continue;
 		} else if(!end
 			  || !strncasecmp (argv[i], "Clusters", 1)
 			  || !strncasecmp (argv[i], "Names", 1)) {
@@ -178,7 +194,8 @@ static int _set_cluster_cond(int *start, int argc, char *argv[],
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Format", 1)) {
 			if(format_list)
-				slurm_addto_char_list(format_list, argv[i]+end);
+				slurm_addto_char_list(format_list,
+						      argv[i]+end);
 		} else if (!strncasecmp (argv[i], "Start", 1)) {
 			cluster_cond->usage_start = parse_time(argv[i]+end, 1);
 			set = 1;
@@ -196,8 +213,15 @@ static int _set_cluster_cond(int *start, int argc, char *argv[],
 			list_append(cluster_cond->cluster_list, temp);
 	}
 
-	set_start_end_time((time_t *)&cluster_cond->usage_start,
-			   (time_t *)&cluster_cond->usage_end);
+	/* This needs to be done on some systems to make sure
+	   cluster_cond isn't corrupted.  This has happened on some 64
+	   bit machines and this is here to be on the safe side.
+	*/
+	start_time = cluster_cond->usage_start;
+	end_time = cluster_cond->usage_end;
+	set_start_end_time(&start_time, &end_time);
+	cluster_cond->usage_start = start_time;
+	cluster_cond->usage_end = end_time;
 
 	return set;
 }
@@ -362,9 +386,10 @@ static List _get_cluster_list(int argc, char *argv[], uint32_t *total_time,
 	if(print_fields_have_header) {
 		char start_char[20];
 		char end_char[20];
+		time_t my_start = cluster_cond->usage_start;
 		time_t my_end = cluster_cond->usage_end-1;
 
-		slurm_make_time_str((time_t *)&cluster_cond->usage_start, 
+		slurm_make_time_str(&my_start, 
 				    start_char, sizeof(start_char));
 		slurm_make_time_str(&my_end,
 				    end_char, sizeof(end_char));
@@ -453,6 +478,7 @@ extern int cluster_utilization(int argc, char *argv[])
 		list_iterator_destroy(itr3);
 
 		total_acct.cpu_count /= list_count(cluster->accounting_list);
+		
 		local_total_time =
 			(uint64_t)total_time * (uint64_t)total_acct.cpu_count;
 		total_reported = total_acct.alloc_secs + total_acct.down_secs 
@@ -515,6 +541,9 @@ extern int cluster_utilization(int argc, char *argv[])
 						      field_count));
 				break;
 			default:
+				field->print_routine(
+					field, NULL,
+					(curr_inx == field_count));
 				break;
 			}
 			curr_inx++;
@@ -654,7 +683,8 @@ extern int cluster_user_by_account(int argc, char *argv[])
 			user_itr = list_iterator_create(
 				sreport_cluster->user_list); 
 			while((sreport_user = list_next(user_itr))) {
-				if(!strcmp(sreport_user->acct, assoc->acct)) 
+				if(!strcmp(sreport_user->name, assoc->user)
+				   && !strcmp(sreport_user->acct, assoc->acct))
 					break;				
 			}
 			list_iterator_destroy(user_itr);
@@ -676,7 +706,8 @@ extern int cluster_user_by_account(int argc, char *argv[])
 	
 				list_append(sreport_cluster->user_list,
 					    sreport_user);
-			}
+			} 
+
 			/* get the amount of time this assoc used
 			   during the time we are looking at */
 			itr2 = list_iterator_create(assoc->accounting_list);
@@ -700,15 +731,14 @@ extern int cluster_user_by_account(int argc, char *argv[])
 	if(print_fields_have_header) {
 		char start_char[20];
 		char end_char[20];
+		time_t my_start = assoc_cond->usage_start;
 		time_t my_end = assoc_cond->usage_end-1;
 		
-		slurm_make_time_str(
-			(time_t *)&assoc_cond->usage_start, 
-			start_char, sizeof(start_char));
+		slurm_make_time_str(&my_start, start_char, sizeof(start_char));
 		slurm_make_time_str(&my_end, end_char, sizeof(end_char));
 		printf("----------------------------------------"
 		       "----------------------------------------\n");
-		printf("User Acct Utilization by Cluster %s - %s (%d secs)\n", 
+		printf("Cluster/User/Account Utilization %s - %s (%d secs)\n", 
 		       start_char, end_char, 
 		       (assoc_cond->usage_end - assoc_cond->usage_start));
 		
@@ -786,6 +816,9 @@ extern int cluster_user_by_account(int argc, char *argv[])
 						(curr_inx == field_count));
 					break;
 				default:
+					field->print_routine(
+						field, NULL,
+						(curr_inx == field_count));
 					break;
 				}
 				curr_inx++;
@@ -961,15 +994,14 @@ extern int cluster_account_by_user(int argc, char *argv[])
 	if(print_fields_have_header) {
 		char start_char[20];
 		char end_char[20];
+		time_t my_start = assoc_cond->usage_start;
 		time_t my_end = assoc_cond->usage_end-1;
 		
-		slurm_make_time_str(
-			(time_t *)&assoc_cond->usage_start, 
-			start_char, sizeof(start_char));
+		slurm_make_time_str(&my_start, start_char, sizeof(start_char));
 		slurm_make_time_str(&my_end, end_char, sizeof(end_char));
 		printf("----------------------------------------"
 		       "----------------------------------------\n");
-		printf("Acct User Utilization by Cluster %s - %s (%d secs)\n", 
+		printf("Cluster/Account/User Utilization %s - %s (%d secs)\n", 
 		       start_char, end_char, 
 		       (assoc_cond->usage_end - assoc_cond->usage_start));
 		
@@ -1080,6 +1112,9 @@ extern int cluster_account_by_user(int argc, char *argv[])
 						(curr_inx == field_count));
 					break;
 				default:
+					field->print_routine(
+						field, NULL,
+						(curr_inx == field_count));
 					break;
 				}
 				curr_inx++;
diff --git a/src/sreport/common.c b/src/sreport/common.c
index affd1f80869..254f99d40ca 100644
--- a/src/sreport/common.c
+++ b/src/sreport/common.c
@@ -223,20 +223,24 @@ extern void addto_char_list(List char_list, char *names)
 extern int set_start_end_time(time_t *start, time_t *end)
 {
 	time_t my_time = time(NULL);
+	time_t temp_time;
 	struct tm start_tm;
 	struct tm end_tm;
+	int sent_start = (*start), sent_end = (*end);
 
+//	info("now got %d and %d sent", (*start), (*end));
 	/* Default is going to be the last day */
-	if(!(*end)) {
+	if(!sent_end) {
 		if(!localtime_r(&my_time, &end_tm)) {
 			error("Couldn't get localtime from end %d",
 			      my_time);
 			return SLURM_ERROR;
 		}
 		end_tm.tm_hour = 0;
-		(*end) = mktime(&end_tm);		
+		//(*end) = mktime(&end_tm);		
 	} else {
-		if(!localtime_r(end, &end_tm)) {
+		temp_time = sent_end;
+		if(!localtime_r(&temp_time, &end_tm)) {
 			error("Couldn't get localtime from user end %d",
 			      my_time);
 			return SLURM_ERROR;
@@ -247,7 +251,7 @@ extern int set_start_end_time(time_t *start, time_t *end)
 	end_tm.tm_isdst = -1;
 	(*end) = mktime(&end_tm);		
 
-	if(!(*start)) {
+	if(!sent_start) {
 		if(!localtime_r(&my_time, &start_tm)) {
 			error("Couldn't get localtime from start %d",
 			      my_time);
@@ -255,9 +259,10 @@ extern int set_start_end_time(time_t *start, time_t *end)
 		}
 		start_tm.tm_hour = 0;
 		start_tm.tm_mday--;
-		(*start) = mktime(&start_tm);		
+		//(*start) = mktime(&start_tm);		
 	} else {
-		if(!localtime_r(start, &start_tm)) {
+		temp_time = sent_start;
+		if(!localtime_r(&temp_time, &start_tm)) {
 			error("Couldn't get localtime from user start %d",
 			      my_time);
 			return SLURM_ERROR;
@@ -270,6 +275,7 @@ extern int set_start_end_time(time_t *start, time_t *end)
 
 	if((*end)-(*start) < 3600) 
 		(*end) = (*start) + 3600;
+//	info("now got %d and %d sent", (*start), (*end));
 
 	return SLURM_SUCCESS;
 }
@@ -315,17 +321,19 @@ extern void destroy_sreport_cluster_rec(void *object)
 /* 
  * Comparator used for sorting users largest cpu to smallest cpu
  * 
- * returns: -1: user_a > user_b   0: user_a == user_b   1: user_a < user_b
+ * returns: 1: user_a > user_b   0: user_a == user_b   -1: user_a < user_b
  * 
  */
 extern int sort_user_dec(sreport_user_rec_t *user_a, sreport_user_rec_t *user_b)
 {
 	int diff = 0;
 
-	if (user_a->cpu_secs > user_b->cpu_secs)
-		return -1;
-	else if (user_a->cpu_secs < user_b->cpu_secs)
-		return 1;
+	if(sort_flag == SREPORT_SORT_TIME) {
+		if (user_a->cpu_secs > user_b->cpu_secs)
+			return -1;
+		else if (user_a->cpu_secs < user_b->cpu_secs)
+			return 1;
+	}
 
 	if(!user_a->name || !user_b->name)
 		return 0;
@@ -333,9 +341,9 @@ extern int sort_user_dec(sreport_user_rec_t *user_a, sreport_user_rec_t *user_b)
 	diff = strcmp(user_a->name, user_b->name);
 
 	if (diff > 0)
-		return -1;
-	else if (diff < 0)
 		return 1;
+	else if (diff < 0)
+		return -1;
 	
 	return 0;
 }
@@ -343,9 +351,9 @@ extern int sort_user_dec(sreport_user_rec_t *user_a, sreport_user_rec_t *user_b)
 /* 
  * Comparator used for sorting clusters alphabetically
  * 
- * returns: -1: cluster_a > cluster_b   
+ * returns: 1: cluster_a > cluster_b   
  *           0: cluster_a == cluster_b
- *           1: cluster_a < cluster_b
+ *           -1: cluster_a < cluster_b
  * 
  */
 extern int sort_cluster_dec(sreport_cluster_rec_t *cluster_a,
@@ -359,9 +367,9 @@ extern int sort_cluster_dec(sreport_cluster_rec_t *cluster_a,
 	diff = strcmp(cluster_a->name, cluster_b->name);
 
 	if (diff > 0)
-		return -1;
-	else if (diff < 0)
 		return 1;
+	else if (diff < 0)
+		return -1;
 	
 	return 0;
 }
@@ -387,21 +395,21 @@ extern int sort_assoc_dec(sreport_assoc_rec_t *assoc_a,
 	diff = strcmp(assoc_a->acct, assoc_b->acct);
 
 	if (diff > 0)
-		return -1;
-	else if (diff < 0)
 		return 1;
+	else if (diff < 0)
+		return -1;
 	
 	if(!assoc_a->user && assoc_b->user)
-		return -1;
-	else if(!assoc_b->user)
 		return 1;
+	else if(!assoc_b->user)
+		return -1;
 
 	diff = strcmp(assoc_a->user, assoc_b->user);
 
 	if (diff > 0)
-		return -1;
-	else if (diff < 0)
 		return 1;
+	else if (diff < 0)
+		return -1;
 	
 
 	return 0;
diff --git a/src/sreport/job_reports.c b/src/sreport/job_reports.c
index 986fca6a249..8c6e56ed2f4 100644
--- a/src/sreport/job_reports.c
+++ b/src/sreport/job_reports.c
@@ -80,6 +80,7 @@ enum {
 
 static List print_fields_list = NULL; /* types are of print_field_t */
 static List grouping_print_fields_list = NULL; /* types are of print_field_t */
+static int print_job_count = 0;
 
 static void _destroy_local_grouping(void *object)
 {
@@ -223,6 +224,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 	int set = 0;
 	int end = 0;
 	int local_cluster_flag = all_clusters_flag;
+	time_t start_time, end_time;
 
 	if(!job_cond->cluster_list)
 		job_cond->cluster_list = list_create(slurm_destroy_char);
@@ -237,6 +239,9 @@ static int _set_cond(int *start, int argc, char *argv[],
 		} else if(!end && !strncasecmp(argv[i], "all_clusters", 1)) {
 			local_cluster_flag = 1;
 			continue;
+		} else if(!end && !strncasecmp(argv[i], "PrintJobCount", 2)) {
+			print_job_count = 1;
+			continue;
 		} else if(!end 
 			  || !strncasecmp (argv[i], "Clusters", 1)) {
 			slurm_addto_char_list(job_cond->cluster_list,
@@ -247,14 +252,14 @@ static int _set_cond(int *start, int argc, char *argv[],
 				job_cond->acct_list =
 					list_create(slurm_destroy_char);
 			slurm_addto_char_list(job_cond->acct_list,
-					argv[i]+end);
+					      argv[i]+end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Associations", 2)) {
 			if(!job_cond->associd_list)
 				job_cond->associd_list =
 					list_create(slurm_destroy_char);
 			slurm_addto_char_list(job_cond->associd_list,
-					argv[i]+end);
+					      argv[i]+end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "End", 1)) {
 			job_cond->usage_end = parse_time(argv[i]+end, 1);
@@ -267,7 +272,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 				job_cond->groupid_list =
 					list_create(slurm_destroy_char);
 			slurm_addto_char_list(job_cond->groupid_list,
-					argv[i]+end);
+					      argv[i]+end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "grouping", 2)) {
 			if(grouping_list)
@@ -285,7 +290,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 			       && start_char) {
 				*end_char = 0;
 				while (isspace(*start_char))
-					start_char++;	/* discard whitespace */
+					start_char++;  /* discard whitespace */
 				if(!(int)*start_char)
 					continue;
 				selected_step = xmalloc(
@@ -305,12 +310,12 @@ static int _set_cond(int *start, int argc, char *argv[],
 			}
 			
 			set = 1;
-		} else if (!strncasecmp (argv[i], "Partitions", 1)) {
+		} else if (!strncasecmp (argv[i], "Partitions", 2)) {
 			if(!job_cond->partition_list)
 				job_cond->partition_list =
 					list_create(slurm_destroy_char);
 			slurm_addto_char_list(job_cond->partition_list,
-					argv[i]+end);
+					      argv[i]+end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Start", 1)) {
 			job_cond->usage_start = parse_time(argv[i]+end, 1);
@@ -325,7 +330,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 		} else {
 			exit_code=1;
 			fprintf(stderr, " Unknown condition: %s\n"
-			       "Use keyword set to modify value\n", argv[i]);
+				"Use keyword set to modify value\n", argv[i]);
 		}
 	}
 	(*start) = i;
@@ -336,8 +341,15 @@ static int _set_cond(int *start, int argc, char *argv[],
 			list_append(job_cond->cluster_list, temp);
 	}
 
-	set_start_end_time((time_t *)&job_cond->usage_start,
-			   (time_t *)&job_cond->usage_end);
+	/* This needs to be done on some systems to make sure
+	   job_cond isn't corrupted.  This has happened on some 64
+	   bit machines and this is here to be on the safe side.
+	*/
+	start_time = job_cond->usage_start;
+	end_time = job_cond->usage_end;
+	set_start_end_time(&start_time, &end_time);
+	job_cond->usage_start = start_time;
+	job_cond->usage_end = end_time;
 
 	return set;
 }
@@ -373,12 +385,7 @@ static int _setup_print_fields_list(List format_list)
 			field->name = xstrdup("Cluster");
 			field->len = 9;
 			field->print_routine = print_fields_str;
-		} else if(!strncasecmp("Count", object, 2)) {
-			field->type = PRINT_JOB_COUNT;
-			field->name = xstrdup("Job Count");
-			field->len = 9;
-			field->print_routine = print_fields_uint;
-		} else if(!strncasecmp("cpu_count", object, 2)) {
+		} else if(!strncasecmp("CpuCount", object, 2)) {
 			field->type = PRINT_JOB_CPUS;
 			field->name = xstrdup("CPU Count");
 			field->len = 9;
@@ -388,7 +395,12 @@ static int _setup_print_fields_list(List format_list)
 			field->name = xstrdup("Duration");
 			field->len = 12;
 			field->print_routine = print_fields_time;
-		} else if(!strncasecmp("node_count", object, 2)) {
+		} else if(!strncasecmp("JobCount", object, 2)) {
+			field->type = PRINT_JOB_COUNT;
+			field->name = xstrdup("Job Count");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("NodeCount", object, 2)) {
 			field->type = PRINT_JOB_NODES;
 			field->name = xstrdup("Node Count");
 			field->len = 9;
@@ -440,8 +452,10 @@ static int _setup_grouping_print_fields_list(List grouping_list)
 	while((object = list_next(itr))) {
 		field = xmalloc(sizeof(print_field_t));
 		size = atoi(object);
-
-		field->type = PRINT_JOB_SIZE;
+		if(print_job_count)
+			field->type = PRINT_JOB_COUNT;
+		else
+			field->type = PRINT_JOB_SIZE;
 		field->name = xstrdup_printf("%u-%u cpus", last_size, size-1);
 		if(time_format == SREPORT_TIME_SECS_PER
 		   || time_format == SREPORT_TIME_MINS_PER
@@ -450,7 +464,10 @@ static int _setup_grouping_print_fields_list(List grouping_list)
 		else
 			field->len = 13;
 
-		field->print_routine = sreport_print_time;
+		if(print_job_count)
+			field->print_routine = print_fields_uint;
+		else
+			field->print_routine = sreport_print_time;
 		last_size = size;
 		last_object = object;
 		if((tmp_char = strstr(object, "\%"))) {
@@ -464,7 +481,10 @@ static int _setup_grouping_print_fields_list(List grouping_list)
 
 	if(last_size) {
 		field = xmalloc(sizeof(print_field_t));
-		field->type = PRINT_JOB_SIZE;
+		if(print_job_count)
+			field->type = PRINT_JOB_COUNT;
+		else
+			field->type = PRINT_JOB_SIZE;
 		field->name = xstrdup_printf("> %u cpus", last_size);
 		if(time_format == SREPORT_TIME_SECS_PER
 		   || time_format == SREPORT_TIME_MINS_PER
@@ -472,7 +492,10 @@ static int _setup_grouping_print_fields_list(List grouping_list)
 			field->len = 20;
 		else
 			field->len = 13;
-		field->print_routine = sreport_print_time;
+		if(print_job_count)
+			field->print_routine = print_fields_uint;
+		else
+			field->print_routine = sreport_print_time;
 		if((tmp_char = strstr(last_object, "\%"))) {
 			int newlen = atoi(tmp_char+1);
 			if(newlen > 0) 
@@ -514,6 +537,8 @@ extern int job_sizes_grouped_by_top_acct(int argc, char *argv[])
 	List cluster_list = NULL;
 	List assoc_list = NULL;
 
+	List tmp_acct_list = NULL;
+
 	List format_list = list_create(slurm_destroy_char);
 	List grouping_list = list_create(slurm_destroy_char);
 
@@ -536,7 +561,16 @@ extern int job_sizes_grouped_by_top_acct(int argc, char *argv[])
 
 	_setup_grouping_print_fields_list(grouping_list);
 
+	/* we don't want to actually query by accounts in the jobs
+	   here since we may be looking for sub accounts of a specific
+	   account.
+	*/
+	tmp_acct_list = job_cond->acct_list;
+	job_cond->acct_list = NULL;
 	job_list = jobacct_storage_g_get_jobs_cond(db_conn, my_uid, job_cond);
+	job_cond->acct_list = tmp_acct_list;
+	tmp_acct_list = NULL;
+
 	if(!job_list) {
 		exit_code=1;
 		fprintf(stderr, " Problem with job query.\n");
@@ -544,12 +578,15 @@ extern int job_sizes_grouped_by_top_acct(int argc, char *argv[])
 	}
 
 	memset(&assoc_cond, 0, sizeof(acct_association_cond_t));
-	assoc_cond.acct_list = job_cond->acct_list;
 	assoc_cond.id_list = job_cond->associd_list;
 	assoc_cond.cluster_list = job_cond->cluster_list;
 	assoc_cond.partition_list = job_cond->partition_list;
-	assoc_cond.parent_acct_list = list_create(NULL);
-	list_append(assoc_cond.parent_acct_list, "root");
+	if(!job_cond->acct_list || !list_count(job_cond->acct_list)) {
+		job_cond->acct_list = list_create(NULL);
+		list_append(job_cond->acct_list, "root");
+	}
+	assoc_cond.parent_acct_list = job_cond->acct_list;	
+	
 
 	assoc_list = acct_storage_g_get_associations(db_conn, my_uid,
 						     &assoc_cond);
@@ -557,18 +594,20 @@ extern int job_sizes_grouped_by_top_acct(int argc, char *argv[])
 	if(print_fields_have_header) {
 		char start_char[20];
 		char end_char[20];
+		time_t my_start = job_cond->usage_start;
 		time_t my_end = job_cond->usage_end-1;
 
-		slurm_make_time_str((time_t *)&job_cond->usage_start, 
-				    start_char, sizeof(start_char));
-		slurm_make_time_str(&my_end,
-				    end_char, sizeof(end_char));
+		slurm_make_time_str(&my_start, start_char, sizeof(start_char));
+		slurm_make_time_str(&my_end, end_char, sizeof(end_char));
 		printf("----------------------------------------"
 		       "----------------------------------------\n");
 		printf("Job Sizes %s - %s (%d secs)\n", 
 		       start_char, end_char, 
 		       (job_cond->usage_end - job_cond->usage_start));
-		printf("Time reported in %s\n", time_format_string);
+		if(print_job_count)
+			printf("Units are in number of jobs ran\n");
+		else
+			printf("Time reported in %s\n", time_format_string);
 		printf("----------------------------------------"
 		       "----------------------------------------\n");
 	}
@@ -655,7 +694,6 @@ no_assocs:
 	while((job = list_next(itr))) {
 		char *local_cluster = "UNKNOWN";
 		char *local_account = "UNKNOWN";
-		char *group;
 
 		if(!job->elapsed) {
 			/* here we don't care about jobs that didn't
@@ -667,17 +705,22 @@ no_assocs:
 		if(job->account) 
 			local_account = job->account;
 
+		list_iterator_reset(cluster_itr);
 		while((cluster_group = list_next(cluster_itr))) {
 			if(!strcmp(local_cluster, cluster_group->cluster)) 
 				break;
 		}
 		if(!cluster_group) {
-			cluster_group = 
-				xmalloc(sizeof(cluster_grouping_t));
-			cluster_group->cluster = xstrdup(local_cluster);
-			cluster_group->acct_list =
-				list_create(_destroy_acct_grouping);
-			list_append(cluster_list, cluster_group);
+			/* here we are only looking for groups that
+			 * were added with the associations above
+			 */
+			continue;
+/* 			cluster_group =  */
+/* 				xmalloc(sizeof(cluster_grouping_t)); */
+/* 			cluster_group->cluster = xstrdup(local_cluster); */
+/* 			cluster_group->acct_list = */
+/* 				list_create(_destroy_acct_grouping); */
+/* 			list_append(cluster_list, cluster_group); */
 		}
 
 		acct_itr = list_iterator_create(cluster_group->acct_list);
@@ -696,29 +739,34 @@ no_assocs:
 		list_iterator_destroy(acct_itr);		
 			
 		if(!acct_group) {
-			uint32_t last_size = 0;
-			acct_group = xmalloc(sizeof(acct_grouping_t));
-			acct_group->acct = xstrdup(local_account);
-			acct_group->groups =
-				list_create(_destroy_local_grouping);
-			list_append(cluster_group->acct_list, acct_group);
-
-			while((group = list_next(group_itr))) {
-				local_group = xmalloc(sizeof(local_grouping_t));
-				local_group->jobs = list_create(NULL);
-				local_group->min_size = last_size;
-				last_size = atoi(group);
-				local_group->max_size = last_size-1;
-				list_append(acct_group->groups, local_group);
-			}
-			if(last_size) {
-				local_group = xmalloc(sizeof(local_grouping_t));
-				local_group->jobs = list_create(NULL);
-				local_group->min_size = last_size;
-				local_group->max_size = INFINITE;
-				list_append(acct_group->groups, local_group);
-			}
-			list_iterator_reset(group_itr);
+			//char *group = NULL;
+			//uint32_t last_size = 0;
+			/* here we are only looking for groups that
+			 * were added with the associations above
+			 */
+			continue;
+/* 			acct_group = xmalloc(sizeof(acct_grouping_t)); */
+/* 			acct_group->acct = xstrdup(local_account); */
+/* 			acct_group->groups = */
+/* 				list_create(_destroy_local_grouping); */
+/* 			list_append(cluster_group->acct_list, acct_group); */
+
+/* 			while((group = list_next(group_itr))) { */
+/* 				local_group = xmalloc(sizeof(local_grouping_t)); */
+/* 				local_group->jobs = list_create(NULL); */
+/* 				local_group->min_size = last_size; */
+/* 				last_size = atoi(group); */
+/* 				local_group->max_size = last_size-1; */
+/* 				list_append(acct_group->groups, local_group); */
+/* 			} */
+/* 			if(last_size) { */
+/* 				local_group = xmalloc(sizeof(local_grouping_t)); */
+/* 				local_group->jobs = list_create(NULL); */
+/* 				local_group->min_size = last_size; */
+/* 				local_group->max_size = INFINITE; */
+/* 				list_append(acct_group->groups, local_group); */
+/* 			} */
+/* 			list_iterator_reset(group_itr); */
 		}
 
 		local_itr = list_iterator_create(acct_group->groups);
@@ -736,8 +784,6 @@ no_assocs:
 			cluster_group->cpu_secs += total_secs;
 		}
 		list_iterator_destroy(local_itr);		
-
-		list_iterator_reset(cluster_itr);
 	}
 	list_iterator_destroy(group_itr);
 	list_destroy(grouping_list);
@@ -747,6 +793,7 @@ no_assocs:
 	
 	itr = list_iterator_create(print_fields_list);
 	itr2 = list_iterator_create(grouping_print_fields_list);
+	list_iterator_reset(cluster_itr);
 	while((cluster_group = list_next(cluster_itr))) {
 		acct_itr = list_iterator_create(cluster_group->acct_list);
 		while((acct_group = list_next(acct_itr))) {
@@ -764,6 +811,9 @@ no_assocs:
 							     0);
 					break;
 				default:
+					field->print_routine(field,
+							     NULL,
+							     0);
 					break;
 				}
 			}
@@ -776,9 +826,19 @@ no_assocs:
 					field->print_routine(
 						field,
 						local_group->cpu_secs,
-						acct_group->cpu_secs);
+						acct_group->cpu_secs,
+						0);
+					break;
+				case PRINT_JOB_COUNT:
+					field->print_routine(
+						field,
+						local_group->count,
+						0);
 					break;
 				default:
+					field->print_routine(field,
+							     NULL,
+							     0);
 					break;
 				}
 			}
@@ -800,6 +860,8 @@ no_assocs:
 //	time_format = temp_time_format;
 
 end_it:
+	if(print_job_count)
+		print_job_count = 0;
 
 	destroy_acct_job_cond(job_cond);
 	
diff --git a/src/sreport/sreport.c b/src/sreport/sreport.c
index f2aa97a51a6..84793d6bf7d 100644
--- a/src/sreport/sreport.c
+++ b/src/sreport/sreport.c
@@ -56,6 +56,7 @@ sreport_time_format_t time_format = SREPORT_TIME_MINS;
 char *time_format_string = "Minutes";
 void *db_conn = NULL;
 uint32_t my_uid = 0;
+sreport_sort_t sort_flag = SREPORT_SORT_TIME;
 
 static void	_job_rep (int argc, char *argv[]);
 static void	_user_rep (int argc, char *argv[]);
@@ -65,6 +66,7 @@ static int	_get_command (int *argc, char *argv[]);
 static void     _print_version( void );
 static int	_process_command (int argc, char *argv[]);
 static int      _set_time_format(char *format);
+static int      _set_sort(char *format);
 static void	_usage ();
 
 int 
@@ -83,6 +85,7 @@ main (int argc, char *argv[])
 		{"parsable", 0, 0, 'p'},
 		{"parsable2", 0, 0, 'P'},
 		{"quiet",    0, 0, 'q'},
+		{"sort",    0, 0, 's'},
 		{"usage",    0, 0, 'h'},
 		{"verbose",  0, 0, 'v'},
 		{"version",  0, 0, 'V'},
@@ -96,7 +99,7 @@ main (int argc, char *argv[])
 	quiet_flag        = 0;
 	log_init("sreport", opts, SYSLOG_FACILITY_DAEMON, NULL);
 
-	while((opt_char = getopt_long(argc, argv, "ahnpPqt:vV",
+	while((opt_char = getopt_long(argc, argv, "ahnpPqs:t:vV",
 			long_options, &option_index)) != -1) {
 		switch (opt_char) {
 		case (int)'?':
@@ -125,6 +128,9 @@ main (int argc, char *argv[])
 		case (int)'q':
 			quiet_flag = 1;
 			break;
+		case (int)'s':
+			_set_sort(optarg);
+			break;
 		case (int)'t':
 			_set_time_format(optarg);
 			break;
@@ -439,6 +445,14 @@ _process_command (int argc, char *argv[])
 				 argv[0]);
 		}
 		exit_flag = 1;
+	} else if (strncasecmp (argv[0], "sort", 1) == 0) {
+		if (argc < 2) {
+			exit_code = 1;
+			fprintf (stderr,
+				 "too few arguments for keyword:%s\n",
+				 argv[0]);
+		} else		
+			_set_sort(argv[1]);
 	} else if (strncasecmp (argv[0], "time", 1) == 0) {
 		if (argc < 2) {
 			exit_code = 1;
@@ -511,6 +525,20 @@ static int _set_time_format(char *format)
 	return SLURM_SUCCESS;
 }
 
+static int _set_sort(char *format)
+{
+	if (strncasecmp (format, "Name", 1) == 0) {
+		sort_flag = SREPORT_SORT_NAME;
+	} else if (strncasecmp (format, "Time", 6) == 0) {
+		sort_flag = SREPORT_SORT_TIME;
+	} else {
+		fprintf (stderr, "unknown timesort format %s", format);	
+		return SLURM_ERROR;
+	}
+
+	return SLURM_SUCCESS;
+}
+
 
 /* _usage - show the valid sreport commands */
 void _usage () {
@@ -549,7 +577,7 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
      user <REPORT> <OPTIONS>                                               \n\
                                                                            \n\
   <REPORT> is different for each report type.                              \n\
-     cluster - Utilization                                                 \n\
+     cluster - AccountUtilizationByUser, UserUtilizationByAccount, Utilization\n\
      job     - Sizes                                                       \n\
      user    - TopUsage                                                    \n\
                                                                            \n\
@@ -567,6 +595,9 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
                                                                            \n\
      cluster - Names=<OPT>      - List of clusters to include in report    \n\
                                   Default is local cluster.                \n\
+             - Tree             - When used with the AccountUtilizationByUser\n\
+                                  report will span the accounts as they are\n
+                                  in the hierarchy.                        \n\
                                                                            \n\
      job     - Accounts=<OPT>   - List of accounts to use for the report   \n\
                                   Default is all.                          \n\
@@ -581,10 +612,14 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
                                   Default is all.                          \n\
              - Partitions=<OPT> - List of partitions jobs ran on to include\n\
                                   in report.  Default is all.              \n\
+             - PrintJobCount    - When used with the Sizes report will print\n\
+                                  number of jobs ran instead of time used. \n\
              - Users=<OPT>      - List of users jobs to include in report. \n\
                                   Default is all.                          \n\
                                                                            \n\
-     user    - Clusters=<OPT>   - List of clusters to include in report.   \n\
+     user    - Accounts=<OPT>   - List of accounts to use for the report   \n\
+                                  Default is all.                          \n\
+             - Clusters=<OPT>   - List of clusters to include in report.   \n\
                                   Default is local cluster.                \n\
              - Group            - Group all accounts together for each user.\n\
                                   Default is a separate entry for each user\n\
diff --git a/src/sreport/sreport.h b/src/sreport/sreport.h
index aa35084d654..05ff40cfe41 100644
--- a/src/sreport/sreport.h
+++ b/src/sreport/sreport.h
@@ -96,6 +96,11 @@ typedef enum {
 	SREPORT_TIME_HOURS_PER,
 } sreport_time_format_t;
 
+typedef enum {
+	SREPORT_SORT_TIME,
+	SREPORT_SORT_NAME
+} sreport_sort_t;
+
 typedef struct {
 	char *acct;
 	char *cluster;
@@ -131,6 +136,7 @@ extern int quiet_flag;	/* quiet=1, verbose=-1, normal=0 */
 extern void *db_conn;
 extern uint32_t my_uid;
 extern int all_clusters_flag;
+extern sreport_sort_t sort_flag;
 
 extern void sreport_print_time(print_field_t *field,
 			       uint64_t value, uint64_t total_time, int last);
diff --git a/src/sreport/user_reports.c b/src/sreport/user_reports.c
index 50fd0d73401..aeb7fbd7169 100644
--- a/src/sreport/user_reports.c
+++ b/src/sreport/user_reports.c
@@ -59,6 +59,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 	int end = 0;
 	int local_cluster_flag = all_clusters_flag;
 	acct_association_cond_t *assoc_cond = NULL;
+	time_t start_time, end_time;
 	
 	if(!user_cond) {
 		error("We need an acct_user_cond to call this");
@@ -101,11 +102,11 @@ static int _set_cond(int *start, int argc, char *argv[],
 				assoc_cond->acct_list =
 					list_create(slurm_destroy_char);
 			slurm_addto_char_list(assoc_cond->acct_list,
-					argv[i]+end);
+					      argv[i]+end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Clusters", 1)) {
 			slurm_addto_char_list(assoc_cond->cluster_list,
-					argv[i]+end);
+					      argv[i]+end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "End", 1)) {
 			assoc_cond->usage_end = parse_time(argv[i]+end, 1);
@@ -119,7 +120,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 		} else {
 			exit_code=1;
 			fprintf(stderr, " Unknown condition: %s\n"
-			       "Use keyword set to modify value\n", argv[i]);
+				"Use keyword set to modify value\n", argv[i]);
 		}
 	}
 	(*start) = i;
@@ -130,8 +131,15 @@ static int _set_cond(int *start, int argc, char *argv[],
 			list_append(assoc_cond->cluster_list, temp);
 	}
 
-	set_start_end_time((time_t *)&assoc_cond->usage_start,
-			   (time_t *)&assoc_cond->usage_end);
+	/* This needs to be done on some systems to make sure
+	   assoc_cond isn't messed up.  This has happened on some 64
+	   bit machines and this is here to be on the safe side.
+	*/
+	start_time = assoc_cond->usage_start;
+	end_time = assoc_cond->usage_end;
+	set_start_end_time(&start_time, &end_time);
+	assoc_cond->usage_start = start_time;
+	assoc_cond->usage_end = end_time;
 
 	return set;
 }
@@ -246,13 +254,11 @@ extern int user_top(int argc, char *argv[])
 	if(print_fields_have_header) {
 		char start_char[20];
 		char end_char[20];
+		time_t my_start = user_cond->assoc_cond->usage_start;
 		time_t my_end = user_cond->assoc_cond->usage_end-1;
 
-		slurm_make_time_str(
-			(time_t *)&user_cond->assoc_cond->usage_start, 
-			start_char, sizeof(start_char));
-		slurm_make_time_str(&my_end,
-				    end_char, sizeof(end_char));
+		slurm_make_time_str(&my_start, start_char, sizeof(start_char));
+		slurm_make_time_str(&my_end, end_char, sizeof(end_char));
 		printf("----------------------------------------"
 		       "----------------------------------------\n");
 		printf("Top %u Users %s - %s (%d secs)\n", 
@@ -441,6 +447,9 @@ extern int user_top(int argc, char *argv[])
 						(curr_inx == field_count));
 					break;
 				default:
+					field->print_routine(
+						field, NULL,
+						(curr_inx == field_count));
 					break;
 				}
 				curr_inx++;
diff --git a/testsuite/expect/test21.20 b/testsuite/expect/test21.20
index 519bb812098..29586fe0ec0 100755
--- a/testsuite/expect/test21.20
+++ b/testsuite/expect/test21.20
@@ -89,6 +89,7 @@ set mj		maxjob
 set mn		maxnode
 set mw		maxwall
 set dbu		debug
+set was		withassoc
 set access_err  0
 
 #set user_name   "id -u -n"
@@ -958,7 +959,7 @@ if { $exit_code } {
 # Use sacctmgr to list the test qos additions
 #
 set matches 0
-set my_pid [spawn $sacctmgr -n -p list user format="User,QosLevel" names=$us1,$us2,$us3]
+set my_pid [spawn $sacctmgr -n -p list user format="User,QosLevel" names=$us1,$us2,$us3 $was]
 expect {
 	-re "There was a problem" {
 	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
@@ -978,7 +979,7 @@ expect {
 	}
 }
 
-if {$matches != 3} {
+if {$matches != 27} {
 	send_user "\nFAILURE:  Account addition 1 incorrect with only $matches.\n"
 	incr exit_code 1
 }
@@ -1000,7 +1001,7 @@ if { $exit_code } {
 # Use sacctmgr to list the test qos modifications
 #
 set matches 0
-set my_pid [spawn $sacctmgr -n -p list user format="User,QosLevel" names=$us1,$us2,$us3]
+set my_pid [spawn $sacctmgr -n -p list user format="User,QosLevel" names=$us1,$us2,$us3 $was]
 expect {
 	-re "There was a problem" {
 	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
@@ -1024,7 +1025,7 @@ expect {
 	}
 }
 
-if {$matches != 3} {
+if {$matches != 21} {
 	send_user "\nFAILURE:  Account addition 1 incorrect with only $matches.\n"
 	incr exit_code 1
 }
@@ -1046,7 +1047,7 @@ if { $exit_code } {
 # Use sacctmgr to list the test qos modifications
 #
 set matches 0
-set my_pid [spawn $sacctmgr -n -p list user format="User,QosLevel" names=$us1,$us2,$us3]
+set my_pid [spawn $sacctmgr -n -p list user format="User,QosLevel" names=$us1,$us2,$us3 $was]
 expect {
 	-re "There was a problem" {
 	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
@@ -1074,7 +1075,7 @@ expect {
 	}
 }
 
-if {$matches != 3} {
+if {$matches != 15} {
 	send_user "\nFAILURE:  Account addition 1 incorrect with only $matches.\n"
 	incr exit_code 1
 }
@@ -1084,7 +1085,7 @@ if {$matches != 3} {
 # account adminlevel cluster defaultaccount qoslevel name
 # qoslevel waccounts wcluster wnames
 #
-incr exit_code [_mod_user "-=$qs1" "$nm2" "$tc1,$tc2,$tc3" $us3]
+incr exit_code [_mod_user "-=$qs2" "$nm2" "$tc1,$tc2,$tc3" $us3]
 if { $exit_code } {
 	_remove_user "" "$us1,$us2,$us3"
 	_remove_acct "" "$nm1,$nm2,$nm3"
@@ -1097,7 +1098,7 @@ if { $exit_code } {
 # Use sacctmgr to list the test qos modifications
 #
 set matches 0
-set my_pid [spawn $sacctmgr -n -p list user format="User,QosLevel" names=$us1,$us2,$us3]
+set my_pid [spawn $sacctmgr -n -p list user format="User,QosLevel" names=$us1,$us2,$us3 $was]
 expect {
 	-re "There was a problem" {
 	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
@@ -1111,7 +1112,7 @@ expect {
 		incr matches
 		exp_continue
 	}
-	-re "$us3.$qs2." {
+	-re "$us3.$qs1." {
 		incr matches
 		exp_continue
 	}
@@ -1125,7 +1126,7 @@ expect {
 	}
 }
 
-if {$matches != 3} {
+if {$matches != 9} {
 	send_user "\nFAILURE:  Account addition 1 incorrect with only $matches.\n"
 	incr exit_code 1
 }
-- 
GitLab