diff --git a/NEWS b/NEWS
index 2a31c967dbc75c62c7f118ecc57ea7c65ffd8360..7d9cd063883ba6aca888c6b0b3c98623925e764b 100644
--- a/NEWS
+++ b/NEWS
@@ -77,6 +77,23 @@ documents those changes that are of interest to users and admins.
  -- Handle complete removal of CPURunMins time at the end of the job instead
     of at multifactor poll.
  -- sview - Add missing debug_flag options.
+ -- PGSQL - Notes about Postgres functionality being removed in the next
+    version of Slurm.
+ -- MYSQL - Fix issue when rolling up usage and events that happened when a
+    cluster was down (slurmctld not running) during that time period.
+ -- sched/wiki2 - Ensure that Moab gets current CPU load information.
+ -- Prevent an infinite loop when parsing configuration if an included file
+    contains one blank line.
+ -- Fix pack and unpack between 2.6 and 2.5.
+ -- Fix job state recovery logic in which a job's accounting frequency was
+    not set. This would result in a value of 65534 seconds being used (the
+    equivalent of NO_VAL in uint16_t), which could result in the job being
+    requeued or aborted.
+ -- Validate a job's accounting frequency at submission time rather than
+    waiting for its initiation to possibly fail.
+ -- Fix CPURunMins if a job is requeued from a failed launch.
+ -- Fix accounting_storage/filetxt to correct start times, which could
+    sometimes end up before the job actually started.
 
 * Changes in Slurm 2.6.0
 ========================
@@ -247,6 +264,7 @@ documents those changes that are of interest to users and admins.
  -- Select/cons_res - Correct total CPU count allocated to a job with
     --exclusive and --cpus-per-task options
  -- switch/nrt - Don't allocate network resources unless job step has 2+ nodes.
+ -- select/cons_res - Avoid extraneous "oversubscribe" error messages.
 
 * Changes in Slurm 2.5.7
 ========================
diff --git a/contribs/slurm_completion_help/README.md b/contribs/slurm_completion_help/README.md
index 373263c6fbf76aa16a693b2f364f69b8dec95e72..eb8d5ff9a4ca9b53433a1d31e8c870d347c6ec21 100644
--- a/contribs/slurm_completion_help/README.md
+++ b/contribs/slurm_completion_help/README.md
@@ -30,6 +30,10 @@ __Known issues__
 
 * Some regex needed to validate options or parameter values are not exactly correct, but should work in most cases.
 * Any new option unknown to the syntax file will be spotted as an error.
+* On a Debian-based system (e.g. Ubuntu) you may see messages like
+  `_get_comp_words_by_ref: command not found`
+  after pressing tab.
+  Based on http://askubuntu.com/questions/33440/tab-completion-doesnt-work-for-commands you need to alter your /etc/bash.bashrc to make this work correctly (see the sketch below).
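  A minimal sketch of the usual fix, assuming a stock Debian/Ubuntu
  /etc/bash.bashrc that ships with the bash-completion block commented out
  (exact paths and wording may differ between releases):

      # uncomment (or add) this block in /etc/bash.bashrc, then open a new shell
      if ! shopt -oq posix; then
        if [ -f /usr/share/bash-completion/bash_completion ]; then
          . /usr/share/bash-completion/bash_completion
        elif [ -f /etc/bash_completion ]; then
          . /etc/bash_completion
        fi
      fi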
 
 Bash completion
 ---------------
diff --git a/contribs/torque/qsub.pl b/contribs/torque/qsub.pl
index ff4149090d8d0009a3a2101d00cdd85007671e0b..eac568490e399822d08f01241ce055bec7ca4e03 100755
--- a/contribs/torque/qsub.pl
+++ b/contribs/torque/qsub.pl
@@ -210,8 +210,8 @@ $command .= " -C $additional_attributes" if $additional_attributes;
 
 $command .= " $script";
 
-system($command);
-
+my $ret = system($command);
+exit ($ret >> 8);
 
 sub parse_resource_list {
 	my ($rl) = @_;
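As a quick illustration of why the new qsub.pl code shifts the return value of
system() right by 8 bits: Perl's system() returns the raw wait status, with the
child's exit code stored in the high byte. A throwaway shell demonstration (not
part of the patch):

    $ perl -e '$r = system(q{perl -e "exit 3"}); print "$r ", $r >> 8, "\n";'
    768 3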
diff --git a/doc/html/checkpoint_blcr.shtml b/doc/html/checkpoint_blcr.shtml
index c5ac40abfbc696e9913ac5204258106f9b9b12ca..1ba135a3a89209e562b946352bbb484c390bc243 100644
--- a/doc/html/checkpoint_blcr.shtml
+++ b/doc/html/checkpoint_blcr.shtml
@@ -61,7 +61,7 @@ files of a job step will be read from</li>
 
 <p>Environment variables are available for all of these options:</p>
 <ul>
-<li<b>SLURM_CHECKPOINT</b> is equivalent to <b>--checkpoint</b>:</li>
+<li><b>SLURM_CHECKPOINT</b> is equivalent to <b>--checkpoint</b>:</li>
 <li><b>SLURM_CHECKPOINT_DIR</b> is equivalent to <b>--checkpoint-dir</b></li>
 <li><b>SLURM_RESTART_DIR</b> is equivalent to <b>--restart-dir</b></li>
 </li>
@@ -193,6 +193,6 @@ option at job submit time or scontrol's <b>ImageDir</b> option.
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 20 June 2012</p>
+<p style="text-align:center;">Last modified 12 August 2013</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/cons_res.shtml b/doc/html/cons_res.shtml
index 91c7fe53fdbe9185c455d38bcee02eccaebacc6d..196d45bf74962237f6cd4731236753a5927362d4 100644
--- a/doc/html/cons_res.shtml
+++ b/doc/html/cons_res.shtml
@@ -1,213 +1,108 @@
 <!--#include virtual="header.txt"-->
 
-<h1><a name="top">Consumable Resources in SLURM</a></h1>
+<h1><a name="top">Consumable Resources in Slurm</a></h1>
 
-<p>SLURM, using the default node allocation plug-in, allocates nodes to jobs in
+<p>Slurm, using the default node allocation plug-in, allocates nodes to jobs in
 exclusive mode.  This means that even when all the resources within a node are
 not utilized by a given job, another job will not have access to these resources.
 Nodes possess resources such as processors, memory, swap, local
 disk, etc. and jobs consume these resources. The exclusive use default policy
-in SLURM can result in inefficient utilization of the cluster and of its nodes
+in Slurm can result in inefficient utilization of the cluster and of its nodes
 resources.
+Slurm's <i>cons_res</i> or consumable resource plugin is available to
+manage resources on a much more fine-grained basis as described below.</p>
 
-<p>A plug-in supporting CPUs as a consumable resource is available in
-SLURM 0.5.0 and newer versions of SLURM. Information on how to use
-this plug-in is described below.
-</p>
+<h2>Using the Consumable Resource Allocation Plugin: <b>select/cons_res</b></h2>
 
-<h2>Using the Consumable Resource Node Allocation Plugin: <b>select/cons_res</b></h2>
-
-<ol start=1 type=1>
- <li><b>SLURM version 1.2 and newer</b></li>
+<ul>
+<li>Consumable resources have been enhanced with several new resources,
+namely CPU (same as in previous versions), Socket, Core and Memory,
+as well as any combination of the logical processors with Memory:</li>
+<ul>
+  <li><b>CPU</b> (<i>CR_CPU</i>): CPU as a consumable resource.</li>
   <ul>
-   <li>Consumable resources has been enhanced with several new resources
-    --namely CPU (same as in previous version), Socket, Core, Memory
-    as well as any combination of the logical processors with Memory:</li>
-   <ul>
-     <li><b>CPU</b> (<i>CR_CPU</i>): CPU as a consumable resource.
-       <ul>
-       <li>No notion of sockets, cores, or threads.</li>
-       <li>On a multi-core system CPUs will be cores.</li>
-       <li>On a multi-core/hyperthread system CPUs will be threads.</li>
-       <li>On a single-core systems CPUs are CPUs. ;-) </li>
-       </ul>
-     <li><b>Socket</b> (<i>CR_Socket</i>): Socket as a consumable
-     resource.</li>
-     <li/><b>Core</b> (<i>CR_Core</i>): Core as a consumable
-     resource.</li>
-     <li><b>Memory</b> (<i>CR_Memory</i>) Memory <u>only</u> as a
-     consumable resource. Note! CR_Memory assumes Shared=Yes</li>
-     <li><b>Socket and Memory</b> (<i>CR_Socket_Memory</i>): Socket
-     and Memory as consumable resources.</li>
-     <li><b>Core and Memory</b> (<i>CR_Core_Memory</i>): Core and
-     Memory as consumable resources.</li>
-     <li><b>CPU and Memory</b> (<i>CR_CPU_Memory</i>) CPU and Memory
-     as consumable resources.</li>
-   </ul>
-  <li>In the cases where Memory is the consumable resource or one of
-      the two consumable resources the <b>RealMemory</b> parameter, which
-      defines a node's amount of real memory in slurm.conf, must be
-      set when FastSchedule=1.
-  <li>srun's <i>-E</i> extension for sockets, cores, and threads are
-      ignored within the node allocation mechanism when CR_CPU or
-      CR_CPU_MEMORY is selected. It is considered to compute the total
-      number of tasks when -n is not specified. </li>
-  <li>A new srun switch <i>--job-mem=MB</i> was added to allow users
-      to specify the maximum amount of real memory per node required
-      by their application. This switch is needed in the environments
-      were Memory is a consumable resource. It is important to specify
-      enough memory since slurmd will not allow the application to use
-      more than the requested amount of real memory per node. The
-      default value for --job-mem is 1 MB. see srun man page for more
-      details.</li>
-  <li><b>All CR_s assume Shared=No</b> or Shared=Force EXCEPT for
-      <b>CR_MEMORY</b> which <b>assumes Shared=Yes</b></li>
-  <li>The consumable resource plugin is enabled via SelectType and
-      SelectTypeParameter in the slurm.conf.</li>
+    <li>No notion of sockets, cores, or threads.</li>
+    <li>On a multi-core system CPUs will be cores.</li>
+    <li>On a multi-core/hyperthread system CPUs will be threads.</li>
+    <li>On single-core systems CPUs are CPUs. ;-)</li>
+  </ul>
+  <li><b>Board</b> (<i>CR_Board</i>): Baseboard as a consumable resource.</li>
+  <li><b>Socket</b> (<i>CR_Socket</i>): Socket as a consumable resource.</li>
+  <li><b>Core</b> (<i>CR_Core</i>): Core as a consumable resource.</li>
+  <li><b>Memory</b> (<i>CR_Memory</i>): Memory <u>only</u> as a
+  consumable resource. Note! CR_Memory assumes Shared=Yes</li>
+  <li><b>Socket and Memory</b> (<i>CR_Socket_Memory</i>): Socket
+  and Memory as consumable resources.</li>
+  <li><b>Core and Memory</b> (<i>CR_Core_Memory</i>): Core and
+  Memory as consumable resources.</li>
+  <li><b>CPU and Memory</b> (<i>CR_CPU_Memory</i>): CPU and Memory
+  as consumable resources.</li>
+</ul>
+
+<li>In the cases where Memory is the consumable resource or one of
+the two consumable resources, the <b>RealMemory</b> parameter, which
+defines a node's amount of real memory in slurm.conf, must be
+set when FastSchedule=1.</li>
+
+<li>srun's <i>-E</i> extension for sockets, cores, and threads is
+ignored within the node allocation mechanism when CR_CPU or
+CR_CPU_MEMORY is selected. It is considered when computing the total
+number of tasks when -n is not specified.</li>
+
+<li>The job submission commands (salloc, sbatch and srun) support the options
+<i>--mem=MB</i> and <i>--mem-per-cpu=MB</i> permitting users to specify
+the maximum amount of real memory per node or per allocated CPU required.
+One of these options is required in environments where Memory is a consumable
+resource. It is important to specify enough memory since Slurm will not allow
+the application to use more than the requested amount of real memory. The
+default value for --mem is 1 MB. See the srun man page for details, and the example after this list.</li>
+
+<li><b>All CR_s assume Shared=No</b> or Shared=Force EXCEPT for
+<b>CR_MEMORY</b> which <b>assumes Shared=Yes</b>.</li>
+
+<li>The consumable resource plugin is enabled via SelectType and
+SelectTypeParameters in the slurm.conf.</li>
+
 <pre>
 #
-# "SelectType"         : node selection logic for scheduling.
-#    "select/bluegene" : the default on BlueGene systems, aware of
-#                        system topology, manages bglblocks, etc.
-#    "select/cons_res" : allocate individual consumable resources
-#                        (i.e. processors, memory, etc.)
-#    "select/linear"   : the default on non-BlueGene systems,
-#                        no topology awareness, oriented toward
-#                        allocating nodes to jobs rather than
-#                        resources within a node (e.g. CPUs)
-#
-# SelectType=select/linear
-SelectType=select/cons_res
+# Excerpts from sample slurm.conf file
 
-# o Define parameters to describe the SelectType plugin. For
-#    - select/bluegene - this parameter is currently ignored
-#    - select/linear   - this parameter is currently ignored
-#    - select/cons_res - the parameters available are
-#       - CR_CPU  (1)  - CPUs as consumable resources.
-#                        No notion of sockets, cores, or threads.
-#                        On a multi-core system CPUs will be cores
-#                        On a multi-core/hyperthread system CPUs
-#                                        will be threads
-#                        On a single-core systems CPUs are CPUs.
-#      - CR_Socket (2) - Sockets as a consumable resource.
-#      - CR_Core   (3) - Cores as a consumable resource.
-#      - CR_Memory (4) - Memory as a consumable resource.
-#                        Note! CR_Memory assumes Shared=Yes
-#      - CR_Socket_Memory (5) - Socket and Memory as consumable
-#                               resources.
-#      - CR_Core_Memory (6)   - Core and Memory as consumable
-#                               resources.
-#      - CR_CPU_Memory (7)    - CPU and Memory as consumable
-#                               resources.
-#
-# (#) refer to the output of "scontrol show config"
-#
-# NB!:   The -E extension for sockets, cores, and threads
-#        are ignored within the node allocation mechanism
-#        when CR_CPU or CR_CPU_MEMORY is selected.
-#        They are considered to compute the total number of
-#        tasks when -n is not specified
-#
-# NB! All CR_s assume Shared=No or Shared=Force EXCEPT for
-#        CR_MEMORY which assumes Shared=Yes
-#
-#SelectTypeParameters=CR_CPU (default)
+SelectType=select/cons_res
+SelectTypeParameters=CR_Core_Memory
 </pre>
-  <li>Using <i>--overcommit</i> or <i>-O</i> is allowed in this new version
-    of consumable resources. When the process to logical processor pinning is
-    enabled (task/affinity plug-in) the extra processes will not affect
-    co-scheduled jobs other than other jobs started with the -O flag.
-    We are currently investigating alternative approaches of handling the
-    pinning of jobs started with <i>--overcommit</i></li>
-  <li><i>-c</i> or <i>--cpus-per-task</i> works in this version of
-    consumable resources</li>
- </ul>
- <li><b>General comments</b></li>
- <ul>
-  <li>SLURM's default <b>select/linear</b> plugin is using a best fit algorithm based on
-    number of consecutive nodes. The same node allocation approach is used in
-    <b>select/cons_res</b> for consistency.</li>
-  <li>The <b>select/cons_res</b> plugin is enabled or disabled cluster-wide.</li>
-  <li>In the case where <b>select/cons_res</b> is not enabled, the normal SLURM behaviors
-    are not disrupted. The only changes, users see when using the <b>select/cons_res</b>
-    plug-in, are that jobs can be co-scheduled on nodes when resources permits it.
-    The rest of SLURM such as srun and switches (except srun -s ...), etc. are not
-    affected by this plugin. SLURM is, from a user point of view, working the same
-    way as when using the default node selection scheme.</li>
-  <li>The <i>--exclusive</i> srun switch allows users to request nodes in
-    exclusive mode even when consumable resources is enabled. see "man srun"
-    for details. </li>
-  <li>srun's <i>-s</i> or <i>--share</i> is incompatible with the consumable resource
-    environment and will therefore not be honored. Since in this environment nodes
-    are shared by default, <i>--exclusive</i> allows users to obtain dedicated nodes.</li>
- </ul>
-</ol>
 
-<p class="footer"><a href="#top">top</a></p>
+<li>Using <i>--overcommit</i> or <i>-O</i> is allowed. When
+process-to-logical-processor pinning is enabled by using an appropriate
+TaskPlugin configuration parameter, the extra processes will time-share
+the allocated resources.</li>
+</ul>
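As a worked illustration of the memory options described in the list above
(the node counts, sizes and job script names are made up, not taken from the
original page), a user running under a CR_Core_Memory configuration might
submit:

    # 2 whole-node tasks, 2048 MB of real memory per node
    $ srun -N 2 --ntasks-per-node=1 --mem=2048 ./my_app

    # 8 tasks, 2 cores each, 512 MB per allocated CPU
    $ sbatch -n 8 --cpus-per-task=2 --mem-per-cpu=512 my_job.sh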
 
-<h2>Limitation and future work</h2>
-
-<p>We are aware of several limitations with the current consumable
-resource plug-in and plan to make enhancement the plug-in as we get
-time as well as request from users to help us prioritize the features.
-
-Please send comments and requests about the consumable resources to
-<a href="mailto:slurm-dev@schedmd.com">slurm-dev@schedmd.com</a>.
-
-<ol start=1 type=1>
-  <li><b>Issue with --max_nodes, --max_sockets_per_node, --max_cores_per_socket and --max_threads_per_core</b></li>
-    <ul>
-      <li><b>Problem:</b> The example below was achieve when using CR_CPU
-      (default mode). The systems are all "dual socket, dual core,
-      single threaded systems (= 4 cpus per system)".</li>
-      <li>The first 3 serial jobs are being allocated to node hydra12
-      which means that one CPU is still available on hydra12.</li>
-      <li>The 4th job "srun -N 2-2 -E 2:2 sleep 100" requires 8 CPUs
-      and since the algorithm fills up nodes in a consecutive order
-      (when not in dedicated mode) the algorithm will want to use the
-      remaining CPUs on Hydra12 first. Because the user has requested
-      a maximum of two nodes the allocation will put the job on
-      hold until hydra12 becomes available or if backfill is enabled
-      until hydra12's remaining CPU gets allocated to another job
-      which will allow the 4th job to get two dedicated nodes</li>
-      <li><b>Note!</b> This problem is fixed in SLURM version 1.3.</li>
-      <li><b>Note!</b> If you want to specify <i>--max_????</i> this
-      problem can be solved in the current implementation by asking
-      for the nodes in dedicated mode using <i>--exclusive</i></li>.
+<h2>General Comments</h2>
 
-<pre>
-# srun sleep 100 &
-# srun sleep 100 &
-# srun sleep 100 &
-# squeue
-JOBID PARTITION   NAME     USER  ST   TIME  NODES NODELIST(REASON)
- 1132  allNodes  sleep   sballe   R   0:05      1 hydra12
- 1133  allNodes  sleep   sballe   R   0:04      1 hydra12
- 1134  allNodes  sleep   sballe   R   0:02      1 hydra12
-# srun -N 2-2 -E 2:2 sleep 100 &
-srun: job 1135 queued and waiting for resources
-#squeue
-JOBID PARTITION   NAME     USER  ST   TIME  NODES NODELIST(REASON)
- 1135  allNodes  sleep   sballe  PD   0:00      2 (Resources)
- 1132  allNodes  sleep   sballe   R   0:24      1 hydra12
- 1133  allNodes  sleep   sballe   R   0:23      1 hydra12
- 1134  allNodes  sleep   sballe   R   0:21      1 hydra12
-</pre>
-    <li><b>Proposed solution:</b> Enhance the selection mechanism to go through {node,socket,core,thread}-tuplets to find available match for specific request (bounded knapsack problem). </li>
-    </ul>
-  <li><b>Binding of processes in the case when  <i>--overcommit</i> is specified.</b></li>
-    <ul>
-      <li>In the current implementation (SLURM 1.2) we have chosen not
-      to bind process that have been started with <i>--overcommit</i>
-      flag. The reasoning behind this decision is that the Linux
-      scheduler will move non-bound processes to available resources
-      when jobs with process pinning enabled are started. The
-      non-bound jobs do not affect the bound jobs but co-scheduled
-      non-bound job would affect each others runtime. We have decided
-      that for now this is an adequate solution.
-    </ul>
-  </ul>
-</ol>
+<ul>
+<li>Slurm's default <b>select/linear</b> plugin uses a best-fit algorithm
+based on the number of consecutive nodes. The same node allocation approach is
+used in <b>select/cons_res</b> for consistency.</li>
+
+<li>The <b>select/cons_res</b> plugin is enabled or disabled cluster-wide.</li>
+
+<li>In the case where <b>select/cons_res</b> is not enabled, the normal Slurm
+behaviors are not disrupted. The only change users see when using the
+<b>select/cons_res</b> plugin is that jobs can be co-scheduled on nodes when
+resources permit it.
+The rest of Slurm, such as srun and its switches (except srun -s ...), is not
+affected by this plugin. Slurm is, from a user point of view, working the same
+way as when using the default node selection scheme.</li>
+
+<li>The <i>--exclusive</i> srun switch allows users to request nodes in
+exclusive mode even when consumable resources are enabled. See "man srun"
+for details, and the example after this list.</li>
+
+<li>srun's <i>-s</i> or <i>--share</i> is incompatible with the consumable
+resource environment and will therefore not be honored. Since in this
+environment nodes are shared by default, <i>--exclusive</i> allows users to
+obtain dedicated nodes.</li>
+</ul>
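For example (an illustrative command, not from the original page), a user who
still needs dedicated nodes on a cons_res cluster can run:

    $ srun -N 2 --exclusive ./my_mpi_app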
 
 <p class="footer"><a href="#top">top</a></p>
 
@@ -282,12 +177,12 @@ JOBID PARTITION   NAME     USER  ST   TIME  NODES NODELIST(REASON)
 <h2>Example of Node Allocations Using Consumable Resource Plugin</h2>
 
 <p>The following example illustrates the different ways four jobs
-are allocated across a cluster using (1) SLURM's default allocation
+are allocated across a cluster using (1) Slurm's default allocation
 (exclusive mode) and (2) a processor as consumable resource
 approach.</p>
 
 <p>It is important to understand that the example listed below is a
-contrived example and is only given here to illustrate the use of cpu as
+contrived example and is only given here to illustrate the use of CPU as
 consumable resources. Job 2 and Job 3 call for the node count to equal
 the processor count. This would typically be done because
 that one task per node requires all of the memory, disk space, etc. The
@@ -295,12 +190,12 @@ bottleneck would not be processor count.</p>
 
 <p>Trying to execute more than one job per node will almost certainly severely
 impact parallel job's performance.
-The biggest beneficiary of cpus as consumable resources will be serial jobs or
-jobs with modest parallelism, which can effectively share resources. On a lot
-of systems with larger processor count, jobs typically run one fewer task than
+The biggest beneficiary of CPUs as consumable resources will be serial jobs or
+jobs with modest parallelism, which can effectively share resources. On many
+systems with larger processor count, jobs typically run one fewer task than
 there are processors to minimize interference by the kernel and daemons.</p>
 
-<p>The example cluster is composed of 4 nodes (10 cpus in total):</p>
+<p>The example cluster is composed of 4 nodes (10 CPUs in total):</p>
 
 <ul>
  <li>linux01 (with 2 processors), </li>
@@ -322,12 +217,12 @@ there are processors to minimize interference by the kernel and daemons.</p>
 
 <p class="footer"><a href="#top">top</a></p>
 
-<h2>Using SLURM's Default Node Allocation (Non-shared Mode)</h2>
+<h2>Using Slurm's Default Node Allocation (Non-shared Mode)</h2>
 
 <p>The four jobs have been launched and 3 of the jobs are now
 pending, waiting to get resources allocated to them. Only Job 2 is running
-since it uses one cpu on all 4 nodes. This means that linux01 to linux03 each
-have one idle cpu and linux04 has 3 idle cpus.</p>
+since it uses one CPU on all 4 nodes. This means that linux01 to linux03 each
+have one idle CPU and linux04 has 3 idle CPUs.</p>
 
 <pre>
 # squeue
@@ -339,7 +234,7 @@ JOBID PARTITION   NAME   USER  ST   TIME  NODES NODELIST(REASON)
 </pre>
 
 <p>Once Job 2 is finished, Job 3 is scheduled and runs on
-linux01, linux02, and linux03. Job 3 is only using one cpu on each of the 3
+linux01, linux02, and linux03. Job 3 is only using one CPU on each of the 3
 nodes. Job 4 can be allocated onto the remaining idle node (linux04) so Job 3
 and Job 4 can run concurrently on the cluster.</p>
 
@@ -367,30 +262,29 @@ cannot be shared with other jobs.</p>
 
 <p>The output of squeue shows that we
 have 3 out of the 4 jobs allocated and running. This is a 2 running job
-increase over the default SLURM approach.</p>
+increase over the default Slurm approach.</p>
 
 <p> Job 2 is running on nodes linux01
-to linux04. Job 2's allocation is the same as for SLURM's default allocation
-which is that it uses one cpu on each of the 4 nodes. Once Job 2 is scheduled
-and running, nodes linux01, linux02 and linux03 still have one idle cpu each
-and node linux04 has 3 idle cpus. The main difference between this approach and
-the exclusive mode approach described above is that idle cpus within a node
+to linux04. Job 2's allocation is the same as for Slurm's default allocation
+which is that it uses one CPU on each of the 4 nodes. Once Job 2 is scheduled
+and running, nodes linux01, linux02 and linux03 still have one idle CPU each
+and node linux04 has 3 idle CPUs. The main difference between this approach and
+the exclusive mode approach described above is that idle CPUs within a node
 are now allowed to be assigned to other jobs.</p>
 
 <p>It is important to note that
 <i>assigned</i> doesn't mean <i>oversubscription</i>. The consumable resource approach
-tracks how much of each available resource (in our case cpus) must be dedicated
+tracks how much of each available resource (in our case CPUs) must be dedicated
 to a given job. This allows us to prevent per node oversubscription of
-resources (cpus).</p>
+resources (CPUs).</p>
 
 <p>Once Job 2 is running, Job 3 is
-scheduled onto node linux01, linux02, and Linux03 (using one cpu on each of the
-nodes) and Job 4 is scheduled onto one of the remaining idle cpus on Linux04.</p>
+scheduled onto node linux01, linux02, and Linux03 (using one CPU on each of the
+nodes) and Job 4 is scheduled onto one of the remaining idle CPUs on Linux04.</p>
 
 <p>Job 2, Job 3, and Job 4 are now running concurrently on the cluster.</p>
 
 <pre>
-
 # squeue
 JOBID PARTITION   NAME   USER  ST   TIME  NODES NODELIST(REASON)
     5       lsf  sleep   root  PD   0:00      1 (Resources)
@@ -441,11 +335,11 @@ other jobs if they do not use all of the resources on the nodes.</p>
 to specify that they would like their allocated
 nodes in exclusive mode. For more information see "man srun".
 The reason for that is if users have mpi//threaded/openMP
-programs that will take advantage of all the cpus within a node but only need
+programs that will take advantage of all the CPUs within a node but only need
 one mpi process per node.</p>
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 3 February 2012</p>
+<p style="text-align:center;">Last modified 14 August 2013</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/reservations.shtml b/doc/html/reservations.shtml
index ea2b901c1eb0c4ec8c41c30041275c789b5360fd..e7a509b6fd7f6f131707bf71630c3984deff4dde 100644
--- a/doc/html/reservations.shtml
+++ b/doc/html/reservations.shtml
@@ -6,14 +6,15 @@
 being executed by select users and/or select bank accounts.
 A resource reservation identifies the resources in that reservation
 and a time period during which the reservation is available.
-The resources which can be reserved include nodes and/or licenses.
+The resources which can be reserved include cores, nodes and/or licenses.
 Note that resource reservations are not compatible with SLURM's
 gang scheduler plugin since the termination time of running jobs
 cannot be accurately predicted.</p>
 
 <p>Note that reserved licenses are treated somewhat differently than reserved
-nodes. When nodes are reserved, then jobs using that reservation can use only
-those nodes and no other jobs can use those nodes. Reserved licenses can only
+cores or nodes. When cores or nodes are reserved, jobs using that
+reservation can use only those resources, and no other jobs can use those
+resources. Reserved licenses can only
 be used by jobs associated with that reservation, but licenses not explicitly
 reserved are available to any job. This eliminates the need to explicitly
 put licenses into every advanced reservation created.</p>
@@ -38,14 +39,13 @@ The "maint" flag is used to identify the reservation for accounting
 purposes as system maintenance.
 The "ignore_jobs" flag is used to indicate that we can ignore currently
 running jobs when creating this reservation.
-By default, only nodes which are not expected to have a running job
+By default, only resources which are not expected to have a running job
 at the start time can be reserved (the time limit of all running
 jobs will have been reached).
 In this case we can manually cancel the running jobs as needed
 to perform system maintenance.
 As the reservation time approaches,
-only jobs that can complete by the reservation time will be
-initiated.</p>
+only jobs that can complete by the reservation time will be initiated.</p>
 <pre>
 $ scontrol create reservation starttime=2009-02-06T16:00:00 \
    duration=120 user=root flags=maint,ignore_jobs nodes=ALL
@@ -148,32 +148,25 @@ ReservationName=alan_8 StartTime=2011-12-05T12:00:00
 </pre>
 
 <p>Note that specific nodes to be associated with the reservation are
-made immediately after creation of the reservation. This permits
+identified immediately after creation of the reservation. This permits
 users to stage files to the nodes in preparation for use during the
 reservation. Note that the reservation creation request can also
 identify the partition from which to select the nodes or _one_
 feature that every selected node must contain.</p>
 
-<p>On a smaller system, one might want to reserve specific CPUs rather than
-whole nodes. While the resolution of SLURM's resource reservation is that of
-whole nodes, one might configure each CPU as a license to SLURM and reserve
-those instead (we understand this is a kludge, but it does provide a way to
-work around this shortcoming in SLURM's code). Proper enforcement then requires
-that each job request one "cpu" license for each CPU to be allocated, which
-can be accomplished by an appropriate job_submit plugin. In the example below,
-we configure the system with one license named "cpu" for each CPU in the
-system, 64 in this example, then create a reservation for 32 CPUs. The
-user developed job_submit plugin would then explicitly set the job's
-licenses field to require one "cpu" for each physical CPU required to satisfy
-the request.</p>
+<p>On a smaller system, one might want to reserve cores rather than
+whole nodes. Slurm provides a core reservation capability in version 2.6.
+This capability permits the administrator to identify the core count to be
+reserved on each node as shown in the examples below.</p>
 <pre>
-$ scontrol show configuration | grep Licenses
-Licenses  = cpu:64
-
-$ scontrol create reservation starttime=2009-04-06T16:00:00 \
-   duration=120 user=bob flags=license_only \
-   licenses=cpu:32
-Reservation created: bob_5
+# Create a two core reservation for user alan
+$ scontrol create reservation StartTime=now Duration=60 \
+  NodeCnt=1 CoreCnt=2 User=alan
+
+# Create a reservation for user brenda with two cores on
+# node tux8 and 4 cores on node tux9
+$ scontrol create reservation StartTime=now Duration=60 \
+  Nodes=tux8,tux9 CoreCnt=2,4 User=brenda
 </pre>
 
 <p>Reservations can not only be created for the use of specific accounts and
@@ -309,20 +302,23 @@ associated with the reservation on an equal basis (e.g. if two users are
 eligible to use a reservation and neither does, each user will be reported
 to have used half of the reserved resources).</p>
 
+<h2>Prolog and Epilog</h2>
+
+<p>Slurm supports both a reservation prolog and epilog.
+They may be configured using the <b>ResvProlog</b> and <b>ResvEpilog</b>
+configuration parameters in the slurm.conf file.
+These scripts can be used to cancel jobs, modify partition configuration,
+etc.</p>
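A minimal sketch of how these might be wired up, assuming hypothetical script
paths and contents (only the ResvProlog and ResvEpilog parameter names come
from the text above; everything else is illustrative):

    # slurm.conf additions
    ResvProlog=/etc/slurm/resv_prolog.sh
    ResvEpilog=/etc/slurm/resv_epilog.sh

    #!/bin/sh
    # /etc/slurm/resv_prolog.sh (illustrative): record that a reservation started
    logger "slurm reservation prolog running"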
+
 <h2>Future Work</h2>
 
-<p>Several enhancements are anticipated at some point in the future.
-<ol>
-<li>Reservations made within a partition having gang scheduling assumes
+<p>Reservations made within a partition having gang scheduling assume
 the highest level rather than the actual level of time-slicing when
 considering the initiation of jobs.
 This will prevent the initiation of some jobs which would complete execution
-before a reservation given fewer jobs to time-slice with.</li>
-<li>Add support to reserve specific CPU counts rather than require whole
-nodes be reserved (work around described above).</li>
-</ol>
+before a reservation given fewer jobs to time-slice with.</p>
 
-<p style="text-align: center;">Last modified 29 October 2012</p>
+<p style="text-align: center;">Last modified 13 August 2013</p>
 
 <!--#include virtual="footer.txt"-->
 
diff --git a/doc/html/resource_limits.shtml b/doc/html/resource_limits.shtml
index cacdadc834233c59437979cbc8eaf377d8dabc5a..eacc9aff2cf42c1304348e590e5a6a0e28536bcf 100644
--- a/doc/html/resource_limits.shtml
+++ b/doc/html/resource_limits.shtml
@@ -2,16 +2,10 @@
 
 <h1>Resource Limits</h1>
 
-<p>SLURM scheduling policy support was significantly changed
-in version 2.0 in order to take advantage of the database
-integration used for storing accounting information.
-This document describes the capabilities available in
-SLURM version 2.0.
-New features are under active development.
-Familiarity with SLURM's <a href="accounting.html">Accounting</a> web page
+<p>Familiarity with SLURM's <a href="accounting.html">Accounting</a> web page
 is strongly recommended before use of this document.</p>
 
-<p>Note for users of Maui or Moab schedulers: <br>
+<p><b>Note for users of Maui or Moab schedulers:</b><br>
 Maui and Moab are not integrated with SLURM's resource limits,
 but should use their own resource limits mechanisms.</p>
 
@@ -216,6 +210,6 @@ data maintained in the SLURM database.  More information can be found
 in the <a href="priority_multifactor.html">priority/multifactor</a>
 plugin description.</p>
 
-<p style="text-align: center;">Last modified 30 October 2012</p>
+<p style="text-align: center;">Last modified 12 August 2013</p>
 
 </ul></body></html>
diff --git a/doc/html/team.shtml b/doc/html/team.shtml
index 547643911ab60ae3f2b4e6dfe25f2a42e1c9c496..63c4949c17b776fe2622e1d5577c542f2c627e9a 100644
--- a/doc/html/team.shtml
+++ b/doc/html/team.shtml
@@ -73,6 +73,7 @@ Lead Slurm developers are:
 <li>Mark Grondona (Lawrence Livermore National Laboratory)</li>
 <li>Dmitri Gribenko</li>
 <li>Andriy Grytsenko (Massive Solutions Limited, Ukraine)</li>
+<li>Michael Gutteridge (Fred Hutchinson Cancer Research Center)</li>
 <br>
 <li>Chris Harwell (D. E. Shaw Research)</li>
 <li>Takao Hatazaki (HP)</li>
@@ -164,6 +165,6 @@ Lead Slurm developers are:
 <!-- INDIVIDUALS, PLEASE KEEP IN ALPHABETICAL ORDER -->
 </ul>
 
-<p style="text-align:center;">Last modified 29 July 2013</p>
+<p style="text-align:center;">Last modified 13 August 2013</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/topology.shtml b/doc/html/topology.shtml
index 4b36405e575a9377a5b63ba04ec2ef575a604eac..a3e4afee9a5827d98a61a96e147ed11821e55cf4 100644
--- a/doc/html/topology.shtml
+++ b/doc/html/topology.shtml
@@ -2,7 +2,7 @@
 
 <h1>Topology Guide</h1>
 
-<p>SLURM version 2.0 can be configured to support topology-aware resource
+<p>SLURM can be configured to support topology-aware resource
 allocation to optimize job performance.
 There are two primary modes of operation, one to optimize performance on
 systems with a three-dimensional torus interconnect and another for
@@ -19,12 +19,12 @@ allocated to the job.</p>
 <p>Some larger computers rely upon a three-dimensional torus interconnect.
 The IBM BlueGene computers is one example of this which has highly
 constrained resource allocation scheme, essentially requiring that
-jobs be allocated a set of nodes logically having a rectangular shape.
+jobs be allocated a set of nodes logically having a rectangular prism shape.
 SLURM has a plugin specifically written for BlueGene to select appropriate
 nodes for jobs, change network switch routing, boot nodes, etc as described
 in the <a href="bluegene.html">BlueGene User and Administrator Guide</a>.</p>
 
-<p>The Sun Constellation and Cray XT systems also have three-dimensional
+<p>The Sun Constellation and Cray systems also have three-dimensional
 torus interconnects, but do not require that jobs execute in adjacent nodes.
 On those systems, SLURM only needs to allocate resources to a job which
 are nearby on the network.
@@ -36,7 +36,7 @@ SLURM's native best-fit algorithm is thus able to achieve a high degree
 of locality for jobs.
 For more information, see SLURM's documentation for
 <a href="sun_const.html">Sun Constellation</a> and
-<a href="cray.html">Cray XT</a> systems.</p>
+<a href="cray.html">Cray</a> systems.</p>
 
 <h2>Hierarchical Networks</h2>
 
@@ -132,6 +132,16 @@ SwitchName=s3 Nodes=tux[12-15]
 SwitchName=s4 Switches=s[0-3]
 </pre>
 
-<p style="text-align:center;">Last modified 27 March 2012</p>
+<h2>User Options</h2>
+
+<p>For use with the topology/tree plugin, users can also specify the maximum
+number of leaf switches to be used for their job, along with the maximum time
+the job should wait for this optimized configuration. The syntax for this
+option is "--switches=count[@time]".
+The system administrator can limit the maximum time that any job can
+wait for this optimized configuration using the <i>SchedulerParameters</i>
+configuration parameter with the <i>max_switch_wait</i> option.</p>
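For instance (an illustrative command and value, not from the original page):

    # Use at most 2 leaf switches; give up on that optimization after 10 minutes
    $ sbatch --switches=2@10:00 my_job.sh

    # slurm.conf: cap how long any job may wait for its switch count (seconds)
    SchedulerParameters=max_switch_wait=300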
+
+<p style="text-align:center;">Last modified 13 August 2013</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/man/man1/sacct.1 b/doc/man/man1/sacct.1
index 2725bc87385178c59af30691ce69fd480ff9f00d..05f34c592567c2b672f78c4f3e2c5772a0064e89 100644
--- a/doc/man/man1/sacct.1
+++ b/doc/man/man1/sacct.1
@@ -54,10 +54,9 @@ gather and report incomplete information for these calls;
 \f2getrusage (3)\fP man page for information about which data are
 actually available on your system.
 .IP
-Elapsed time fields are presented as 2 fields, integral seconds and integral microseconds
-.IP
 Elapsed time fields are presented as
-[[days-]hours:]minutes:seconds.hundredths.
+[days-]hours:minutes:seconds[.microseconds].  Only 'CPU' fields will
+ever have microseconds.
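Illustrative values in the new format (made up for clarity, not taken from the
man page):

    03:02:01           # 3 hours, 2 minutes, 1 second
    1-03:02:01         # 1 day, 3 hours, 2 minutes, 1 second
    00:00:05.123456    # a 'CPU' field, carrying microseconds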
 .IP
 The default input file is the file named in the
 \f3AccountingStorageLoc\fP parameter in slurm.conf.
diff --git a/src/common/gres.c b/src/common/gres.c
index 23a9cb117e32c3f8ac4331c0b8efd2971c809cd3..7f7139946b1d79d9e1208820312b1abb1d8af14b 100644
--- a/src/common/gres.c
+++ b/src/common/gres.c
@@ -940,8 +940,6 @@ extern int gres_plugin_node_config_unpack(Buf buffer, char* node_name)
 	gres_conf_list = list_create(_destroy_gres_slurmd_conf);
 
 	safe_unpack16(&version, buffer);
-	if (version != SLURM_PROTOCOL_VERSION)
-		return SLURM_ERROR;
 
 	safe_unpack16(&rec_cnt, buffer);
 	if (rec_cnt == 0)
@@ -3250,11 +3248,11 @@ step2:	if (!from_job_gres_list)
 			gres_job_ptr2->gres_cnt_alloc = gres_job_ptr->
 							gres_cnt_alloc;
 			gres_job_ptr2->node_cnt = new_node_cnt;
-			gres_job_ptr2->gres_bit_alloc = 
+			gres_job_ptr2->gres_bit_alloc =
 				xmalloc(sizeof(bitstr_t *) * new_node_cnt);
-			gres_job_ptr2->gres_bit_step_alloc = 
+			gres_job_ptr2->gres_bit_step_alloc =
 				xmalloc(sizeof(bitstr_t *) * new_node_cnt);
-			gres_job_ptr2->gres_cnt_step_alloc = 
+			gres_job_ptr2->gres_cnt_step_alloc =
 				xmalloc(sizeof(uint32_t) * new_node_cnt);
 			list_append(to_job_gres_list, gres_ptr2);
 		}
@@ -3469,7 +3467,7 @@ extern void gres_plugin_job_state_file(List gres_list, int *gres_bit_alloc,
 
 	slurm_mutex_lock(&gres_context_lock);
 	gres_iter = list_iterator_create(gres_list);
-	
+
 	for (j=0; j<gres_context_cnt; j++) {
 		found = 0;
 		list_iterator_reset(gres_iter);
@@ -4641,7 +4639,7 @@ extern int gres_num_gres_alloced_all(List gres_list, int arrlen,
 	return rc;
 }
 
-extern void gres_plugin_step_state_file(List gres_list, int *gres_bit_alloc, 
+extern void gres_plugin_step_state_file(List gres_list, int *gres_bit_alloc,
 					int *gres_count)
 {
 	int i, j, p, gres_cnt = 0, len, found;
diff --git a/src/common/job_resources.h b/src/common/job_resources.h
index 670bc830808617de2de4c8df812fd9bc799643e2..e2f44e82bc0fd77f3f67be954c19e3a1bccabb9d 100644
--- a/src/common/job_resources.h
+++ b/src/common/job_resources.h
@@ -70,7 +70,7 @@
  *			  is duplicated. See NOTES below.
  * memory_allocated	- MB per node reserved for the job or step
  * memory_used		- MB per node of memory consumed by job steps
- * nhosts		- Number of nodes in the allocation on a
+ * nhosts		- Number of nodes in the allocation.  On a
  *                        bluegene machine this represents the number
  *                        of midplanes used.  This should always be
  *                        the number of bits set in node_bitmap.
diff --git a/src/common/parse_config.c b/src/common/parse_config.c
index 1b2228306d4a2c08991c619acd1ae004d9a8449e..e3fb9142c9450c21a372492b507c0fe99607b592 100644
--- a/src/common/parse_config.c
+++ b/src/common/parse_config.c
@@ -289,17 +289,20 @@ static int _strip_continuation(char *buf, int len)
 	char *ptr;
 	int bs = 0;
 
+	if (len == 0)
+		return len;	/* Empty line */
+
 	for (ptr = buf+len-1; ptr >= buf; ptr--) {
 		if (*ptr == '\\')
 			bs++;
-		else if (isspace((int)*ptr) && bs == 0)
+		else if (isspace((int)*ptr) && (bs == 0))
 			continue;
 		else
 			break;
 	}
 	/* Check for an odd number of contiguous backslashes at
-	   the end of the line */
-	if (bs % 2 == 1) {
+	 * the end of the line */
+	if ((bs % 2) == 1) {
 		ptr = ptr + bs;
 		*ptr = '\0';
 		return (ptr - buf);
@@ -898,10 +901,11 @@ int s_p_parse_file(s_p_hashtbl_t *hashtbl, uint32_t *hash_val, char *filename,
 		return SLURM_ERROR;
 	}
 
-	line = xmalloc(sizeof(char) * stat_buf.st_size);
+	/* Buffer needs one extra byte for trailing '\0' */
+	line = xmalloc(sizeof(char) * stat_buf.st_size + 1);
 	line_number = 1;
 	while ((merged_lines = _get_next_line(
-			line, stat_buf.st_size, hash_val, f)) > 0) {
+			line, stat_buf.st_size + 1, hash_val, f)) > 0) {
 		/* skip empty lines */
 		if (line[0] == '\0') {
 			line_number += merged_lines;
diff --git a/src/common/read_config.c b/src/common/read_config.c
index b47a80b7bbcc3baa10131079e2a0929dd2dcb018..0a3c42c5ea61a9abcc84bd6defaf95982e0a58da 100644
--- a/src/common/read_config.c
+++ b/src/common/read_config.c
@@ -3889,6 +3889,15 @@ extern char * debug_flags2str(uint32_t debug_flags)
 {
 	char *rc = NULL;
 
+	/* When adding to this please attempt to keep flags in
+	 * alphabetical order.
+	 */
+
+	if (debug_flags & DEBUG_FLAG_BACKFILL) {
+		if (rc)
+			xstrcat(rc, ",");
+		xstrcat(rc, "Backfill");
+	}
 	if (debug_flags & DEBUG_FLAG_BG_ALGO) {
 		if (rc)
 			xstrcat(rc, ",");
@@ -3899,11 +3908,6 @@ extern char * debug_flags2str(uint32_t debug_flags)
 			xstrcat(rc, ",");
 		xstrcat(rc, "BGBlockAlgoDeep");
 	}
-	if (debug_flags & DEBUG_FLAG_BACKFILL) {
-		if (rc)
-			xstrcat(rc, ",");
-		xstrcat(rc, "Backfill");
-	}
 	if (debug_flags & DEBUG_FLAG_BG_PICK) {
 		if (rc)
 			xstrcat(rc, ",");
@@ -3929,6 +3933,11 @@ extern char * debug_flags2str(uint32_t debug_flags)
 			xstrcat(rc, ",");
 		xstrcat(rc, "ExtSensors");
 	}
+	if (debug_flags & DEBUG_FLAG_FILESYSTEM) {
+		if (rc)
+			xstrcat(rc, ",");
+		xstrcat(rc, "Filesystem");
+	}
 	if (debug_flags & DEBUG_FLAG_FRONT_END) {
 		if (rc)
 			xstrcat(rc, ",");
@@ -3949,11 +3958,6 @@ extern char * debug_flags2str(uint32_t debug_flags)
 			xstrcat(rc, ",");
 		xstrcat(rc, "Infiniband");
 	}
-	if (debug_flags & DEBUG_FLAG_FILESYSTEM) {
-		if (rc)
-			xstrcat(rc, ",");
-		xstrcat(rc, "Filesystem");
-	}
 	if (debug_flags & DEBUG_FLAG_JOB_CONT) {
 		if (rc)
 			xstrcat(rc, ",");
@@ -3999,6 +4003,11 @@ extern char * debug_flags2str(uint32_t debug_flags)
 			xstrcat(rc, ",");
 		xstrcat(rc, "Switch");
 	}
+	if (debug_flags & DEBUG_FLAG_THREADID) {
+		if (rc)
+			xstrcat(rc, ",");
+		xstrcat(rc, "ThreadID");
+	}
 	if (debug_flags & DEBUG_FLAG_TRIGGERS) {
 		if (rc)
 			xstrcat(rc, ",");
diff --git a/src/common/slurm_acct_gather_energy.c b/src/common/slurm_acct_gather_energy.c
index 2e0a5681770f7048ac3be981a83adad695a33c6e..53c8584f476b38484f69e66cc5da80483d003fa6 100644
--- a/src/common/slurm_acct_gather_energy.c
+++ b/src/common/slurm_acct_gather_energy.c
@@ -166,20 +166,34 @@ extern void acct_gather_energy_destroy(acct_gather_energy_t *energy)
 extern void acct_gather_energy_pack(acct_gather_energy_t *energy, Buf buffer,
 				    uint16_t protocol_version)
 {
-	if (!energy) {
-		int i;
-		for (i=0; i<5; i++)
-			pack32(0, buffer);
-		pack_time(0, buffer);
-		return;
+	if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+		if (!energy) {
+			int i;
+			for (i=0; i<5; i++)
+				pack32(0, buffer);
+			pack_time(0, buffer);
+			return;
+		}
+
+		pack32(energy->base_consumed_energy, buffer);
+		pack32(energy->base_watts, buffer);
+		pack32(energy->consumed_energy, buffer);
+		pack32(energy->current_watts, buffer);
+		pack32(energy->previous_consumed_energy, buffer);
+		pack_time(energy->poll_time, buffer);
+	} else {
+		if (!energy) {
+			int i;
+			for (i=0; i<4; i++)
+				pack32(0, buffer);
+			return;
+		}
+
+		pack32(energy->base_consumed_energy, buffer);
+		pack32(energy->base_watts, buffer);
+		pack32(energy->consumed_energy, buffer);
+		pack32(energy->current_watts, buffer);
 	}
-
-	pack32(energy->base_consumed_energy, buffer);
-	pack32(energy->base_watts, buffer);
-	pack32(energy->consumed_energy, buffer);
-	pack32(energy->current_watts, buffer);
-	pack32(energy->previous_consumed_energy, buffer);
-	pack_time(energy->poll_time, buffer);
 }
 
 extern int acct_gather_energy_unpack(acct_gather_energy_t **energy, Buf buffer,
@@ -188,12 +202,19 @@ extern int acct_gather_energy_unpack(acct_gather_energy_t **energy, Buf buffer,
 	acct_gather_energy_t *energy_ptr = acct_gather_energy_alloc();
 	*energy = energy_ptr;
 
-	safe_unpack32(&energy_ptr->base_consumed_energy, buffer);
-	safe_unpack32(&energy_ptr->base_watts, buffer);
-	safe_unpack32(&energy_ptr->consumed_energy, buffer);
-	safe_unpack32(&energy_ptr->current_watts, buffer);
-	safe_unpack32(&energy_ptr->previous_consumed_energy, buffer);
-	safe_unpack_time(&energy_ptr->poll_time, buffer);
+	if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+		safe_unpack32(&energy_ptr->base_consumed_energy, buffer);
+		safe_unpack32(&energy_ptr->base_watts, buffer);
+		safe_unpack32(&energy_ptr->consumed_energy, buffer);
+		safe_unpack32(&energy_ptr->current_watts, buffer);
+		safe_unpack32(&energy_ptr->previous_consumed_energy, buffer);
+		safe_unpack_time(&energy_ptr->poll_time, buffer);
+	} else {
+		safe_unpack32(&energy_ptr->base_consumed_energy, buffer);
+		safe_unpack32(&energy_ptr->base_watts, buffer);
+		safe_unpack32(&energy_ptr->consumed_energy, buffer);
+		safe_unpack32(&energy_ptr->current_watts, buffer);
+	}
 
 	return SLURM_SUCCESS;
 
diff --git a/src/common/slurm_priority.h b/src/common/slurm_priority.h
index d62b5d59fa4200f1d793bf555d42d7cbb71c80fb..3e1c0ab7a62273371bd7dbf8f3abf92db3d02b02 100644
--- a/src/common/slurm_priority.h
+++ b/src/common/slurm_priority.h
@@ -67,7 +67,10 @@ extern double priority_g_calc_fs_factor(long double usage_efctv,
 extern List priority_g_get_priority_factors_list(
 	priority_factors_request_msg_t *req_msg, uid_t uid);
 
-/* Call at end of job to remove decayable limits at the end of the job */
+/* Call at the end of a job to remove decayable limits.
+ * At least slurmctld_lock_t job_write_lock = { NO_LOCK, WRITE_LOCK,
+ * READ_LOCK, READ_LOCK } should be locked before calling this.
+ */
 extern void priority_g_job_end(struct job_record *job_ptr);
 
 #endif /*_SLURM_PRIORIY_H */
diff --git a/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c b/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c
index a8bb9fdca372473f7fa55437d4e027411cf1a338..cb74d0f927012953ccda3867e8ba7b8fbe56b956 100644
--- a/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c
+++ b/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c
@@ -255,7 +255,7 @@ static slurmdb_step_rec_t *_slurmdb_create_step_rec(
 	slurmdb_step_rec_t *slurmdb_step = slurmdb_create_step_rec();
 
 	slurmdb_step->elapsed = filetxt_step->elapsed;
-	slurmdb_step->end = filetxt_step->header.timestamp;
+	slurmdb_step->end = filetxt_step->end;
 	slurmdb_step->exitcode = filetxt_step->exitcode;
 	slurmdb_step->ncpus = filetxt_step->ncpus;
 	if (filetxt_step->nodes) {
@@ -267,8 +267,7 @@ static slurmdb_step_rec_t *_slurmdb_create_step_rec(
 	slurmdb_step->requid = filetxt_step->requid;
 	memcpy(&slurmdb_step->stats, &filetxt_step->stats,
 	       sizeof(slurmdb_stats_t));
-	slurmdb_step->start = filetxt_step->header.timestamp -
-		slurmdb_step->elapsed;
+	slurmdb_step->start = slurmdb_step->end - slurmdb_step->elapsed;
 	slurmdb_step->state = filetxt_step->status;
 	slurmdb_step->stepid = filetxt_step->stepnum;
 	slurmdb_step->stepname = xstrdup(filetxt_step->stepname);
@@ -316,7 +315,7 @@ no_cond:
 	slurmdb_job->cluster = NULL;
 	slurmdb_job->elapsed = filetxt_job->elapsed;
 	slurmdb_job->eligible = filetxt_job->header.job_submit;
-	slurmdb_job->end = filetxt_job->header.timestamp;
+	slurmdb_job->end = filetxt_job->end;
 	slurmdb_job->exitcode = filetxt_job->exitcode;
 	slurmdb_job->gid = filetxt_job->header.gid;
 	slurmdb_job->jobid = filetxt_job->header.jobnum;
@@ -335,8 +334,7 @@ no_cond:
 	memcpy(&slurmdb_job->stats, &filetxt_job->stats,
 	       sizeof(slurmdb_stats_t));
 	slurmdb_job->show_full = filetxt_job->show_full;
-	slurmdb_job->start = filetxt_job->header.timestamp -
-		slurmdb_job->elapsed;
+	slurmdb_job->start = slurmdb_job->end - slurmdb_job->elapsed;
 	slurmdb_job->state = filetxt_job->status;
 
 	slurmdb_job->steps = list_create(slurmdb_destroy_step_rec);
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_rollup.c b/src/plugins/accounting_storage/mysql/as_mysql_rollup.c
index cb45bf15d658e794b60340d85cdb15f6551be30a..edf4f7556c659dc10c67ac48779f210d3dabf65b 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_rollup.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_rollup.c
@@ -1004,15 +1004,18 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 		}
 
 		/* now apply the down time from the slurmctld disconnects */
-		list_iterator_reset(c_itr);
-		while ((loc_c_usage = list_next(c_itr)))
-			c_usage->d_cpu += loc_c_usage->total_time;
+		if (c_usage) {
+			list_iterator_reset(c_itr);
+			while ((loc_c_usage = list_next(c_itr)))
+				c_usage->d_cpu += loc_c_usage->total_time;
 
-		if ((rc = _process_cluster_usage(
-			     mysql_conn, cluster_name, curr_start,
-			     curr_end, now, c_usage)) != SLURM_SUCCESS) {
-			_destroy_local_cluster_usage(c_usage);
-			goto end_it;
+			if ((rc = _process_cluster_usage(
+				     mysql_conn, cluster_name, curr_start,
+				     curr_end, now, c_usage))
+			    != SLURM_SUCCESS) {
+				_destroy_local_cluster_usage(c_usage);
+				goto end_it;
+			}
 		}
 
 		list_iterator_reset(a_itr);
diff --git a/src/plugins/priority/basic/priority_basic.c b/src/plugins/priority/basic/priority_basic.c
index c28dc00ae80d0795e9f7cd6d21650f23ff02a7fa..6c6557078fe7cc50fb96c762da877e970afcf332 100644
--- a/src/plugins/priority/basic/priority_basic.c
+++ b/src/plugins/priority/basic/priority_basic.c
@@ -86,7 +86,7 @@
  */
 const char plugin_name[]       	= "Priority BASIC plugin";
 const char plugin_type[]       	= "priority/basic";
-const uint32_t plugin_version	= 101;
+const uint32_t plugin_version	= 100;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
diff --git a/src/plugins/priority/multifactor/priority_multifactor.c b/src/plugins/priority/multifactor/priority_multifactor.c
index a6c815a9c66989b8fe95bdac95e8d6b6307bcc54..43e5b3ba761954f1ae5416668493db11216dee6c 100644
--- a/src/plugins/priority/multifactor/priority_multifactor.c
+++ b/src/plugins/priority/multifactor/priority_multifactor.c
@@ -132,7 +132,7 @@ slurm_ctl_conf_t slurmctld_conf;
  */
 const char plugin_name[]	= "Priority MULTIFACTOR plugin";
 const char plugin_type[]	= "priority/multifactor";
-const uint32_t plugin_version	= 101;
+const uint32_t plugin_version	= 100;
 
 static pthread_t decay_handler_thread;
 static pthread_t cleanup_handler_thread;
@@ -1013,7 +1013,11 @@ static int _apply_new_usage(struct job_record *job_ptr,
 
 	if ((uint64_t)start_period >= job_time_limit_ends)
 		cpu_run_delta = 0;
-	else if (IS_JOB_FINISHED(job_ptr)) {
+	else if (IS_JOB_FINISHED(job_ptr) || IS_JOB_COMPLETING(job_ptr)) {
+		/* If a job is being requeued sometimes the state will
+		   be pending + completing so handle that the same as
+		   finished so we don't leave time in the mix.
+		*/
 		cpu_run_delta = job_ptr->total_cpus *
 			(job_time_limit_ends - (uint64_t)start_period);
 	} else
@@ -1262,7 +1266,8 @@ static void *_decay_thread(void *no_data)
 			itr = list_iterator_create(job_list);
 			while ((job_ptr = list_next(itr))) {
 				/* Don't need to handle finished jobs. */
-				if (IS_JOB_FINISHED(job_ptr))
+				if (IS_JOB_FINISHED(job_ptr)
+				    || IS_JOB_COMPLETING(job_ptr))
 					continue;
 				/* apply new usage */
 				if (!IS_JOB_PENDING(job_ptr) &&
@@ -1317,7 +1322,8 @@ static void *_decay_thread(void *no_data)
 			itr = list_iterator_create(job_list);
 			while ((job_ptr = list_next(itr))) {
 				/* Don't need to handle finished jobs. */
-				if (IS_JOB_FINISHED(job_ptr))
+				if (IS_JOB_FINISHED(job_ptr)
+				    || IS_JOB_COMPLETING(job_ptr))
 					continue;
 				/* apply new usage */
 				if (!IS_JOB_PENDING(job_ptr) &&
@@ -1759,12 +1765,12 @@ extern List priority_p_get_priority_factors_list(
 	return ret_list;
 }
 
+/* At least slurmctld_lock_t job_write_lock = { NO_LOCK, WRITE_LOCK,
+ * READ_LOCK, READ_LOCK } should be locked before calling this */
 extern void priority_p_job_end(struct job_record *job_ptr)
 {
 	if (priority_debug)
 		info("priority_p_job_end: called for job %u", job_ptr->job_id);
 
-	slurm_mutex_lock(&decay_lock);
 	_apply_new_usage(job_ptr, g_last_ran, time(NULL));
-	slurm_mutex_unlock(&decay_lock);
 }
diff --git a/src/plugins/sched/wiki2/get_nodes.c b/src/plugins/sched/wiki2/get_nodes.c
index b0e882513a3db987e022a2d8f41ed3327672d34c..9bebe8e610c1b2fa8fa1d49082bb894c8f4bef50 100644
--- a/src/plugins/sched/wiki2/get_nodes.c
+++ b/src/plugins/sched/wiki2/get_nodes.c
@@ -330,6 +330,13 @@ static char *	_dump_node(struct node_record *node_ptr, hostlist_t hl,
 
 	snprintf(tmp, sizeof(tmp), ":STATE=%s;", _get_node_state(node_ptr));
 	xstrcat(buf, tmp);
+
+	if (node_ptr->cpu_load != NO_VAL) {
+		snprintf(tmp, sizeof(tmp), "CPULOAD=%f;",
+			 (node_ptr->cpu_load / 100.0));
+		xstrcat(buf, tmp);
+	}
+
 	if (node_ptr->reason) {
 		/* Strip out any quotes, they confuse Moab */
 		char *reason, *bad_char;
@@ -364,12 +371,6 @@ static char *	_dump_node(struct node_record *node_ptr, hostlist_t hl,
 	if (i > 0)
 		xstrcat(buf, ";");
 
-	if (node_ptr->cpu_load != NO_VAL) {
-		snprintf(tmp, sizeof(tmp), "CPULOAD=%f;",
-			 (node_ptr->cpu_load / 100.0));
-		xstrcat(buf, tmp);
-	}
-
 	if (node_ptr->arch) {
 		snprintf(tmp, sizeof(tmp), "ARCH=%s;", node_ptr->arch);
 		xstrcat(buf, tmp);
diff --git a/src/plugins/select/cons_res/dist_tasks.c b/src/plugins/select/cons_res/dist_tasks.c
index 6b23169c09e35f3e33b7221e3e3a61d45858fdce..3b18d5621ebb77e7e41ace98e14251a62fd9ac39 100644
--- a/src/plugins/select/cons_res/dist_tasks.c
+++ b/src/plugins/select/cons_res/dist_tasks.c
@@ -136,8 +136,11 @@ static int _compute_c_b_task_dist(struct job_record *job_ptr)
 	uint32_t n, i, tid, maxtasks, l;
 	uint16_t *avail_cpus;
 	job_resources_t *job_res = job_ptr->job_resrcs;
-	if (!job_res || !job_res->cpus) {
-		error("cons_res: _compute_c_b_task_dist given NULL job_ptr");
+	bool log_over_subscribe = true;
+
+	if (!job_res || !job_res->cpus || !job_res->nhosts) {
+		error("cons_res: _compute_c_b_task_dist invalid allocation "
+		      "for job %u", job_ptr->job_id);
 		return SLURM_ERROR;
 	}
 
@@ -148,10 +151,12 @@ static int _compute_c_b_task_dist(struct job_record *job_ptr)
 	/* ncpus is already set the number of tasks if overcommit is used */
 	if (!job_ptr->details->overcommit &&
 	    (job_ptr->details->cpus_per_task > 1)) {
-		if (job_ptr->details->ntasks_per_node == 0)
+		if (job_ptr->details->ntasks_per_node == 0) {
 			maxtasks = maxtasks / job_ptr->details->cpus_per_task;
-		else
-			maxtasks = job_ptr->details->ntasks_per_node * job_res->nhosts;
+		} else {
+			maxtasks = job_ptr->details->ntasks_per_node *
+				   job_res->nhosts;
+		}
 	}
 
 	/* Safe guard if the user didn't specified a lower number of
@@ -163,16 +168,20 @@ static int _compute_c_b_task_dist(struct job_record *job_ptr)
 	}
 	if (job_ptr->details->cpus_per_task == 0)
 		job_ptr->details->cpus_per_task = 1;
+	if (job_ptr->details->overcommit)
+		log_over_subscribe = false;
 	for (tid = 0, i = job_ptr->details->cpus_per_task ; (tid < maxtasks);
 	     i += job_ptr->details->cpus_per_task ) { /* cycle counter */
 		bool space_remaining = false;
-		if (over_subscribe) {
+		if (over_subscribe && log_over_subscribe) {
 			/* 'over_subscribe' is a relief valve that guards
 			 * against an infinite loop, and it *should* never
 			 * come into play because maxtasks should never be
 			 * greater than the total number of available cpus
 			 */
-			error("cons_res: _compute_c_b_task_dist oversubscribe");
+			error("cons_res: _compute_c_b_task_dist "
+			      "oversubscribe for job %u", job_ptr->job_id);
+			log_over_subscribe = false;	/* Log once per job */
 		}
 		for (n = 0; ((n < job_res->nhosts) && (tid < maxtasks)); n++) {
 			if ((i <= avail_cpus[n]) || over_subscribe) {
@@ -202,8 +211,11 @@ static int _compute_plane_dist(struct job_record *job_ptr)
 	uint32_t n, i, p, tid, maxtasks, l;
 	uint16_t *avail_cpus, plane_size = 1;
 	job_resources_t *job_res = job_ptr->job_resrcs;
-	if (!job_res || !job_res->cpus) {
-		error("cons_res: _compute_plane_dist given NULL job_res");
+	bool log_over_subscribe = true;
+
+	if (!job_res || !job_res->cpus || !job_res->nhosts) {
+		error("cons_res: _compute_plane_dist invalid allocation "
+		      "for job %u", job_ptr->job_id);
 		return SLURM_ERROR;
 	}
 
@@ -222,16 +234,19 @@ static int _compute_plane_dist(struct job_record *job_ptr)
 		return SLURM_ERROR;
 	}
 	job_res->cpus = xmalloc(job_res->nhosts * sizeof(uint16_t));
-
+	if (job_ptr->details->overcommit)
+		log_over_subscribe = false;
 	for (tid = 0, i = 0; (tid < maxtasks); i++) { /* cycle counter */
 		bool space_remaining = false;
-		if (over_subscribe) {
+		if (over_subscribe && log_over_subscribe) {
 			/* 'over_subscribe' is a relief valve that guards
 			 * against an infinite loop, and it *should* never
 			 * come into play because maxtasks should never be
 			 * greater than the total number of available cpus
 			 */
-			error("cons_res: _compute_plane_dist oversubscribe");
+			error("cons_res: _compute_plane_dist oversubscribe "
+			      "for job %u", job_ptr->job_id);
+			log_over_subscribe = false;	/* Log once per job */
 		}
 		for (n = 0; ((n < job_res->nhosts) && (tid < maxtasks)); n++) {
 			for (p = 0; p < plane_size && (tid < maxtasks); p++) {
diff --git a/src/slurmctld/controller.c b/src/slurmctld/controller.c
index 4c17161bcccfb9cf39deb69c297b5ad872198c51..16adaa8def070be1eacb2b373dfa756ffdcf06ea 100644
--- a/src/slurmctld/controller.c
+++ b/src/slurmctld/controller.c
@@ -1096,36 +1096,16 @@ static void _free_server_thread(void)
 
 static int _accounting_cluster_ready()
 {
-	struct node_record *node_ptr;
-	int i;
 	int rc = SLURM_ERROR;
 	time_t event_time = time(NULL);
-	uint32_t cpus = 0;
 	bitstr_t *total_node_bitmap = NULL;
 	char *cluster_nodes = NULL;
 	slurmctld_lock_t node_read_lock = {
 		NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK };
 
 	lock_slurmctld(node_read_lock);
-	node_ptr = node_record_table_ptr;
-	for (i = 0; i < node_record_count; i++, node_ptr++) {
-		if (node_ptr->name == '\0')
-			continue;
-#ifdef SLURM_NODE_ACCT_REGISTER
-		if (slurmctld_conf.fast_schedule)
-			cpus += node_ptr->config_ptr->cpus;
-		else
-			cpus += node_ptr->cpus;
-#else
-		cpus += node_ptr->config_ptr->cpus;
-#endif
-	}
 
-	/* Since cluster_cpus is used else where we need to keep a
-	   local var here to avoid race conditions on cluster_cpus
-	   not being correct.
-	*/
-	cluster_cpus = cpus;
+	set_cluster_cpus();
 
 	/* Now get the names of all the nodes on the cluster at this
 	   time and send it also.
@@ -1693,6 +1673,35 @@ extern void send_all_to_accounting(time_t event_time)
 	send_resvs_to_accounting();
 }
 
+/* The caller must already hold at least a slurmctld node read lock
+ * when calling this function */
+extern void set_cluster_cpus(void)
+{
+	uint32_t cpus = 0;
+	struct node_record *node_ptr;
+	int i;
+
+	node_ptr = node_record_table_ptr;
+	for (i = 0; i < node_record_count; i++, node_ptr++) {
+		if (!node_ptr->name || (node_ptr->name[0] == '\0'))
+			continue;
+#ifdef SLURM_NODE_ACCT_REGISTER
+		if (slurmctld_conf.fast_schedule)
+			cpus += node_ptr->config_ptr->cpus;
+		else
+			cpus += node_ptr->cpus;
+#else
+		cpus += node_ptr->config_ptr->cpus;
+#endif
+	}
+
+	/* Since cluster_cpus is used elsewhere, accumulate into a local
+	   variable first to avoid races in which cluster_cpus would
+	   briefly hold an incorrect value.
+	*/
+	cluster_cpus = cpus;
+}
+
 /*
  * _report_locks_set - report any slurmctld locks left set
  * RET count of locks currently set
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index 84336daae4afba28ae95272f339d44e263034dbb..bbf2676916e8324773fe3ea6bbd8866f5d37a352 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -60,6 +60,7 @@
 
 #include "slurm/slurm_errno.h"
 
+#include "src/common/slurm_acct_gather.h"
 #include "src/common/assoc_mgr.h"
 #include "src/common/bitstring.h"
 #include "src/common/forward.h"
@@ -1750,6 +1751,12 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer,
 		safe_unpack32(&num_tasks, buffer);
 
 		safe_unpackstr_xmalloc(&acctg_freq, &name_len, buffer);
+		if (acctg_freq && !strcmp(acctg_freq, "65534")) {
+			/* This fixes job state generated by version 2.6.0,
+			 * in which a version 2.5 value of NO_VAL was converted
+			 * from uint16_t to a string. */
+			xfree(acctg_freq);
+		}
 		safe_unpack16(&contiguous, buffer);
 		safe_unpack16(&cpus_per_task, buffer);
 		safe_unpack16(&nice, buffer);
@@ -1800,7 +1807,7 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer,
 		safe_unpack32(&num_tasks, buffer);
 
 		safe_unpack16(&tmp_uint16, buffer);
-		if (tmp_uint16)
+		if (tmp_uint16 && (tmp_uint16 != (uint16_t) NO_VAL))
 			acctg_freq = xstrdup_printf("%u", tmp_uint16);
 		safe_unpack16(&contiguous, buffer);
 		safe_unpack16(&cpus_per_task, buffer);
@@ -4098,6 +4105,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		       char **err_msg)
 {
 	static int launch_type_poe = -1;
+	static uint32_t acct_freq_task = NO_VAL;
 	int error_code = SLURM_SUCCESS, i, qos_error;
 	struct part_record *part_ptr = NULL;
 	List part_ptr_list = NULL;
@@ -4111,6 +4119,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	static uint32_t node_scaling = 1;
 	static uint32_t cpus_per_mp = 1;
 	acct_policy_limit_set_t acct_policy_limit_set;
+	int acctg_freq;
 
 #ifdef HAVE_BG
 	uint16_t geo[SYSTEM_DIMENSIONS];
@@ -4148,6 +4157,26 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	if (error_code != SLURM_SUCCESS)
 		return error_code;
 
+	/* Validate a job's accounting frequency, if specified */
+	if (acct_freq_task == NO_VAL) {
+		char *acct_freq = slurm_get_jobacct_gather_freq();
+		int i = acct_gather_parse_freq(PROFILE_TASK, acct_freq);
+		xfree(acct_freq);
+		if (i != -1)
+			acct_freq_task = i;
+		else
+			acct_freq_task = (uint16_t) NO_VAL;
+	}
+	acctg_freq = acct_gather_parse_freq(PROFILE_TASK, job_desc->acctg_freq);
+	if ((acctg_freq != -1) &&
+	    ((acctg_freq == 0) || (acctg_freq > acct_freq_task))) {
+		error("Invalid accounting frequency (%d > %u)",
+		      acctg_freq, acct_freq_task);
+		error_code = ESLURMD_INVALID_ACCT_FREQ;
+		goto cleanup_fail;
+	}
+
+	/* Ensure that the selected nodes are in this partition */
 	if (job_desc->req_nodes) {
 		error_code = node_name2bitmap(job_desc->req_nodes, false,
 					      &req_bitmap);
diff --git a/src/slurmctld/node_mgr.c b/src/slurmctld/node_mgr.c
index 9c3e05a6ce7e7f425276f4e0570f3b4252269c03..0af60ed1510d3c9c27a1d3a63bf7cf209acd42c3 100644
--- a/src/slurmctld/node_mgr.c
+++ b/src/slurmctld/node_mgr.c
@@ -1885,7 +1885,10 @@ extern int validate_node_specs(slurm_node_registration_status_msg_t *reg_msg)
 	node_ptr->os = reg_msg->os;
 	reg_msg->os = NULL;	/* Nothing left to free */
 
-	node_ptr->cpu_load = reg_msg->cpu_load;
+	if (node_ptr->cpu_load != reg_msg->cpu_load) {
+		node_ptr->cpu_load = reg_msg->cpu_load;
+		last_node_update = now;
+	}
 
 	if (IS_NODE_NO_RESPOND(node_ptr)) {
 		node_ptr->node_state &= (~NODE_STATE_NO_RESPOND);
diff --git a/src/slurmctld/proc_req.c b/src/slurmctld/proc_req.c
index 1888e6a2ab5d9499d1f67a921c39ecd7ba46f80c..3bbbd0613a30b1a98618965e66b6902f35c62bc1 100644
--- a/src/slurmctld/proc_req.c
+++ b/src/slurmctld/proc_req.c
@@ -1742,11 +1742,12 @@ static void _slurm_rpc_complete_batch_script(slurm_msg_t * msg)
 			comp_msg->job_id, slurm_strerror(comp_msg->slurm_rc));
 		dump_job = job_requeue = true;
 #endif
-	/* Handle non-fatal errors here */
+	/* Handle non-fatal errors here. All others drain the node. */
 	} else if ((comp_msg->slurm_rc == SLURM_COMMUNICATIONS_SEND_ERROR) ||
 	           (comp_msg->slurm_rc == ESLURM_USER_ID_MISSING) ||
-		   (comp_msg->slurm_rc == ESLURMD_UID_NOT_FOUND) ||
-		   (comp_msg->slurm_rc == ESLURMD_GID_NOT_FOUND)) {
+		   (comp_msg->slurm_rc == ESLURMD_UID_NOT_FOUND)  ||
+		   (comp_msg->slurm_rc == ESLURMD_GID_NOT_FOUND)  ||
+		   (comp_msg->slurm_rc == ESLURMD_INVALID_ACCT_FREQ)) {
 		error("Slurmd error running JobId=%u on %s=%s: %s",
 		      comp_msg->job_id, msg_title, nodes,
 		      slurm_strerror(comp_msg->slurm_rc));
diff --git a/src/slurmctld/read_config.c b/src/slurmctld/read_config.c
index 924c96077b79e4e2ad76338b91678a1dc0922b22..f60327629caba0d6aeece93f23e668c86a07edce 100644
--- a/src/slurmctld/read_config.c
+++ b/src/slurmctld/read_config.c
@@ -1681,6 +1681,13 @@ static int _sync_nodes_to_comp_job(void)
 	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
 		if ((job_ptr->node_bitmap) && IS_JOB_COMPLETING(job_ptr)) {
 			update_cnt++;
+			/* The priority plugin needs cluster_cpus, and
+			   this code runs before it would normally be
+			   set, so set it now.
+			*/
+			if (!cluster_cpus)
+				set_cluster_cpus();
+
 			info("Job %u in completing state", job_ptr->job_id);
 			if (!job_ptr->node_bitmap_cg)
 				build_cg_bitmap(job_ptr);
diff --git a/src/slurmctld/slurmctld.h b/src/slurmctld/slurmctld.h
index bd0f87bf7519a73188b4b2760ec8436a521a5862..7fa670fa16c167651adccb25ef279eaac6673f93 100644
--- a/src/slurmctld/slurmctld.h
+++ b/src/slurmctld/slurmctld.h
@@ -1689,6 +1689,10 @@ extern void save_all_state(void);
 /* send all info for the controller to accounting */
 extern void send_all_to_accounting(time_t event_time);
 
+/* The caller must already hold at least a slurmctld node read lock
+ * when calling this function */
+extern void set_cluster_cpus(void);
+
 /* sends all jobs in eligible state to accounting.  Only needed at
  * first registration
  */
diff --git a/src/slurmd/slurmstepd/slurmstepd_job.c b/src/slurmd/slurmstepd/slurmstepd_job.c
index 864b3cc8da402f1a762a43b488ccbf05142d9564..dd2f42ac0d59eaf3b59db9fa42b6211869e1357c 100644
--- a/src/slurmd/slurmstepd/slurmstepd_job.c
+++ b/src/slurmd/slurmstepd/slurmstepd_job.c
@@ -89,7 +89,7 @@ static int _check_acct_freq_task(uint32_t job_mem_lim, char *acctg_freq)
 		return 0;
 
 	if ((task_freq == 0) || (task_freq > conf->acct_freq_task)) {
-		error("Can't set frequency to %u, it is higher than %u.  "
+		error("Can't set frequency to %d, it is higher than %u.  "
 		      "We need it to be at least at this level to "
 		      "monitor memory usage.",
 		      task_freq, conf->acct_freq_task);
diff --git a/testsuite/expect/test1.72 b/testsuite/expect/test1.72
index 80ec1e2c2a064267e7cf26a66d4f1409b69e6969..d246f618c16055c571d3a0db923c6973f3ff9ec5 100755
--- a/testsuite/expect/test1.72
+++ b/testsuite/expect/test1.72
@@ -73,7 +73,7 @@ if {$sub_match != 1} {
 set sub_match 0
 spawn $srun -v --mem=200 --acctg-freq=[expr $freq_val + 10] sleep 5
 expect {
-	-re "Application launch failed: Invalid accounting" {
+	-re "Invalid accounting frequency" {
 		send_user "\nThis error is expect do not worry.\n"
 		set sub_match 1
 		exp_continue