From 806a03cee6b5886a9fb4ea2727d4c3ecbf4eb44e Mon Sep 17 00:00:00 2001
From: Brian Christiansen <brian@schedmd.com>
Date: Wed, 17 Jun 2015 09:33:39 -0700
Subject: [PATCH] Lowercase Slurm in docs.

---
 BUILD.NOTES                                   |  16 +-
 INSTALL                                       |   4 +-
 contribs/README                               |  44 ++---
 doc/html/accounting.shtml                     |  10 +-
 doc/html/accounting_storageplugins.shtml      |   6 +-
 doc/html/acct_gather_energy_plugins.shtml     |   6 +-
 doc/html/api.shtml                            |   6 +-
 doc/html/bluegene.shtml                       |  18 +-
 doc/html/cgroups.shtml                        |  10 +-
 doc/html/configurator.easy.html.in            |  48 ++---
 doc/html/configurator.html.in                 |  70 +++----
 doc/html/cons_res_share.shtml                 |   2 +-
 doc/html/core_spec_plugins.shtml              |   6 +-
 doc/html/cpu_management.shtml                 |   8 +-
 doc/html/cray_alps.shtml                      |   6 +-
 doc/html/crypto_plugins.shtml                 |   6 +-
 doc/html/disclaimer.shtml                     |   2 +-
 doc/html/elastic_computing.shtml              |   2 +-
 doc/html/ext_sensorsplugins.shtml             |   6 +-
 doc/html/faq.shtml                            |   8 +-
 doc/html/gang_scheduling.shtml                |   4 +-
 doc/html/gres.shtml                           |   4 +-
 doc/html/gres_design.shtml                    |   2 +-
 doc/html/gres_plugins.shtml                   |   6 +-
 doc/html/hdf5_profile_user_guide.shtml        |   2 +-
 doc/html/high_throughput.shtml                |   2 +-
 doc/html/ibm-pe.shtml                         |  12 +-
 doc/html/ibm.shtml                            |   8 +-
 doc/html/job_exit_code.shtml                  |   8 +-
 doc/html/jobacct_gatherplugins.shtml          |   6 +-
 doc/html/launch_plugins.shtml                 |   6 +-
 doc/html/maui.shtml                           |   6 +-
 doc/html/moab.shtml                           |  18 +-
 doc/html/mpi_guide.shtml                      |  32 ++--
 doc/html/overview.shtml                       |   4 +-
 doc/html/plugins.shtml                        |  10 +-
 doc/html/power_save.shtml                     |   2 +-
 doc/html/preempt.shtml                        |   8 +-
 doc/html/preemption_plugins.shtml             |  10 +-
 doc/html/priority_plugins.shtml               |  10 +-
 doc/html/programmer_guide.shtml               |   8 +-
 doc/html/prolog_epilog.shtml                  |   2 +-
 doc/html/publications.shtml                   |   2 +-
 doc/html/quickstart_admin.shtml               |  20 +-
 doc/html/resource_limits.shtml                |   4 +-
 doc/html/review_release.html                  |  76 --------
 doc/html/sun_const.shtml                      |   6 +-
 doc/html/taskplugins.shtml                    |   8 +-
 doc/html/topology.shtml                       |  12 +-
 doc/html/topology_plugin.shtml                |   6 +-
 doc/html/troubleshoot.shtml                   |   4 +-
 doc/man/man1/sacct.1                          |  24 +--
 doc/man/man1/sacctmgr.1                       |  26 +--
 doc/man/man1/salloc.1                         |  74 +++----
 doc/man/man1/sattach.1                        |  20 +-
 doc/man/man1/sbatch.1                         |  72 +++----
 doc/man/man1/sbcast.1                         |  18 +-
 doc/man/man1/scancel.1                        |  12 +-
 doc/man/man1/scontrol.1                       |  52 ++---
 doc/man/man1/sdiag.1                          |  12 +-
 doc/man/man1/sh5util.1                        |   6 +-
 doc/man/man1/sinfo.1                          |  30 +--
 doc/man/man1/slurm.1                          |  10 +-
 doc/man/man1/smap.1                           |  20 +-
 doc/man/man1/sprio.1                          |   8 +-
 doc/man/man1/squeue.1                         |  16 +-
 doc/man/man1/sreport.1                        |  10 +-
 doc/man/man1/srun.1                           |  78 ++++----
 doc/man/man1/srun_cr.1                        |  12 +-
 doc/man/man1/sshare.1                         |  14 +-
 doc/man/man1/sstat.1                          |  10 +-
 doc/man/man1/strigger.1                       |  10 +-
 doc/man/man1/sview.1                          |  16 +-
 doc/man/man3/slurm_allocate_resources.3       |  14 +-
 doc/man/man3/slurm_checkpoint_error.3         |  12 +-
 doc/man/man3/slurm_clear_trigger.3            |   6 +-
 doc/man/man3/slurm_complete_job.3             |   8 +-
 doc/man/man3/slurm_free_ctl_conf.3            |  16 +-
 doc/man/man3/slurm_free_front_end_info_msg.3  |   8 +-
 doc/man/man3/slurm_free_job_info_msg.3        |  20 +-
 .../slurm_free_job_step_info_response_msg.3   |  10 +-
 doc/man/man3/slurm_free_node_info.3           |   8 +-
 doc/man/man3/slurm_free_partition_info.3      |  10 +-
 doc/man/man3/slurm_get_errno.3                |   6 +-
 doc/man/man3/slurm_hostlist_create.3          |   6 +-
 doc/man/man3/slurm_job_step_create.3          |   8 +-
 doc/man/man3/slurm_kill_job.3                 |   8 +-
 doc/man/man3/slurm_load_reservations.3        |  10 +-
 doc/man/man3/slurm_reconfigure.3              |  12 +-
 doc/man/man3/slurm_slurmd_status.3            |   6 +-
 doc/man/man3/slurm_step_ctx_create.3          |   8 +-
 doc/man/man3/slurm_step_launch.3              |   8 +-
 doc/man/man5/acct_gather.conf.5               |  10 +-
 doc/man/man5/bluegene.conf.5                  |  16 +-
 doc/man/man5/cgroup.conf.5                    |   8 +-
 doc/man/man5/cray.conf.5                      |  10 +-
 doc/man/man5/ext_sensors.conf.5               |   8 +-
 doc/man/man5/gres.conf.5                      |  24 +--
 doc/man/man5/nonstop.conf.5                   |   2 +-
 doc/man/man5/slurm.conf.5                     | 180 +++++++++---------
 doc/man/man5/slurmdbd.conf.5                  |  12 +-
 doc/man/man5/topology.conf.5                  |  14 +-
 doc/man/man5/wiki.conf.5                      |  36 ++--
 doc/man/man8/slurmctld.8                      |   8 +-
 doc/man/man8/slurmd.8                         |  12 +-
 doc/man/man8/slurmdbd.8                       |   6 +-
 doc/man/man8/slurmstepd.8                     |  10 +-
 doc/man/man8/spank.8                          |  52 ++---
 108 files changed, 829 insertions(+), 905 deletions(-)
 delete mode 100644 doc/html/review_release.html

diff --git a/BUILD.NOTES b/BUILD.NOTES
index e1e91518d09..74f29a8dd06 100644
--- a/BUILD.NOTES
+++ b/BUILD.NOTES
@@ -21,9 +21,9 @@ and libtool installed):
  ./autogen.sh
 then check-in the new Makefile.am and Makefile.in files
 
-Here is a step-by-step HOWTO for creating a new release of SLURM on a
+Here is a step-by-step HOWTO for creating a new release of Slurm on a
 Linux cluster (See BlueGene and AIX specific notes below for some differences).
-0. Get current copies of SLURM and buildfarm
+0. Get current copies of Slurm and buildfarm
    > git clone https://<user_name>@github.com/chaos/slurm.git
    > svn co https://eris.llnl.gov/svn/chaos/private/buildfarm/trunk buildfarm
    place the buildfarm directory in your search path
@@ -86,7 +86,7 @@ BlueGene build notes:
    /usr/src/packages/RPMS/ppc64
 
 To build and run on AIX:
-0. Get current copies of SLURM and buildfarm
+0. Get current copies of Slurm and buildfarm
    > git clone https://<user_name>@github.com/chaos/slurm.git
    > svn co https://eris.llnl.gov/svn/chaos/private/buildfarm/trunk buildfarm
    put the buildfarm directory in your search path
@@ -116,7 +116,7 @@ To build and run on AIX:
 3. To build RPMs (NOTE: GNU tools early in PATH as described above in #0):
    Create a .rpmmacros file specifying system specific files:
 	#
-	# RPM Macros for use with SLURM on AIX
+	# RPM Macros for use with Slurm on AIX
 	# The system-wide macros for RPM are in /usr/lib/rpm/macros
 	# and this overrides a few of them
 	#
@@ -142,7 +142,7 @@ To build and run on AIX:
    --nosnapshot will name the tar-ball and RPMs based upon the META file
    --snapshot will name the tar-ball and RPMs based upon the META file plus a
    timestamp. Do this to make a tar-ball for a non-tagged release.
-4. Test POE after telling POE where to find SLURM's LoadLeveler wrapper.
+4. Test POE after telling POE where to find Slurm's LoadLeveler wrapper.
    > export MP_RMLIB=./slurm_ll_api.so
    > export CHECKPOINT=yes
 5. > poe hostname -rmpool debug
@@ -182,8 +182,8 @@ BlueGene bglblock boot problem diagnosis
   - Execute /admin/bglscripts/fatalras
     This will produce a list of failures including Rack and Midplane number
     <date> R<rack> M<midplane> <failure details>
-  - Translate the Rack and Midplane to SLURM node id: smap -R r<rack><midplane>
-  - Drain only the bad SLURM node, return others to service using scontrol
+  - Translate the Rack and Midplane to Slurm node id: smap -R r<rack><midplane>
+  - Drain only the bad Slurm node, return others to service using scontrol
 
 Configuration file update procedures:
   - cd /usr/bgl/dist/slurm (on bgli)
@@ -199,7 +199,7 @@ Some RPM commands:
   rpm -e slurm-1.1.8-1                  (erase an rpm)
   rpm --upgrade slurm-1.1.9-1.rpm       (replace existing rpm with new version)
   rpm -i --ignoresize slurm-1.1.9-1.rpm (install a new rpm)
-For main SLURM plugin installation on BGL service node:
+For main Slurm plugin installation on BGL service node:
   rpm -i --force --nodeps --ignoresize slurm-1.1.9-1.rpm
   rpm -U --force --nodeps --ignoresize slurm-1.1.9-1.rpm  (upgrade option)
 
diff --git a/INSTALL b/INSTALL
index ce628085815..93a17a61583 100644
--- a/INSTALL
+++ b/INSTALL
@@ -8,8 +8,8 @@ Basic Installation
 ==================
 
    These are generic Linux installation instructions. Build instructions 
-specific to SLURM are available at 
-http://www.llnl.gov/linux/slurm/quickstart_admin.html
+specific to Slurm are available at 
+http://slurm.schedmd.com/quickstart_admin.html
 (also found in the file doc/html/quickstart_admin.shtml).
 
    The `configure' shell script attempts to guess correct values for
diff --git a/contribs/README b/contribs/README
index febe1c6109b..29ebbdd19b2 100644
--- a/contribs/README
+++ b/contribs/README
@@ -1,11 +1,11 @@
-This is the contribs dir for SLURM.
+This is the contribs dir for Slurm.
 
 SOURCE DISTRIBUTION HIERARCHY
 -----------------------------
 
 Subdirectories contain the source-code for the various contributations for
-SLURM as their documentation. A quick description of the subdirectories
-of the SLURM contribs distribution follows:
+Slurm as their documentation. A quick description of the subdirectories
+of the Slurm contribs distribution follows:
 
   cray                [Tools for use on Cray systems]
      etc_sysconfig_slurm   - /etc/sysconfig/slurm for Cray XT/XE systems
@@ -39,7 +39,7 @@ of the SLURM contribs distribution follows:
      on the node where the moab daemon runs.
 
   lua                [ LUA scripts ]
-     Example LUA scripts that can serve as SLURM plugins.
+     Example LUA scripts that can serve as Slurm plugins.
      job_submit.lua - job_submit plugin that can set a job's default partition
 		      using a very simple algorithm
      job_submit_license.lua - job_submit plugin that can set a job's use of
@@ -49,33 +49,33 @@ of the SLURM contribs distribution follows:
 
   make-3.81.slurm.patch   [ Patch to "make" command for parallel build ]
   make-4.0.slurm.patch    [ Patch to "make" command for parallel build ]
-     This patch will use SLURM to launch tasks across a job's current resource
+     This patch will use Slurm to launch tasks across a job's current resource
      allocation. Depending upon the size of modules to be compiled, this may
      or may not improve performance. If most modules are thousands of lines
      long, the use of additional resources should more than compensate for the
-     overhead of SLURM's task launch. Use with make's "-j" option within an
-     existing SLURM allocation. Outside of a SLURM allocation, make's behavior
+     overhead of Slurm's task launch. Use with make's "-j" option within an
+     existing Slurm allocation. Outside of a Slurm allocation, make's behavior
      will be unchanged. Designed for GNU make-3.81 or make-4.0.
 
   mic                [Tools for use on Intel MIC processors]
 
-  mpich1.slurm.patch [ Patch to mpich1/p4 library for SLURM job task launch ]
-     For SLURM based job initiations (from srun command), get the parameters
+  mpich1.slurm.patch [ Patch to mpich1/p4 library for Slurm job task launch ]
+     For Slurm based job initiations (from srun command), get the parameters
      from environment variables as needed. This allows for a truly parallel
      job launch using the existing "execer" mode of operation with slight
      modification.
 
-  pam                [ PAM (Pluggable Authentication Module) for SLURM ]
+  pam                [ PAM (Pluggable Authentication Module) for Slurm ]
      This PAM module will restrict who can login to a node to users who have
      been allocated resources on the node and user root.
 
-  perlapi/           [ Perl API to SLURM source ]
-     API to SLURM using perl.  Making available all SLURM command that exist
-     in the SLURM proper API.
+  perlapi/           [ Perl API to Slurm source ]
+     API to Slurm using perl.  Making available all Slurm command that exist
+     in the Slurm proper API.
 
-  phpext    	     [ PHP API to SLURM source ]
-     API to SLURM using php.  Not a complete API, but offers quite a few
-     interfaces to existing SLURM proper APIs.
+  phpext    	     [ PHP API to Slurm source ]
+     API to Slurm using php.  Not a complete API, but offers quite a few
+     interfaces to existing Slurm proper APIs.
 
   pmi2               [ PMI2 client library ]
      User applications can link with this library to use Slurm's mpi/pmi2
@@ -97,7 +97,7 @@ of the SLURM contribs distribution follows:
      Tools for managing job exit code records
 
   sjstat             [ Perl program ]
-     Lists attributes of jobs under SLURM control
+     Lists attributes of jobs under Slurm control
 
   skilling.c         [ C program ]
      This program can be used to order the hostnames in a 2+ dimensional
@@ -105,7 +105,7 @@ of the SLURM contribs distribution follows:
      the Hilbert number based upon a node's physical location in the
      computer. Nodes close together in their Hilbert number will also be
      physically close in 2-D or 3-D space, so we can reduce the 2-D or 3-D
-     job placement problem to a 1-D problem that SLURM can easily handle
+     job placement problem to a 1-D problem that Slurm can easily handle
      by defining the node names in the slurm.conf file in order of their
      Hilbert number. If the computer is not a perfect square or cube with
      power of two size, then collapse the node list maintaining the numeric
@@ -115,10 +115,10 @@ of the SLURM contribs distribution follows:
      Scripts to help in option completion when using slurm commands.
 
   slurmdb-direct     [ Perl program ]
-     Program that permits writing directly to SlurmDBD (SLURM DataBase Daemon).
+     Program that permits writing directly to SlurmDBD (Slurm DataBase Daemon).
 
   spank_core.c       [ SPANK plugin, C program ]
-     A SLURM SPANK plugin that can be used to permit users to generated
+     A Slurm SPANK plugin that can be used to permit users to generated
      light-weight core files rather than full core files.
 
   time_login.c       [ C program ]
@@ -127,7 +127,7 @@ of the SLURM contribs distribution follows:
      will not have their environment properly set for jobs submitted through
      Moab. Build with "make -f /dev/null time_login" and execute as user root.
 
-  torque/            [ Wrapper Scripts for Torque migration to SLURM ]
-     Helpful scripts to make transition to SLURM easier from PBS or Torque.
+  torque/            [ Wrapper Scripts for Torque migration to Slurm ]
+     Helpful scripts to make transition to Slurm easier from PBS or Torque.
      These scripts are easily updatable if there is functionality missing.
      NOTE: For the showq command, see https://github.com/pedmon/slurm_showq
diff --git a/doc/html/accounting.shtml b/doc/html/accounting.shtml
index f78ff6d5501..f22ec9e29c5 100644
--- a/doc/html/accounting.shtml
+++ b/doc/html/accounting.shtml
@@ -67,13 +67,13 @@ sacctmgr).
 Making potentially sensitive information available to all users makes
 database security more difficult to provide. Sending the data through
 an intermediate daemon can provide better security and performance
-(through caching data). SlurmDBD (SLURM Database Daemon) provides such services.
+(through caching data). SlurmDBD (Slurm Database Daemon) provides such services.
 SlurmDBD is written in C, multi-threaded, secure and fast.
 The configuration required to use SlurmDBD will be described below.
 Storing information directly into a database would be similar.</p>
 
 <p>Note that SlurmDBD relies upon existing Slurm plugins
-for authentication and Slurm sql for database use, but the other SLURM
+for authentication and Slurm sql for database use, but the other Slurm
 commands and daemons are not required on the host where
 SlurmDBD is installed. Install the <i>slurmdbd</i>,
 <i>slurm-plugins</i>, and <i>slurm-sql</i> RPMs on the computer when SlurmDBD
@@ -122,7 +122,7 @@ communications to the SlurmDBD and we recommend
 If you have one cluster managed by Slurm and execute the SlurmDBD
 on that one cluster, the normal MUNGE configuration will suffice.
 Otherwise MUNGE should then be installed on all nodes of all
-SLURM managed clusters, plus the machine where SlurmDBD executes.
+Slurm managed clusters, plus the machine where SlurmDBD executes.
 You then have a choice of either having a single MUNGE key for
 all of these computers or maintaining a unique key for each of the
 clusters plus a second key for communications between the clusters
@@ -133,7 +133,7 @@ started with different configurations to support two different keys
 (create two key files and start the daemons with the
 <i>--key-file</i> option to locate the proper key plus the
 <i>--socket</i> option to specify distinct local domain sockets for each).
-The pathname of local domain socket will be needed in the SLURM
+The pathname of local domain socket will be needed in the Slurm
 and SlurmDBD configuration files (slurm.conf and slurmdbd.conf
 respectively, more details are provided below).</p>
 
@@ -345,7 +345,7 @@ the named socket used by MUNGE to provide enterprise-wide.
 Otherwise the default MUNGE daemon will be used.</li>
 
 <li><b>AuthType</b>:
-Define the authentication method for communications between SLURM
+Define the authentication method for communications between Slurm
 components. A value of "auth/munge" is recommended.</li>
 
 <li><b>DbdHost</b>:
diff --git a/doc/html/accounting_storageplugins.shtml b/doc/html/accounting_storageplugins.shtml
index 0ea10965afd..dae590bb452 100644
--- a/doc/html/accounting_storageplugins.shtml
+++ b/doc/html/accounting_storageplugins.shtml
@@ -8,7 +8,7 @@ defines them. It is intended as a resource to programmers wishing to write
 their own Slurm Job Accounting Storage plugins. This is version 1 of the API.
 
 <p>Slurm Accounting Storage plugins must conform to the
-SLURM Plugin API with the following specifications:
+Slurm Plugin API with the following specifications:
 
 <p><span class="commandline">const char
 plugin_name[]="<i>full&nbsp;text&nbsp;name</i>"</span>
@@ -64,8 +64,8 @@ implemented must be stubbed.
 <p><b>Note</b>: These init and fini functions are not the same as those
 described in the <span class="commandline">dlopen (3)</span> system library.
 The C run-time system co-opts those symbols for its own initialization.
-The system <span class="commandline">_init()</span> is called before the SLURM
-<span class="commandline">init()</span>, and the SLURM
+The system <span class="commandline">_init()</span> is called before the Slurm
+<span class="commandline">init()</span>, and the Slurm
 <span class="commandline">fini()</span> is called before the system's
 <span class="commandline">_fini()</span>.</p>
 
diff --git a/doc/html/acct_gather_energy_plugins.shtml b/doc/html/acct_gather_energy_plugins.shtml
index f7f6f3c30c8..7e1361acc94 100644
--- a/doc/html/acct_gather_energy_plugins.shtml
+++ b/doc/html/acct_gather_energy_plugins.shtml
@@ -9,7 +9,7 @@ defines them. It is intended as a resource to programmers wishing to write
 their own Slurm energy accounting plugins.
 
 <p>Slurm energy accounting plugins must conform to the
-SLURM Plugin API with the following specifications:
+Slurm Plugin API with the following specifications:
 
 <p><span class="commandline">const char
 plugin_name[]="<i>full&nbsp;text&nbsp;name</i>"</span>
@@ -57,8 +57,8 @@ implemented must be stubbed.
 <p><b>Note</b>: These init and fini functions are not the same as those
 described in the <span class="commandline">dlopen (3)</span> system library.
 The C run-time system co-opts those symbols for its own initialization.
-The system <span class="commandline">_init()</span> is called before the SLURM
-<span class="commandline">init()</span>, and the SLURM
+The system <span class="commandline">_init()</span> is called before the Slurm
+<span class="commandline">init()</span>, and the Slurm
 <span class="commandline">fini()</span> is called before the system's
 <span class="commandline">_fini()</span>.</p>
 
diff --git a/doc/html/api.shtml b/doc/html/api.shtml
index 097abcf28d9..47cf24e8b48 100644
--- a/doc/html/api.shtml
+++ b/doc/html/api.shtml
@@ -6,7 +6,7 @@
 <p>All of the Slurm commands utilize a collection of Application Programming
 Interfaces (APIs).
 User and system applications can directly use these APIs as desired to
-achieve tighter integration with SLURM.
+achieve tighter integration with Slurm.
 For example, Slurm data structures and error codes can be directly
 examined rather than executing Slurm commands and parsing their output.
 This document describes Slurm APIs.
@@ -128,7 +128,7 @@ last Slurm API function executed.</li>
 standard output.</li>
 
 <li><b>slurm_strerror</b>&#151;Return a string describing a specific
-SLURM error code.</li>
+Slurm error code.</li>
 
 </ul>
 <p class="footer"><a href="#top">top</a></p>
@@ -312,7 +312,7 @@ into a list of individual node names.</p>
 
 <ul>
 
-<li><b>slurm_hostlist_create</b>&#151;Translate a SLURM
+<li><b>slurm_hostlist_create</b>&#151;Translate a Slurm
 node name expression into a record used for parsing.
 Use <i>slurm_hostlist_destroy</i> to free the allocated
 storage.</li>
diff --git a/doc/html/bluegene.shtml b/doc/html/bluegene.shtml
index e3febe5ed4f..6d7b54a450d 100644
--- a/doc/html/bluegene.shtml
+++ b/doc/html/bluegene.shtml
@@ -59,7 +59,7 @@ MPs=0000 type=small 32cnblocks=16
 ...
 </pre>
 <p>This will create a small block on each nodeboard on the system.  If your
-system is different than this, adjust appropriately.  The idea is SLURM
+system is different than this, adjust appropriately.  The idea is Slurm
 will create the smallest block possible on every possible hardware
 location.  The system will then check for missing hardware and remove
 blocks that are invalid.  This will get around the problem if you
@@ -106,7 +106,7 @@ Note that this is a c-node count.</p>
 
 <p>Use Slurm's <i>srun</i> command to launch tasks (<i>srun</i> uses
 an api interface into IBM's <i>runjob</i> command).
-SLURM job step information, including accounting, functions as
+Slurm job step information, including accounting, functions as
 expected.  Totalview and other debuggers will also work with srun.
 If Slurm is installed and configured correctly IBM's runjob will not
 work.</p>
@@ -147,7 +147,7 @@ coordinates with a zero origin. The suffix contains three digits on BlueGene/L
 and BlueGene/P systems, while four digits are required for the BlueGene/Q
 systems. For example, "bgp012" represents the midplane whose coordinate
 is at X=0, Y=1 and Z=2.
-SLURM uses an abbreviated format for describing midplanes in which the
+Slurm uses an abbreviated format for describing midplanes in which the
 end-points of the block enclosed are in square-brackets and separated by an "x".
 For example, "bgp[620x731]" is used to represent the eight midplanes
 enclosed in a block with end-points and bgp620 and bgp731 (bgp620, bgp621,
@@ -277,12 +277,12 @@ information from the system and manipulate blocks.
 
 <p>In order to make <i>srun</i> operate correctly with the underlying system
 and to ensure security for new MPI jobs, it is necessary to enable the
-SLURM plugin for the IBM runjob_mux.  This is
+Slurm plugin for the IBM runjob_mux.  This is
 done by altering the bg.properties file. In the [runjob.mux]
 section of the bg.properties file change the plugin option to
 <i>$prefix/lib/slurm/runjob_plugin.so</i> and also set the plugin_flags
 option to <i>0x0109</i> (RTLD_LAZY | RTLD_GLOBAL | RTLD_DEEPBIND)
-which allows the forwarding of symbols to shared objects like SLURM
+which allows the forwarding of symbols to shared objects like Slurm
 uses for plugins.</p>
 <pre>
 [runjob.mux]
@@ -363,7 +363,7 @@ not allow a sub-block to be used with any dimension of 3.
 </p><p>
 In the current IBM API it does not allow wrapping inside a midplane.
 Meaning you can not create a sub-block of 2 with nodes in the 0 and 3 position.
-SLURM will support this in the future when the underlying system allows it.
+Slurm will support this in the future when the underlying system allows it.
 </p>
 
 <p class="footer"><a href="#top">top</a></p>
@@ -597,7 +597,7 @@ file (i.e. <i>MidplaneNodeCnt=512</i> and <i>NodeCardNodeCnt=32</i>).</p>
 
 <p>Note that the <i>IONodesPerMP</i> value defined in
 <i>bluegene.conf</i> represents how many ionodes are on each midplane.
-SLURM does not support heterogeneous ionode configurations so if your
+Slurm does not support heterogeneous ionode configurations so if your
 environment is like this place the smallest number here.  For most BlueGene/L
 systems this value is either 8 (for IO poor systems) or 64 (for IO rich
 systems). For BlueGene/Q systems 4 to 16 are most common.</p>
@@ -736,7 +736,7 @@ If defining blocks of <i>Type=SMALL</i>, the Slurm partition
 containing them as defined in <i>slurm.conf</i> must have the
 parameter <i>Shared=force</i> to enable scheduling of multiple
 jobs on wha. Slurm considers a single node.
-SLURM partitions that do not contain blocks of <i>Type=SMALL</i>
+Slurm partitions that do not contain blocks of <i>Type=SMALL</i>
 may have the parameter <i>Shared=no</i> for a slight improvement in
 scheduler performance.
 As in all Slurm configuration files, parameters and values
@@ -769,7 +769,7 @@ When a job is scheduled, the appropriate block is identified,
 its user set, and it is booted.
 On BlueGene/L and BlueGene/P systems Node use (virtual or coprocessor)
 is set from the mpirun command line.
-SLURM has nothing to do with setting the node use.
+Slurm has nothing to do with setting the node use.
 Subsequent jobs use this same block without rebooting by changing
 the associated user field.
 The only time blocks should be freed and rebooted, in normal operation,
diff --git a/doc/html/cgroups.shtml b/doc/html/cgroups.shtml
index 4df1cf2eea8..f6e9084ed56 100644
--- a/doc/html/cgroups.shtml
+++ b/doc/html/cgroups.shtml
@@ -65,7 +65,7 @@ are common to all cgroup plugins, plus additional options that apply only to
 specific plugins.</li>
 <li>System-level resource specialization is enabled using node configuration 
 parameters.</li>
-<li>Additional configuration is required to enable automatic removal of SLURM
+<li>Additional configuration is required to enable automatic removal of Slurm
 cgroups when they are no longer in use.
 See <a href="#cleanup">Cleanup of Slurm Cgroups</a> below for details.</li>
 </ul>
@@ -142,7 +142,7 @@ hierarchies are created below this base directory. A separate hierarchy is
 created for each cgroup subsystem in use. The name of the root cgroup in each
 hierarchy is the subsystem name. A cgroup named <i>slurm</i> is created below
 the root cgroup in each hierarchy. Below each <i>slurm</i> cgroup, cgroups for
-SLURM users, jobs, steps and tasks are created dynamically as needed. The names
+Slurm users, jobs, steps and tasks are created dynamically as needed. The names
 of these cgroups consist of a prefix identifying the Slurm entity (user, job,
 step or task), followed by the relevant numeric id. The following example shows
 the path of the task cgroup in the cpuset hierarchy for taskid#2 of stepid#0 of
@@ -160,7 +160,7 @@ use more than one node will have a cgroup structure on each node.</p>
 <p>Linux provides a mechanism for the automatic removal of a cgroup when its
 state changes from non-empty to empty.  A cgroup is empty when no processes are
 attached to it and it has no child cgroups.  The Slurm cgroups implementation
-allows this mechanism to be used to automatically remove the relevant SLURM
+allows this mechanism to be used to automatically remove the relevant Slurm
 cgroups when tasks, steps and jobs terminate. To enable this automatic removal
 feature, follow these steps:</p>
 <ul>
@@ -175,10 +175,10 @@ The default location is /etc/slurm/cgroup.</li>
 </pre>
 <li>Create the common release agent file.  This file should be named
 <i>release_common</i>.  An example script for this file is provided in the
-SLURM delivery at etc/cgroup.release_common.example. The example script will
+Slurm delivery at etc/cgroup.release_common.example. The example script will
 automatically remove user, job, step and task cgroups as they become empty. The
 file must have execute permission for root.</li><br>
-<li>Create release agent files for each cgroup subsystem to be used by SLURM.
+<li>Create release agent files for each cgroup subsystem to be used by Slurm.
 This depends on which cgroup plugins are enabled.  For example, the
 proctrack/cgroup plugin uses the <i>freezer</i> subsystem.  See
 <a href="#available">Currently Available Cgroup Plugins</a> above to find out
diff --git a/doc/html/configurator.easy.html.in b/doc/html/configurator.easy.html.in
index f08d96a7448..6ee31ff228d 100644
--- a/doc/html/configurator.easy.html.in
+++ b/doc/html/configurator.easy.html.in
@@ -4,25 +4,25 @@ Copyright (C) 2008-2011 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 Written by Morris Jette <jette1@llnl.gov> and Danny Auble <da@llnl.gov>
 
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 You should have received a copy of the GNU General Public License along
-with SLURM; if not, write to the Free Software Foundation, Inc.,
+with Slurm; if not, write to the Free Software Foundation, Inc.,
 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 -->
 <HTML>
-<HEAD><TITLE>SLURM System Configuration Tool</TITLE>
+<HEAD><TITLE>Slurm System Configuration Tool</TITLE>
 <SCRIPT type="text/javascript">
 <!--
 function get_field(name,form)
@@ -186,24 +186,24 @@ function displayfile()
 </HEAD>
 <BODY>
 <FORM name=config>
-<H1>SLURM Version @SLURM_MAJOR@.@SLURM_MINOR@ Configuration Tool - Easy Version</H1>
-<P>This form can be used to create a SLURM configuration file with
+<H1>Slurm Version @SLURM_MAJOR@.@SLURM_MINOR@ Configuration Tool - Easy Version</H1>
+<P>This form can be used to create a Slurm configuration file with
 you controlling many of the important configuration parameters.</P>
 
-<p>This is a simplified version of the SLURM configuration tool. This version
-has fewer  options for creating a SLURM configuration file. The full version
-of the SLURM configuration tool is available at
+<p>This is a simplified version of the Slurm configuration tool. This version
+has fewer  options for creating a Slurm configuration file. The full version
+of the Slurm configuration tool is available at
 <a href="configurator.html">configurator.html</a>.</p>
 
-<P><B>This tool supports SLURM version @SLURM_MAJOR@.@SLURM_MINOR@ only.</B>
-Configuration files for other versions of SLURM should be built
+<P><B>This tool supports Slurm version @SLURM_MAJOR@.@SLURM_MINOR@ only.</B>
+Configuration files for other versions of Slurm should be built
 using the tool distributed with it in <i>doc/html/configurator.html</i>.
 Some parameters will be set to default values, but you can
 manually edit the resulting <I>slurm.conf</I> as desired
 for greater flexibility. See <I>man slurm.conf</I> for more
 details about the configuration parameters.</P>
 
-<P>Note the while SLURM daemons create log files and other files as needed,
+<P>Note that while Slurm daemons create log files and other files as needed,
 it treats the lack of parent directories as a fatal error.
 This prevents the daemons from running if critical file systems are
 not mounted and will minimize the risk of cold-starting (starting
@@ -215,13 +215,13 @@ in your cluster.</P>
 <P>After you have filled in the fields of interest, use the
 "Submit" button on the bottom of the page to build the <I>slurm.conf</I>
 file. It will appear on your web browser. Save the file in text format
-as <I>slurm.conf</I> for use by SLURM.
+as <I>slurm.conf</I> for use by Slurm.
 
-<P>For more information about SLURM, see
+<P>For more information about Slurm, see
 <A HREF="http://slurm.schedmd.com/slurm.html">http://slurm.schedmd.com/slurm.html</A>
 
 <H2>Control Machines</H2>
-Define the hostname of the computer on which the SLURM controller and
+Define the hostname of the computer on which the Slurm controller and
 optional backup controller will execute. You can also specify addresses
 of these computers if desired (defaults to their hostnames).
 The IP addresses can be either numeric IP addresses or names.
@@ -294,8 +294,8 @@ consumable resource with the select/cons_res plug-in. See below
 under Resource Selection.
 <P>
 
-<H2>SLURM User</H2>
-The SLURM controller (slurmctld) can run without elevated privileges,
+<H2>Slurm User</H2>
+The Slurm controller (slurmctld) can run without elevated privileges,
 so it is recommended that a user "slurm" be created for it. For testing
 purposes any user name can be used.
 <P>
@@ -305,7 +305,7 @@ purposes any user name can be used.
 <H2>State Preservation</H2>
 Define the location of a directory where the slurmctld daemon saves its state.
 This should be a fully qualified pathname which can be read and written to
-by the SLURM user on both the control machine and backup controller (if configured).
+by the Slurm user on both the control machine and backup controller (if configured).
 The location of a directory where slurmd saves state should also be defined.
 This must be a unique directory on each compute server (local disk).
 The use of a highly reliable file system (e.g. RAID) is recommended.
@@ -359,7 +359,7 @@ handling required (InfiniBand, Myrinet, Ethernet, etc.)<BR>
 <P>
 
 <H2>Default MPI Type</H2>
-Specify the type of MPI to be used by default. SLURM will configure environment
+Specify the type of MPI to be used by default. Slurm will configure environment
 variables accordingly. Users can over-ride this specification with an srun option.<BR>
 Select one value for <B>MpiDefault</B>:<BR>
 <input type="radio" name="mpi_default" value="mpichgm"> <B>MPICH-GM</B><BR>
@@ -386,11 +386,11 @@ Linux <i>cgroup</i> to create a job container and track processes.
 Build a <i>cgroup.conf</i> file as well<BR>
 <input type="radio" name="proctrack_type" value="cray"> <B>Cray</B>: Cray proprietary process tracking<BR>
 <input type="radio" name="proctrack_type" value="pgid" checked> <B>Pgid</B>: Use Unix
-Process Group ID, processes changing their process group ID can escape from SLURM
+Process Group ID, processes changing their process group ID can escape from Slurm
 control<BR>
 <input type="radio" name="proctrack_type" value="linuxproc"> <B>LinuxProc</B>: Use
 parent process ID records, required for MPICH-GM use, processes can escape
-from SLURM control<BR>
+from Slurm control<BR>
 <input type="radio" name="proctrack_type" value="rms"> <B>RMS</B>: Use Quadrics
 kernel infrastructure, recommended for systems where this is available<BR>
 <input type="radio" name="proctrack_type" value="sgi_job"> <B>SGI's PAGG
@@ -478,7 +478,7 @@ log goes to syslog, string "%h" in name gets replaced with hostname)
 <P>
 
 <H2>Job Accounting Gather</H2>
-SLURM accounts for resource use per job.  System specifics can be polled
+Slurm accounts for resource use per job.  System specifics can be polled
 determined by system type<BR>
 Select one value for <B>JobAcctGatherType</B>:<BR>
 <input type="radio" name="job_acct_gather_type" value="none" checked> <B>None</B>: No
@@ -489,7 +489,7 @@ AIX process table information gathered, use with AIX systems only<BR>
 Linux process table information gathered, use with Linux systems only<BR>
 
 <H2>Job Accounting Storage</H2>
-Used with the Job Accounting Gather SLURM can store the accounting information in many different fashions.  Fill in your systems choice here<BR>
+Used with the Job Accounting Gather, Slurm can store the accounting information in many different fashions.  Fill in your system's choice here<BR>
 Select one value for <B>AccountingStorageType</B>:<BR>
 <input type="radio" name="accounting_storage_type" value="none" checked> <B>None</B>:
 No job accounting storage<BR>
diff --git a/doc/html/configurator.html.in b/doc/html/configurator.html.in
index ec08b412d5b..ad6e8c5bcf5 100644
--- a/doc/html/configurator.html.in
+++ b/doc/html/configurator.html.in
@@ -4,25 +4,25 @@ Copyright (C) 2008-2011 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 Written by Morris Jette <jette1@llnl.gov> and Danny Auble <da@llnl.gov>
 
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 You should have received a copy of the GNU General Public License along
-with SLURM; if not, write to the Free Software Foundation, Inc.,
+with Slurm; if not, write to the Free Software Foundation, Inc.,
 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 -->
 <HTML>
-<HEAD><TITLE>SLURM System Configuration Tool</TITLE>
+<HEAD><TITLE>Slurm System Configuration Tool</TITLE>
 <SCRIPT type="text/javascript">
 <!--
 function get_field(name,form)
@@ -297,24 +297,24 @@ function displayfile()
 </HEAD>
 <BODY>
 <FORM name=config>
-<H1>SLURM Version @SLURM_MAJOR@.@SLURM_MINOR@ Configuration Tool</H1>
-<P>This form can be used to create a SLURM configuration file with
+<H1>Slurm Version @SLURM_MAJOR@.@SLURM_MINOR@ Configuration Tool</H1>
+<P>This form can be used to create a Slurm configuration file with
 you controlling many of the important configuration parameters.</P>
 
-<p>This is the full version of the SLURM configuration tool. This version
-has all the configuration options to create a SLURM configuration file. There
-is a simplified version of the SLURM configuration tool available at
+<p>This is the full version of the Slurm configuration tool. This version
+has all the configuration options to create a Slurm configuration file. There
+is a simplified version of the Slurm configuration tool available at
 <a href="configurator.easy.html">configurator.easy.html</a>.</p>
 
-<P><B>This tool supports SLURM version @SLURM_MAJOR@.@SLURM_MINOR@ only.</B>
-Configuration files for other versions of SLURM should be built
+<P><B>This tool supports Slurm version @SLURM_MAJOR@.@SLURM_MINOR@ only.</B>
+Configuration files for other versions of Slurm should be built
 using the tool distributed with it in <i>doc/html/configurator.html</i>.
 Some parameters will be set to default values, but you can
 manually edit the resulting <I>slurm.conf</I> as desired
 for greater flexibility. See <I>man slurm.conf</I> for more
 details about the configuration parameters.</P>
 
-<P>Note the while SLURM daemons create log files and other files as needed,
+<P>Note that while Slurm daemons create log files and other files as needed,
 it treats the lack of parent directories as a fatal error.
 This prevents the daemons from running if critical file systems are
 not mounted and will minimize the risk of cold-starting (starting
@@ -326,13 +326,13 @@ in your cluster.</P>
 <P>After you have filled in the fields of interest, use the
 "Submit" button on the bottom of the page to build the <I>slurm.conf</I>
 file. It will appear on your web browser. Save the file in text format
-as <I>slurm.conf</I> for use by SLURM.
+as <I>slurm.conf</I> for use by Slurm.
 
-<P>For more information about SLURM, see
+<P>For more information about Slurm, see
 <A HREF="http://slurm.schedmd.com/slurm.html">http://slurm.schedmd.com/slurm.html</A>
 
 <H2>Control Machines</H2>
-Define the hostname of the computer on which the SLURM controller and
+Define the hostname of the computer on which the Slurm controller and
 optional backup controller will execute. You can also specify addresses
 of these computers if desired (defaults to their hostnames).
 The IP addresses can be either numeric IP addresses or names.
@@ -411,8 +411,8 @@ consumable resource with the select/cons_res plug-in. See below
 under Resource Selection.
 <P>
 
-<H2>SLURM User</H2>
-The SLURM controller (slurmctld) can run without elevated privileges,
+<H2>Slurm User</H2>
+The Slurm controller (slurmctld) can run without elevated privileges,
 so it is recommended that a user "slurm" be created for it. For testing
 purposes any user name can be used.
 <P>
@@ -439,11 +439,11 @@ the system.  You MUST run "scontrol reconfig" to update the cache
 after making any changes to system password or group databases.
 <P>
 
-<H2>SLURM Port Numbers</H2>
-The SLURM controller (slurmctld) requires a unique port for communications
-as do the SLURM compute node daemons (slurmd). If not set, slurm ports
+<H2>Slurm Port Numbers</H2>
+The Slurm controller (slurmctld) requires a unique port for communications
+as do the Slurm compute node daemons (slurmd). If not set, Slurm ports
 are set by checking for an entry in <I>/etc/services</I> and if that
-fails by using an interval default set at SLURM build time.
+fails by using an internal default set at Slurm build time.
 <P>
 <input type="text" name="slurmctld_port" value="6817"> <B>SlurmctldPort</B>
 <P>
@@ -451,7 +451,7 @@ fails by using an interval default set at SLURM build time.
 <P>
 
 <H2>Authentication and Security</H2>
-Define the method used for authenticating communicating between SLURM components.<BR>
+Define the method used for authenticating communication between Slurm components.<BR>
 Select one value for <B>AuthType</B>:<BR>
 <input type="radio" name="auth_type" value="none"> <B>None</B>: No authentication.
 Not recommended for production use.<br>
@@ -467,10 +467,10 @@ Select one value for <B>CryptoType</B>:<BR>
 <input type="radio" name="crypto_type" value="openssl"> <B>OpenSSL</B>:
 <A href="http://www.openssl.org/">OpenSSL</A>
 <P>
-Define the location of public and private keys used by SLURM's
+Define the location of public and private keys used by Slurm's
 cryptographic signature generation plugin (CryptoType).<br>
 <b>These values are only used if CryptoType=OpenSSL.</b><br>
-These files need to be generated by the SLURM administrator.
+These files need to be generated by the Slurm administrator.
 Specify fully qualified pathnames.
 <P>
 <input type="text" name="private_key"> <B>JobCredentialPrivateKey</B>
@@ -481,7 +481,7 @@ Specify fully qualified pathnames.
 <H2>State Preservation</H2>
 Define the location of a directory where the slurmctld daemon saves its state.
 This should be a fully qualified pathname which can be read and written to
-by the SLURM user on both the control machine and backup controller (if configured).
+by the Slurm user on both the control machine and backup controller (if configured).
 The location of a directory where slurmd saves state should also be defined.
 This must be a unique directory on each compute server (local disk).
 The use of a highly reliable file system (e.g. RAID) is recommended.
@@ -542,7 +542,7 @@ handling required (InfiniBand, Myrinet, Ethernet, etc.)<BR>
 <P>
 
 <H2>Default MPI Type</H2>
-Specify the type of MPI to be used by default. SLURM will configure environment
+Specify the type of MPI to be used by default. Slurm will configure environment
 variables accordingly. Users can over-ride this specification with an srun option.<BR>
 Select one value for <B>MpiDefault</B>:<BR>
 <input type="radio" name="mpi_default" value="mpichgm"> <B>MPICH-GM</B><BR>
@@ -569,11 +569,11 @@ Linux <i>cgroups</i> to create a job container and track processes.
 Build a <i>cgroup.conf</i> file as well<BR>
 <input type="radio" name="proctrack_type" value="cray"> <B>Cray</B>: Cray proprietary process tracking<BR>
 <input type="radio" name="proctrack_type" value="pgid" checked> <B>Pgid</B>: Use Unix
-Process Group ID, processes changing their process group ID can escape from SLURM
+Process Group ID, processes changing their process group ID can escape from Slurm
 control<BR>
 <input type="radio" name="proctrack_type" value="linuxproc"> <B>LinuxProc</B>: Use
 parent process ID records, required for MPICH-GM use, processes can escape
-from SLURM control<BR>
+from Slurm control<BR>
 <input type="radio" name="proctrack_type" value="rms"> <B>RMS</B>: Use Quadrics
 kernel infrastructure, recommended for systems where this is available<BR>
 <input type="radio" name="proctrack_type" value="sgi_job"> <B>SGI's PAGG
@@ -733,7 +733,7 @@ Password we are to use to talk to the database for Job completion<br>
 <P>
 
 <H2>Job Accounting Gather</H2>
-SLURM accounts for resource use per job.  System specifics can be polled
+Slurm accounts for resource use per job.  System specifics can be polled
 determined by system type<BR>
 Select one value for <B>JobAcctGatherType</B>:<BR>
 <input type="radio" name="job_acct_gather_type" value="none" checked> <B>None</B>: No
@@ -747,7 +747,7 @@ polling interval in seconds. Zero disables periodic sampling.<BR>
 <P>
 
 <H2>Job Accounting Storage</H2>
-Used with the Job Accounting Gather SLURM can store the accounting information in many different fashions.  Fill in your systems choice here<BR>
+Used with the Job Accounting Gather, Slurm can store the accounting information in many different fashions.  Fill in your system's choice here<BR>
 Select one value for <B>AccountingStorageType</B>:<BR>
 <input type="radio" name="accounting_storage_type" value="none" checked> <B>None</B>:
 No job accounting storage<BR>
@@ -796,7 +796,7 @@ Specify a specify the fully qualified pathname for the file.
 <P>
 
 <H2>Timers</H2>
-SLURM has a variety of timers to control when to consider a node DOWN,
+Slurm has a variety of timers to control when to consider a node DOWN,
 when to purge job records, how long to give a job to gracefully terminate, etc.
 <P>
 <input type="text" name="slurmctld_timeout" value="120">
@@ -804,16 +804,16 @@ when to purge job records, how long to give a job to gracefully terminate, etc.
 becoming the master controller
 <P>
 <input type="text" name="slurmd_timeout" value="300">
-<B>SlurmdTimeout</B>: How many seconds the SLURM controller waits for the slurmd
+<B>SlurmdTimeout</B>: How many seconds the Slurm controller waits for the slurmd
 to respond to a request before considering the node DOWN
 <P>
 <input type="text" name="inactive_limit" value="0">
-<B>InactiveLimit</B>: How many seconds the SLURM controller waits for srun
+<B>InactiveLimit</B>: How many seconds the Slurm controller waits for srun
 commands to respond before considering the job or job step inactive and
 terminating it. A value of zero indicates unlimited wait
 <P>
 <input type="text" name="min_job_age" value="300">
-<B>MinJobAge</B>: How many seconds the SLURM controller waits after a
+<B>MinJobAge</B>: How many seconds the Slurm controller waits after a
 job terminates before purging its record. A record of the job will
 persist in job completion and/or accounting records indefinitely,
 but will no longer be visible with the squeue command after puring
diff --git a/doc/html/cons_res_share.shtml b/doc/html/cons_res_share.shtml
index ee3319e9345..d91bfc98435 100644
--- a/doc/html/cons_res_share.shtml
+++ b/doc/html/cons_res_share.shtml
@@ -127,7 +127,7 @@ of each node in the partition.
 </P>
 <H3>Nodes in Multiple Partitions</H3>
 <P>
-SLURM has supported configuring nodes in more than one partition since version
+Slurm has supported configuring nodes in more than one partition since version
 0.7.0. The following table describes how nodes configured in two partitions with
 different <CODE>Shared</CODE> settings will be allocated to jobs. Note that
 "shared" jobs are jobs that are submitted to partitions configured with
diff --git a/doc/html/core_spec_plugins.shtml b/doc/html/core_spec_plugins.shtml
index 5be4f3cdadf..e33e4c0d764 100644
--- a/doc/html/core_spec_plugins.shtml
+++ b/doc/html/core_spec_plugins.shtml
@@ -8,7 +8,7 @@ defines them. It is intended as a resource to programmers wishing to write
 their own Slurm core specialization plugins. This is version 100 of the API.
 
 <p>Slurm core specialization plugins must conform to the
-SLURM Plugin API with the following specifications:
+Slurm Plugin API with the following specifications:
 
 <p><span class="commandline">const char
 plugin_name[]="<i>full&nbsp;text&nbsp;name</i>"</span>
@@ -57,8 +57,8 @@ implemented must be stubbed.
 <p><b>Note</b>: These init and fini functions are not the same as those
 described in the <span class="commandline">dlopen (3)</span> system library.
 The C run-time system co-opts those symbols for its own initialization.
-The system <span class="commandline">_init()</span> is called before the SLURM
-<span class="commandline">init()</span>, and the SLURM
+The system <span class="commandline">_init()</span> is called before the Slurm
+<span class="commandline">init()</span>, and the Slurm
 <span class="commandline">fini()</span> is called before the system's
 <span class="commandline">_fini()</span>.</p>
 
diff --git a/doc/html/cpu_management.shtml b/doc/html/cpu_management.shtml
index 62ac751c405..99953dd5ffe 100644
--- a/doc/html/cpu_management.shtml
+++ b/doc/html/cpu_management.shtml
@@ -1320,7 +1320,7 @@ srun/salloc/sbatch option:
 			<td width="70%">
 				<p align="LEFT"><font style="font-size: 8pt" size="1">
 This option provides a list of the CPU masks used by task affinity to bind tasks to CPUs. 
-Note that the CPU ids represented by these masks are Linux/hardware CPU ids, not SLURM 
+Note that the CPU ids represented by these masks are Linux/hardware CPU ids, not Slurm 
 abstract CPU ids as reported by scontrol, etc.
 </font></p>
 			</td>
@@ -1576,7 +1576,7 @@ PartitionName=hypernode Nodes=n3 State=UP
 The <font face="Courier New, monospace">SelectType=select/linear</font>
 configuration option specifies allocation in units of whole nodes.
 The<font face="Courier New, monospace"> --nodes=2</font> srun option causes
-SLURM to allocate at least 2 nodes to the job.<p></p>
+Slurm to allocate at least 2 nodes to the job.<p></p>
 <br>
 <a name="Example2"></a>
 <h3>Example 2: Simple allocation of cores as consumable resources</h3>
@@ -1844,7 +1844,7 @@ SelectTypeParameters=CR_Core
 </pre>
 <p>Comments:</p>
 <p>The options specify the following conditions for the job: 6 tasks, 2 unique CPUs per task, 
-using exactly 2 nodes, and with 3 tasks per node. To satisfy these conditions, SLURM 
+using exactly 2 nodes, and with 3 tasks per node. To satisfy these conditions, Slurm 
 must allocate 6 CPUs from each of the 2 nodes. The <font face="Courier New, monospace">
 --distribution=cyclic</font> option causes the tasks to be distributed to the nodes in a 
 round-robin fashion. The following table shows a possible pattern of allocation and 
@@ -1935,7 +1935,7 @@ SelectTypeParameters=CR_Core
 per task, using all 3 nodes in the partition. To satisfy these conditions using 
 the default allocation method across nodes (block), Slurm allocates 8 CPUs from 
 the first node, 6 CPUs from the second node and 2 CPUs from the third node. 
-The <font face="Courier New, monospace">--distribution=plane=2</font> option causes SLURM 
+The <font face="Courier New, monospace">--distribution=plane=2</font> option causes Slurm 
 to distribute tasks in blocks of two to each of the nodes in a round-robin fashion,
 subject to the number of CPUs allocated on each node.  So, for example, only 1 task
 is distributed to the third node because only 2 CPUs were allocated on that node and
diff --git a/doc/html/cray_alps.shtml b/doc/html/cray_alps.shtml
index fdb934eb4cd..d9aec92cf38 100644
--- a/doc/html/cray_alps.shtml
+++ b/doc/html/cray_alps.shtml
@@ -36,7 +36,7 @@ can be translated so there are options that are not available.
 The <i>srun</i> option <i>--launcher-opts=</i> can be used
 to specify <i>aprun</i> options which lack an equivalent within <i>srun</i>.
 For example, <i>srun --launcher-opts="-a xt" -n 4 a.out</i>.
-Since <i>aprun</i> is used to launch tasks (the equivalent of a SLURM
+Since <i>aprun</i> is used to launch tasks (the equivalent of a Slurm
 job step), the job steps will not be visible using Slurm commands.
 Other than Slurm's <i>srun</i> command being replaced by <i>aprun</i>
 and the job steps not being visible, all other Slurm commands will operate
@@ -177,7 +177,7 @@ batch job with a node count specification of zero.</p>
 sbatch -N0 pre_process.bash
 </pre>
 <p><b>Note</b>: Job allocations with zero compute nodes can only be made in
-SLURM partitions explicitly configured with <b>MinNodes=0</b> (the default
+Slurm partitions explicitly configured with <b>MinNodes=0</b> (the default
 minimum node count for a partition is one compute node).</p>
 
 <h3>External Node Use</h3>
@@ -626,7 +626,7 @@ from the user's environment, lower limits may apply to user jobs, but this
 script will insure that higher limits are possible. Copy the file
 <i>contribs/cray/etc_sysconfig_slurm</i> into <i>/etc/sysconfig/slurm</i>
 for these limits to take effect. This script is executed from
-<i>/etc/init.d/slurm</i>, which is typically executed to start the SLURM
+<i>/etc/init.d/slurm</i>, which is typically executed to start the Slurm
 daemons. An excerpt of <i>contribs/cray/etc_sysconfig_slurm</i> is shown
 below.</p>
 
diff --git a/doc/html/crypto_plugins.shtml b/doc/html/crypto_plugins.shtml
index 1af3d402ce5..210a15c4705 100644
--- a/doc/html/crypto_plugins.shtml
+++ b/doc/html/crypto_plugins.shtml
@@ -6,7 +6,7 @@
 <p> This document describe. Slurm cryptographic plugins and the API that
 defines them.
 It is intended as a resource to programmers wishing to write their own
-SLURM cryptographic plugins.
+Slurm cryptographic plugins.
 This is version 0 of the API.</p>
 
 <p>Slurm cryptographic plugins are Slurm plugins that implement
@@ -74,8 +74,8 @@ Functions which are not implemented should be stubbed.</p>
 <p><b>Note</b>: These init and fini functions are not the same as those
 described in the <span class="commandline">dlopen (3)</span> system library.
 The C run-time system co-opts those symbols for its own initialization.
-The system <span class="commandline">_init()</span> is called before the SLURM
-<span class="commandline">init()</span>, and the SLURM
+The system <span class="commandline">_init()</span> is called before the Slurm
+<span class="commandline">init()</span>, and the Slurm
 <span class="commandline">fini()</span> is called before the system's
 <span class="commandline">_fini()</span>.</p>
 
diff --git a/doc/html/disclaimer.shtml b/doc/html/disclaimer.shtml
index e15fa6ac630..0d17a998b93 100644
--- a/doc/html/disclaimer.shtml
+++ b/doc/html/disclaimer.shtml
@@ -44,7 +44,7 @@ can be found at the <a href="team.html">Slurm Team</a> web page.</p>
 
 <p>While many organizations contributed code and/or documentation without
 including a copyright notice, the following copyright notices are found in
-SLURM's code and documentation files:<br>
+Slurm's code and documentation files:<br>
 Copyright (C) 2013 Cray<br>
 Copyright (C) 2011 Trinity Centre for High Performance Computing<br>
 Copyright (C) 2010-2013 SchedMD LLC<br>
diff --git a/doc/html/elastic_computing.shtml b/doc/html/elastic_computing.shtml
index 43de974bf3b..5067dfe0726 100644
--- a/doc/html/elastic_computing.shtml
+++ b/doc/html/elastic_computing.shtml
@@ -128,7 +128,7 @@ allocated, the <i>ResumeProgram</i> is executed and should do the following:</p>
 Note that configuration file will generally be identical on all nodes and not
 include NodeAddr or NodeHostname configuration parameters for any nodes in the
 cloud.
-SLURM commands executed on this node only need to communicate with the
+Slurm commands executed on this node only need to communicate with the
 slurmctld daemon on the ControlMachine.
 <li>Notify the slurmctld daemon of the node's hostname and network address:<br>
 <i>scontrol update nodename=ec0 nodeaddr=123.45.67.89 nodehostname=whatever</i><br>
diff --git a/doc/html/ext_sensorsplugins.shtml b/doc/html/ext_sensorsplugins.shtml
index bb03f8c6d35..ff0f0b118e9 100644
--- a/doc/html/ext_sensorsplugins.shtml
+++ b/doc/html/ext_sensorsplugins.shtml
@@ -9,7 +9,7 @@ defines them. It is intended as a resource to programmers wishing to write
 their own Slurm external sensors plugins.
 
 <p>Slurm external sensors plugins must conform to the
-SLURM Plugin API with the following specifications:
+Slurm Plugin API with the following specifications:
 
 <p><span class="commandline">const char
 plugin_name[]="<i>full&nbsp;text&nbsp;name</i>"</span>
@@ -53,8 +53,8 @@ implemented must be stubbed.
 <p><b>Note</b>: These init and fini functions are not the same as those
 described in the <span class="commandline">dlopen (3)</span> system library.
 The C run-time system co-opts those symbols for its own initialization.
-The system <span class="commandline">_init()</span> is called before the SLURM
-<span class="commandline">init()</span>, and the SLURM
+The system <span class="commandline">_init()</span> is called before the Slurm
+<span class="commandline">init()</span>, and the Slurm
 <span class="commandline">fini()</span> is called before the system's
 <span class="commandline">_fini()</span>.</p>
 
diff --git a/doc/html/faq.shtml b/doc/html/faq.shtml
index 56c310ff4a4..1482f382f36 100644
--- a/doc/html/faq.shtml
+++ b/doc/html/faq.shtml
@@ -727,7 +727,7 @@ a new node count (<i>NumNodes=</i>) for the job or identify the specific nodes
 (<i>NodeList=</i>) that you want the job to retain.
 Any job steps running on the nodes which are relinquished by the job will be
 killed unless initiated with the <i>--no-kill</i> option.
-After the job size is changed, some environment variables created by SLURM
+After the job size is changed, some environment variables created by Slurm
 containing information about the job's environment will no longer be valid and
 should either be removed or altered (e.g. SLURM_NNODES, SLURM_NODELIST and
 SLURM_NPROCS).
@@ -847,7 +847,7 @@ salloc: Relinquishing job allocation 65542
 </pre>
 
 <p><a name="mpi_symbols"><b>25. Why is my MPIHCH2 or MVAPICH2 job not running with
-SLURM? Why does the DAKOTA program not run with Slurm?</b></a><br>
+Slurm? Why does the DAKOTA program not run with Slurm?</b></a><br>
 The Slurm library used to support MPIHCH2 or MVAPICH2 references a variety of
 symbols. If those symbols resolve to functions or variables in your program
 rather than the appropriate library, the application will fail. For example
@@ -1697,8 +1697,8 @@ dset -set_as_default TV::bulk_launch_tmpfile1_host_lines {%R}
 </pre></p>
 <!-- OLD FORMAT
 dset TV::parallel_configs {
-	name: SLURM;
-	description: SLURM;
+	name: Slurm;
+	description: Slurm;
 	starter: srun %s %p %a;
 	style: manager_process;
 	tasks_option: -n;
diff --git a/doc/html/gang_scheduling.shtml b/doc/html/gang_scheduling.shtml
index 0092510b477..e3856ced55e 100644
--- a/doc/html/gang_scheduling.shtml
+++ b/doc/html/gang_scheduling.shtml
@@ -3,11 +3,11 @@
 <H1>Gang Scheduling</H1>
 
 <P>
-SLURM supports timesliced gang scheduling in which two or more jobs are
+Slurm supports timesliced gang scheduling in which two or more jobs are
 allocated to the same resources and these jobs are alternately suspended to
 let one job at a time have dedicated access to the resources for a configured
 period of time.
-SLURM also supports preemptive priority job scheduling in which a a higher
+Slurm also supports preemptive priority job scheduling in which a higher
 priority job can preempt a lower priority one until the higher priority job
 completes.
 See the <a href="preempt.html">Preemption</a> document for more information.
diff --git a/doc/html/gres.shtml b/doc/html/gres.shtml
index 37a373fb3eb..5e0b22afe28 100644
--- a/doc/html/gres.shtml
+++ b/doc/html/gres.shtml
@@ -79,7 +79,7 @@ CPUs option for improved speed in the Slurm scheduling logic.
 
 <LI><B>File</B> Fully qualified pathname of the device files associated with a
 resource.
-The name can include a numeric range suffix to be interpreted by SLURM
+The name can include a numeric range suffix to be interpreted by Slurm
 (e.g. <I>File=/dev/nvidia[0-3]</I>).
 This field is generally required if enforcement of generic resource
 allocations are to be supported (i.e. prevents a user from making
@@ -189,7 +189,7 @@ file and insure they are in the increasing numeric order.</P>
 
 <P>Slurm can be used to provide resource management for systems with the
 Intel&reg; Many Integrated Core (MIC) processor.
-SLURM sets an OFFLOAD_DEVICES environment variable, which controls the
+Slurm sets an OFFLOAD_DEVICES environment variable, which controls the
 selection of MICs available to a job step.
 The OFFLOAD_DEVICES environment variable is used by both Intel
 LEO (Language Extensioins for Offload) and the MKL (Math Kernel Library)
diff --git a/doc/html/gres_design.shtml b/doc/html/gres_design.shtml
index 1b50aeacdc1..7884c2dda5c 100644
--- a/doc/html/gres_design.shtml
+++ b/doc/html/gres_design.shtml
@@ -12,7 +12,7 @@ to provide details about Slurm's implementation of GRES support including the
 relevant data structures. For an overview of GRES configuration and use, see
 <a href="gres.html">Generic Resource (GRES) Scheduling</a>. For details about
 the APIs provided by GRES plugins, see <a href="gres_plugins.html">
-SLURM Generic Resource (GRES) Plugin API</a>.</p>
+Slurm Generic Resource (GRES) Plugin API</a>.</p>
 
 <h2>Data Structures</h2>
 
diff --git a/doc/html/gres_plugins.shtml b/doc/html/gres_plugins.shtml
index f809d4907b8..b0ff01cfadf 100644
--- a/doc/html/gres_plugins.shtml
+++ b/doc/html/gres_plugins.shtml
@@ -7,7 +7,7 @@
 defines them. It is intended as a resource to programmers wishing to write
 their own Slurm job submit plugins.
 <p>Slurm generic resource plugins must conform to the
-SLURM Plugin API with the following specifications:
+Slurm Plugin API with the following specifications:
 
 <p><span class="commandline">const char
 gres_name[]="<i>gres_name</i>"</span><br>
@@ -61,8 +61,8 @@ implemented must be stubbed.
 <p><b>Note</b>: These init and fini functions are not the same as those
 described in the <span class="commandline">dlopen (3)</span> system library.
 The C run-time system co-opts those symbols for its own initialization.
-The system <span class="commandline">_init()</span> is called before the SLURM
-<span class="commandline">init()</span>, and the SLURM
+The system <span class="commandline">_init()</span> is called before the Slurm
+<span class="commandline">init()</span>, and the Slurm
 <span class="commandline">fini()</span> is called before the system's
 <span class="commandline">_fini()</span>.</p>
 
diff --git a/doc/html/hdf5_profile_user_guide.shtml b/doc/html/hdf5_profile_user_guide.shtml
index 782a08075f6..881b3eb27d4 100644
--- a/doc/html/hdf5_profile_user_guide.shtml
+++ b/doc/html/hdf5_profile_user_guide.shtml
@@ -38,7 +38,7 @@ daemon via RPC may not scale to very large clusters or jobs with
 many allocated nodes.</p>
 
 <p>A separate <a href="acct_gather_profile_plugins.html">
-SLURM Profile Accounting Plugin API (AcctGatherProfileType)</a> documents how
+Slurm Profile Accounting Plugin API (AcctGatherProfileType)</a> documents how
 to write other Profile Accounting plugins.</P>
 
 <a id="Administration"></a>
diff --git a/doc/html/high_throughput.shtml b/doc/html/high_throughput.shtml
index e5a39a0aa51..3dc312a828f 100644
--- a/doc/html/high_throughput.shtml
+++ b/doc/html/high_throughput.shtml
@@ -187,7 +187,7 @@ not appear to add any measurable overhead.</li>
 appropriate for your environment.</li>
 </ul>
 
-<h2>SLURMDBD Configuration</h2>
+<h2>SlurmDBD Configuration</h2>
 
 <p>Starting in Slurm 14.11 there was work done to improve the speedup
 of the database which makes turning accounting off a minimal
diff --git a/doc/html/ibm-pe.shtml b/doc/html/ibm-pe.shtml
index f6c2089784b..0185cb914a4 100644
--- a/doc/html/ibm-pe.shtml
+++ b/doc/html/ibm-pe.shtml
@@ -44,7 +44,7 @@ the process name represents the version number of the "pmd" process and is
 subject to change.</li>
 <li>The poe command interacts with the pmdv12 process to launch the application
 tasks, handle their I/O, etc. Since the task launch procedure occurs outside of
-SLURM's control, none of the normal task-level Slurm support is available.</li>
+Slurm's control, none of the normal task-level Slurm support is available.</li>
 <li>The poe command, through the Slurm library, reports the completion of the
 job step.</li>
 </ol>
@@ -245,7 +245,7 @@ This effects the following options:</p>
 
 <p>For the srun command's --multi-prog option (Multiple Program,
 Multiple Data configurations), the command file will be translated from
-SLURM's format to a POE format. POE does not support SLURM expressions
+Slurm's format to a POE format. POE does not support Slurm expressions
 in the MPMD configuration file (e.g. "%t" will not be replaced with the task's
 number and "%o" will not be replaced with the task's offset within
 this range). The command file will be stored in a temporary file
@@ -410,7 +410,7 @@ If the files are not installed there, you can specify a different location using
 the <b>--with-nrth=PATH</b> option to the configure program, where "PATH" is
 the fully qualified pathname of the parent directory(ies) of the nrt.h and
 permapi.h files.
-SLURM searches for the libnrt.so file in the /usr/lib and /usr/lib64 directories
+Slurm searches for the libnrt.so file in the /usr/lib and /usr/lib64 directories
 by default. If the file is not installed there, you can specify a different
 location using the <b>--with-nrtlib=PATH</b> option to the configure program,
 where "PATH" is the fully qualified pathname of the parent directory of the
@@ -432,7 +432,7 @@ For example:</p>
   command is loading and using the libpermapi.so library initially
   from the /usr/lib64 directory. It later reads the /etc/poe.limits
   file and loads the library listed there.  In order for poe to work
-  with Slurm, it needs to use the "libpermapi.so" generated by SLURM
+  with Slurm, it needs to use the "libpermapi.so" generated by Slurm
   for all of its functions.  Until poe is modified to only load the
   correct library, it is necessary for /usr/lib64/libpermapi.so to
   contain Slurm's library or a link to it.</p>
@@ -498,7 +498,7 @@ Start Slurm daemons clean: /etc/init.d/slurm startclean
 <p>It is necessary for all nodes that can be used for scheduling a single job
 have the same network adapter types and count. For example, if node "tux1"
 has two ethernet adapters then the node "tux2" in the same cluster must also
-have two ethernet adapters on the same networks or be in a different SLURM
+have two ethernet adapters on the same networks or be in a different Slurm
 partition so that one job can not be allocated resources on both nodes.
 Without this restriction, a job may allocated adapter resources on one node
 and be unable to allocate the corresponding adapter resources on another
@@ -506,7 +506,7 @@ node.</p>
 
 <p>It is possible to configure Slurm and LoadLeveler to simultaneously exist
 on a cluster, however each scheduler must be configured to manage different
-compute nodes (e.g. LoadLeveler can manage compute nodes "tux[1-8]" and SLURM
+compute nodes (e.g. LoadLeveler can manage compute nodes "tux[1-8]" and Slurm
 can manage compute nodes "tux[9-16]" on the same cluster). In addition, the
 /etc/poe.limits file on each node must identify the MP_PE_RMLIB appropriate
 for that node (e.g. IBM's or Slurm's libpermapi.so).
diff --git a/doc/html/ibm.shtml b/doc/html/ibm.shtml
index 38180a3d5d6..821ea3c9d97 100644
--- a/doc/html/ibm.shtml
+++ b/doc/html/ibm.shtml
@@ -6,7 +6,7 @@
 
 <p>This document describes the unique features of Slurm on the
 IBM AIX computers with a Federation switch.
-SLURM support for AIX has been thoroughly tested, but we
+Slurm support for AIX has been thoroughly tested, but we
 know of no AIX installations using Slurm after 2008.
 You should be familiar with the Slurm's mode of operation on Linux clusters
 before studying the relatively few differences in IBM system operation
@@ -21,7 +21,7 @@ AIX system, it does not support use of the Federation switch or IBM's MPI.
 Job steps should be launched using IBM's poe command.
 This architecture insures proper operation of all IBM tools.</p>
 
-<p>You will use srun to submit a batch script to SLURM.
+<p>You will use srun to submit a batch script to Slurm.
 This script should contain one or more invocations of poe to launch
 the tasks.
 If you want to run a job interactively, just execute poe directly.
@@ -67,9 +67,9 @@ This component is packaged with the Slurm distribution.</li>
 <li>There is a process tracking kernel extension required.
 This is used to insure that all processes associated with a job
 are tracked.
-SLURM normally uses session ID and process group ID on Linux systems,
+Slurm normally uses session ID and process group ID on Linux systems,
 but these mechanisms can not prevent user processes from establishing
-their own session or process group and thus "escape" from SLURM
+their own session or process group and thus "escape" from Slurm
 tracking.
 This kernel extension is not packaged with Slurm, but is available
 upon request.</li>
diff --git a/doc/html/job_exit_code.shtml b/doc/html/job_exit_code.shtml
index 3d88b0389dd..03633af6cff 100644
--- a/doc/html/job_exit_code.shtml
+++ b/doc/html/job_exit_code.shtml
@@ -15,7 +15,7 @@ result in a Job State of FAILED with a Reason of
 
 <p>The exit code is an 8 bit unsigned number ranging between 0 and
 255.  While it is possible for a job to return a negative exit code,
-SLURM will display it as an unsigned value in the 0 - 255 range.</p>
+Slurm will display it as an unsigned value in the 0 - 255 range.</p>
 
 <h2>Job Step Exit Codes</h2>
 
@@ -26,7 +26,7 @@ record.</p>
 <h2>Signaled Jobs</h2>
 
 <p>When a job or step is sent a signal that causes its termination,
-SLURM also captures the signal number and saves it to the job or step
+Slurm also captures the signal number and saves it to the job or step
 record.</p>
 
 <h2>Displaying Exit Codes and Signals</h2>
@@ -41,7 +41,7 @@ signal number will be displayed after the exit code, delineated by a
 colon(:).</p>
 
 <h2>Database Job/Step Records</h2>
-<p>The Slurm control daemon sends job and step records to the SLURM
+<p>The Slurm control daemon sends job and step records to the Slurm
 database when the Slurm accounting_storage plugin is installed.  Job
 and step records sent to the Slurm db can be viewed using the
 <b>sacct</b> command.  The default <b>sacct</b> output contains an
@@ -64,7 +64,7 @@ job's derived exit code is determined by the Slurm control daemon
 and sent to the database when the accounting_storage plugin is
 enabled.</p>
 
-<p>In addition to the derived exit code, the job record in the SLURM
+<p>In addition to the derived exit code, the job record in the Slurm
 database contains a comment string.  This is initialized to the job's
 comment string (when AccountingStoreJobComment parameter in the
 slurm.conf is set) and can only be changed by the user.</p>
diff --git a/doc/html/jobacct_gatherplugins.shtml b/doc/html/jobacct_gatherplugins.shtml
index fbd300cc98f..c80fe623a9d 100644
--- a/doc/html/jobacct_gatherplugins.shtml
+++ b/doc/html/jobacct_gatherplugins.shtml
@@ -8,7 +8,7 @@ defines them. It is intended as a resource to programmers wishing to write
 their own Slurm job accounting gather plugins.
 
 <p>Slurm job accounting gather plugins must conform to the
-SLURM Plugin API with the following specifications:
+Slurm Plugin API with the following specifications:
 
 <p><span class="commandline">const char
 plugin_name[]="<i>full&nbsp;text&nbsp;name</i>"</span>
@@ -61,8 +61,8 @@ implemented must be stubbed.
 <p><b>Note</b>: These init and fini functions are not the same as those
 described in the <span class="commandline">dlopen (3)</span> system library.
 The C run-time system co-opts those symbols for its own initialization.
-The system <span class="commandline">_init()</span> is called before the SLURM
-<span class="commandline">init()</span>, and the SLURM
+The system <span class="commandline">_init()</span> is called before the Slurm
+<span class="commandline">init()</span>, and the Slurm
 <span class="commandline">fini()</span> is called before the system's
 <span class="commandline">_fini()</span>.</p>
 
diff --git a/doc/html/launch_plugins.shtml b/doc/html/launch_plugins.shtml
index 9842f178e72..aa47d902c92 100644
--- a/doc/html/launch_plugins.shtml
+++ b/doc/html/launch_plugins.shtml
@@ -9,7 +9,7 @@
   launch plugin.
 
 <p><span class="commandline">const char
-plugin_name[]="<i>launch&nbsp;SLURM&nbsp;plugin</i>"</span>
+plugin_name[]="<i>launch&nbsp;Slurm&nbsp;plugin</i>"</span>
 <p style="margin-left:.2in">
 
 <p><span class="commandline">const char
@@ -48,8 +48,8 @@ for a sample implementation of a Slurm launch plugin.
 <p><b>Note</b>: These init and fini functions are not the same as those
 described in the <span class="commandline">dlopen (3)</span> system library.
 The C run-time system co-opts those symbols for its own initialization.
-The system <span class="commandline">_init()</span> is called before the SLURM
-<span class="commandline">init()</span>, and the SLURM
+The system <span class="commandline">_init()</span> is called before the Slurm
+<span class="commandline">init()</span>, and the Slurm
 <span class="commandline">fini()</span> is called before the system's
 <span class="commandline">_fini()</span>.</p>
 
diff --git a/doc/html/maui.shtml b/doc/html/maui.shtml
index fb0a349754e..76e4ad72e82 100644
--- a/doc/html/maui.shtml
+++ b/doc/html/maui.shtml
@@ -3,7 +3,7 @@
 <h1>Maui Scheduler Integration Guide</h1>
 <h2>Overview</h2>
 <p>Maui configuration is quite complicated and is really beyond the scope
-of any documents we could supply with SLURM.
+of any documents we could supply with Slurm.
 The best resource for Maui configuration information is the
 online documents at Cluster Resources Inc.:
 <a href="http://www.clusterresources.com/products/maui/docs/mauiadmin.shtml">
@@ -69,7 +69,7 @@ considering both the desired system responsiveness and the overhead of
 executing Maui daemons too frequently.</p>
 
 <p>In order for Maui to be able to access your Slurm partition, you will
-need to define in maui.conf a partition with the same name as the SLURM
+need to define in maui.conf a partition with the same name as the Slurm
 partition(s). For example if nodes "linux[0-3]" are in Slurm partition
 "PartA", slurm.conf includes a line of this sort:</p>
 <pre>
@@ -134,7 +134,7 @@ as those may conflict with those managed by Maui.</li>
 communications between Slurm and Maui or Moab.
 This use of this key is essential to insure that a user
 not build his own program to cancel other user's jobs in
-SLURM.
+Slurm.
 This should be no more than 32-bit unsigned integer and match
 the the encryption key in Maui (<i>--with-key</i> on the
 configure line) or Moab (<i>KEY</i> parameter in the
diff --git a/doc/html/moab.shtml b/doc/html/moab.shtml
index 11dfde764cb..cb30e81b576 100644
--- a/doc/html/moab.shtml
+++ b/doc/html/moab.shtml
@@ -3,7 +3,7 @@
 <h1>Moab Cluster Suite Integration Guide</h1>
 <h2>Overview</h2>
 <p>Moab Cluster Suite configuration is quite complicated and is
-beyond the scope of any documents we could supply with SLURM.
+beyond the scope of any documents we could supply with Slurm.
 The best resource for Moab configuration information is the
 online documents at Cluster Resources Inc.:
 <a href="http://www.clusterresources.com/products/mwm/docs/slurmintegration.shtml">
@@ -79,7 +79,7 @@ a man page distributed with Slurm.</p>
 communications between Slurm and Maui or Moab.
 This use of this key is essential to insure that a user
 not build his own program to cancel other user's jobs in
-SLURM.
+Slurm.
 This should be no more than 32-bit unsigned integer and match
 the encryption key in Maui (<i>--with-key</i> on the
 configure line) or Moab (<i>KEY</i> parameter in the
@@ -89,7 +89,7 @@ to submit new jobs, so even without this key, nobody can
 run jobs as another user.</p>
 
 <p><b>EPort</b> is an event notification port in Moab.
-When a job is submitted to or terminates in SLURM,
+When a job is submitted to or terminates in Slurm,
 Moab is sent a message on this port to begin an attempt
 to schedule the computer.
 This numeric value should match <i>EPORT</i> configured
@@ -110,7 +110,7 @@ BackupAddr configured in slurm.conf.</p>
 <p><b>ExcludePartitions</b> is used to identify partitions
 whose jobs are to be scheduled directly by Slurm rather
 than Moab.
-This only affects jobs which are submitted using SLURM
+This only affects jobs which are submitted using Slurm
 commands (i.e. srun, salloc or sbatch, NOT msub from Moab).
 These jobs will be scheduled on a First-Come-First-Served
 basis.
@@ -120,7 +120,7 @@ will be outside of Moab's control.
 Note that Moab controls for resource reservation, fair share
 scheduling, etc. will not apply to the initiation of these jobs.
 If more than one partition is to be scheduled directly by
-SLURM, use a comma separator between their names.</p>
+Slurm, use a comma separator between their names.</p>
 
 <p><b>HidePartitionJobs</b> identifies partitions whose jobs are not
 to be reported to Moab.
@@ -199,10 +199,10 @@ CLIENTCFG[RM:slurm] KEY=123456789
 
 <h3>Job Submission</h3>
 
-<p>Jobs can either be submitted to Moab or directly to SLURM.
+<p>Jobs can either be submitted to Moab or directly to Slurm.
 Moab's <i>msub</i> command has a <i>--slurm</i> option that can
 be placed at the <b>end</b> of the command line and those options
-will be passed to Slurm. This can be used to invoke SLURM
+will be passed to Slurm. This can be used to invoke Slurm
 options which are not directly supported by Moab (e.g.
 system images to boot, task distribution specification across
 sockets, cores, and hyperthreads, etc.).
@@ -226,7 +226,7 @@ as user root:</p>
         "/bin/echo BEGIN; /bin/env; /bin/echo FINI"
 </pre>
 <p> For typical batch jobs, the job transfer from Moab to
-SLURM is performed using <i>sbatch</i> and occurs instantaneously.
+Slurm is performed using <i>sbatch</i> and occurs instantaneously.
 The environment is loaded by a Slurm daemon (slurmd) when the
 batch job begins execution.
 For interactive jobs (<i>msub -I ...</i>), the job transfer
@@ -244,7 +244,7 @@ file, slurm.conf. A value of zero results in immediately
 using the cache file. The default value is 2 seconds.</p>
 
 <p>We have provided a simple program that can be used to build
-cache files for users. The program can be found in the SLURM
+cache files for users. The program can be found in the Slurm
 distribution at <i>contribs/env_cache_builder.c</i>.
 This program can support a longer timeout than Moab, but
 will report errors for users for whom the environment file
diff --git a/doc/html/mpi_guide.shtml b/doc/html/mpi_guide.shtml
index cce20654c05..07a96982b81 100644
--- a/doc/html/mpi_guide.shtml
+++ b/doc/html/mpi_guide.shtml
@@ -12,7 +12,7 @@ MVAPICH, MVAPICH2, some MPICH1 modes, and OpenMPI version 1.5 or higher).</li>
 <li>Slurm creates a resource allocation for the job and then
 mpirun launches tasks using Slurm's infrastructure (LAM/MPI and HP-MPI).</li>
 <li>Slurm creates a resource allocation for the job and then
-mpirun launches tasks using some mechanism other than SLURM,
+mpirun launches tasks using some mechanism other than Slurm,
 such as SSH or RSH (BlueGene MPI and some MPICH1 modes).
 These tasks are initiated outside of Slurm's monitoring
 or control. Slurm's epilog should be configured to purge
@@ -126,9 +126,9 @@ launching the MPI jobs under the control of the Slurm job manager:</p>
 <p>Slurm is supported by the <i>mpirun</i> command of the Intel&reg; MPI Library 3.1
 Build 029 for Linux OS and later releases.</p>
 <p>When launched within a session allocated using the Slurm commands <i>sbatch</i> or
-<i>salloc</i>, the <i>mpirun</i> command automatically detects and queries certain SLURM
+<i>salloc</i>, the <i>mpirun</i> command automatically detects and queries certain Slurm
 environment variables to obtain the list of the allocated cluster nodes.</p>
-<p>Use the following commands to start an MPI job within an existing SLURM
+<p>Use the following commands to start an MPI job within an existing Slurm
 session over the MPD PM:</p>
 <pre>
 <i>export I_MPI_PROCESS_MANAGER=mpd
@@ -140,7 +140,7 @@ mpirun -n &lt;num_procs&gt; a.out</i>
 Update 3 through the Hydra PM by default. The behavior of this command is
 analogous to the MPD case described above.</p>
 <p>Use the one of the following commands to start an MPI job within an existing
-SLURM session over the Hydra PM:</p>
+Slurm session over the Hydra PM:</p>
 <pre>
 <i>mpirun -n &lt;num_procs&gt; a.out</i>
 </pre>
@@ -169,7 +169,7 @@ it, or to start an MPI job within a Slurm session already created using the
 <i>sbatch</i> or <i>salloc</i> commands:</p>
 <ul>
 <li>Set the <i>I_MPI_PMI_LIBRARY</i> environment variable to point to the
-SLURM Process Management Interface (PMI) library:</li>
+Slurm Process Management Interface (PMI) library:</li>
 <pre>
 <i>export I_MPI_PMI_LIBRARY=/path/to/slurm/pmi/library/libpmi.so</i>
 </pre>
@@ -187,7 +187,7 @@ For more information see
 
 <h2><a name="lam_mpi" href="http://www.lam-mpi.org/"><b>LAM/MPI</b></a></h2>
 
-<p>LAM/MPI relies upon the SLURM
+<p>LAM/MPI relies upon the Slurm
 <span class="commandline">salloc</span> or <span class="commandline">sbatch</span>
 command to allocate. In either case, specify
 the maximum number of tasks required for the job. Then execute the
@@ -265,7 +265,7 @@ You can refere yourself to <i>mpich2-1.5</i> implementation and configure MPICH
 <p>
 To check if the MPI version you are using supports PMI2 check for PMI2_* symbols in the MPI library.
 <p>
-SLURM provides a version of the PMI2 client library in the contribs directory. This library gets
+Slurm provides a version of the PMI2 client library in the contribs directory. This library gets
 installed in the Slurm lib directory. If your MPI implementation supports PMI2 and you wish to use
 the Slurm provided library you have to link the Slurm provided library explicitly:
 <pre>
@@ -276,7 +276,7 @@ $ srun -n20 a.out
 <h3>MPICH2 with srun and PMI version 1</h3>
 
 <p>Link your program with
-SLURM's implementation of the PMI library so that tasks can communicate
+Slurm's implementation of the PMI library so that tasks can communicate
 host and port information at startup. (The system administrator can add
 these option to the mpicc and mpif77 commands directly, so the user will not
 need to bother). For example:
@@ -321,8 +321,8 @@ to launch the tasks. A simple example is shown below.</p>
 <h2><a name="mpich_gm" href="http://www.myri.com/scs/download-mpichgm.html"><b>MPICH-GM</b></a></h2>
 
 <p>MPICH-GM jobs can be launched directly by <b>srun</b> command.
-SLURM's <i>mpichgm</i> MPI plugin must be used to establish communications
-between the launched tasks. This can be accomplished either using the SLURM
+Slurm's <i>mpichgm</i> MPI plugin must be used to establish communications
+between the launched tasks. This can be accomplished either using the Slurm
 configuration parameter <i>MpiDefault=mpichgm</i> in <b>slurm.conf</b>
 or srun's <i>--mpi=mpichgm</i> option.
 <pre>
@@ -335,8 +335,8 @@ $ srun -n16 --mpi=mpichgm a.out
 <h2><a name="mpich_mx" href="http://www.myri.com/scs/download-mpichmx.html"><b>MPICH-MX</b></a></h2>
 
 <p>MPICH-MX jobs can be launched directly by <b>srun</b> command.
-SLURM's <i>mpichmx</i> MPI plugin must be used to establish communications
-between the launched tasks. This can be accomplished either using the SLURM
+Slurm's <i>mpichmx</i> MPI plugin must be used to establish communications
+between the launched tasks. This can be accomplished either using the Slurm
 configuration parameter <i>MpiDefault=mpichmx</i> in <b>slurm.conf</b>
 or srun's <i>--mpi=mpichmx</i> option.
 <pre>
@@ -349,8 +349,8 @@ $ srun -n16 --mpi=mpichmx a.out
 <h2><a name="mvapich" href="http://mvapich.cse.ohio-state.edu/"><b>MVAPICH</b></a></h2>
 
 <p>MVAPICH jobs can be launched directly by <b>srun</b> command.
-SLURM's <i>mvapich</i> MPI plugin must be used to establish communications
-between the launched tasks. This can be accomplished either using the SLURM
+Slurm's <i>mvapich</i> MPI plugin must be used to establish communications
+between the launched tasks. This can be accomplished either using the Slurm
 configuration parameter <i>MpiDefault=mvapich</i> in <b>slurm.conf</b>
 or srun's <i>--mpi=mvapich</i> option.
 <pre>
@@ -441,7 +441,7 @@ $ srun -n16 --mpi=mpich1_shmem a.out
 
 <p>NOTE: Using a configuration of <i>MpiDefault=mpich1_shmem</i> will result in
 one task being launched per node with the expectation that the MPI library will
-launch the remaining tasks based upon environment variables set by SLURM.
+launch the remaining tasks based upon environment variables set by Slurm.
 Non-MPI jobs started in this configuration will lack the mechanism to launch
 more than one task per node unless srun's <i>--mpi=none</i> option is used.</p>
 
@@ -465,7 +465,7 @@ on a node appear to Slurm as coming from the one task that it
 launched. If the srun --label option is used, the task ID labels
 will be misleading.</p>
 
-<p>Other MPICH1 programming models current rely upon the SLURM
+<p>Other MPICH1 programming models currently rely upon the Slurm
 <span class="commandline">salloc</span> or
 <span class="commandline">sbatch</span> command to allocate resources.
 In either case, specify the maximum number of tasks required for the job.
diff --git a/doc/html/overview.shtml b/doc/html/overview.shtml
index 69672368b0c..fc43fd998a6 100644
--- a/doc/html/overview.shtml
+++ b/doc/html/overview.shtml
@@ -6,7 +6,7 @@
 fault-tolerant, and highly scalable cluster management and job scheduling system
 for large and small Linux clusters. Slurm requires no kernel modifications for
 its operation and is relatively self-contained. As a cluster workload manager,
-SLURM has three key functions. First, it allocates exclusive and/or non-exclusive
+Slurm has three key functions. First, it allocates exclusive and/or non-exclusive
 access to resources (compute nodes) to users for some duration of time so they
 can perform work. Second, it provides a framework for starting, executing, and
 monitoring work (normally a parallel job) on the set of allocated nodes.
@@ -168,7 +168,7 @@ a job is assigned a set of nodes, the user is able to initiate parallel work in
 the form of job steps in any configuration within the allocation. For instance,
 a single job step may be started that utilizes all nodes allocated to the job,
 or several job steps may independently use a portion of the allocation.
-SLURM provides resource management for the processors allocated to a job,
+Slurm provides resource management for the processors allocated to a job,
 so that multiple job steps can be simultaneously submitted and queued until
 there are available resources within the job's allocation.</p>
 
diff --git a/doc/html/plugins.shtml b/doc/html/plugins.shtml
index ce072ff2dbb..415007eed26 100644
--- a/doc/html/plugins.shtml
+++ b/doc/html/plugins.shtml
@@ -16,7 +16,7 @@ Maui scheduler would give its type as &quot;sched/maui.&quot; It would implement
 the Slurm Scheduler API.</p>
 <h2>Versioning</h2>
 <p>Slurm plugin version numbers comprise a major and minor revision number. As
-SLURM evolves, changes to the individual plugin APIs may be necessary to implement
+Slurm evolves, changes to the individual plugin APIs may be necessary to implement
 new features. The major number identifies the version of the applicable API that
 the plugin implements. Incrementing the major version number denotes that the
 API has changed significantly and possibly incompatibly over prior versions.</p>
@@ -25,9 +25,9 @@ third parties, version skew may occur in a Slurm installation. Slurm may support
 multiple versions of each API in a backward-compatible fashion to provide time
 for plugin authors to update their plugins. Conversely, the plugin may support
 multiple versions of the API in order to be transparently portable across different
-SLURM installations. The version of the API spoken in an installation will be
+Slurm installations. The version of the API spoken in an installation will be
 the highest-numbered version which is common to both Slurm and the plugin. Each
-SLURM release will document which API versions it supports. From time to time
+Slurm release will document which API versions it supports. From time to time
 ancient API versions will be deprecated.</p>
 <p>The minor version number is incremented at the discretion of the plugin author
 and denotes revisions or upgrades particular to that implementation. If two or
@@ -81,8 +81,8 @@ after the last plugin-specific API call is made.</p>
 <p><b>Note</b>: These init and fini functions are not the same as those
 described in the <span class="commandline">dlopen (3)</span> system library.
 The C run-time system co-opts those symbols for its own initialization.
-The system <span class="commandline">_init()</span> is called before the SLURM
-<span class="commandline">init()</span>, and the SLURM
+The system <span class="commandline">_init()</span> is called before the Slurm
+<span class="commandline">init()</span>, and the Slurm
 <span class="commandline">fini()</span> is called before the system's
 <span class="commandline">_fini()</span>.</p>
 <p>The functions need not appear. The plugin may provide either
diff --git a/doc/html/power_save.shtml b/doc/html/power_save.shtml
index 76adda3b253..67bcb630523 100644
--- a/doc/html/power_save.shtml
+++ b/doc/html/power_save.shtml
@@ -14,7 +14,7 @@ Of particular note, Slurm can power nodes up or down
 at a configurable rate to prevent rapid changes in power demands.
 For example, starting a 1000 node job on an idle cluster could result
 in an instantaneous surge in power demand of multiple megawatts without
-SLURM's support to increase power demands in a gradual fashion.</p>
+Slurm's support to increase power demands in a gradual fashion.</p>
 
 
 <h2>Configuration</h2>
diff --git a/doc/html/preempt.shtml b/doc/html/preempt.shtml
index 17621bc163c..3ec3554c3b1 100644
--- a/doc/html/preempt.shtml
+++ b/doc/html/preempt.shtml
@@ -3,7 +3,7 @@
 <H1>Preemption</H1>
 
 <P>
-SLURM supports job preemption, the act of stopping one or more "low-priority"
+Slurm supports job preemption, the act of stopping one or more "low-priority"
 jobs to let a "high-priority" job run uninterrupted until it completes.
 Job preemption is implemented as a variation of Slurm's
 <a href="gang_scheduling.html">Gang Scheduling</a> logic.
@@ -12,12 +12,12 @@ allocated to one or more low priority jobs, the low priority job(s) are
 preempted.
 The low priority job(s) can resume once the high priority job completes.
 Alternately, the low priority job(s) can be requeued and started using other
-resources if so configured in newer versions of SLURM.
+resources if so configured in newer versions of Slurm.
 </P>
 <P>
 The job's partition priority or its Quality Of Service (QOS) can be used to
 identify the which jobs can preempt or be preempted by other jobs.
-SLURM offers the ability to configure the preemption mechanism used on a per
+Slurm offers the ability to configure the preemption mechanism used on a per
 partition or per QOS basis.
 For example, jobs in a low priority queue may get requeued,
 while jobs in a medium priority queue may get suspended.
@@ -156,7 +156,7 @@ allocated to a common resource (and gang scheduled), set
 <P>
 To enable preemption after making the configuration changes described above,
 restart Slurm if it is already running. Any change to the plugin settings in
-SLURM requires a full restart of the daemons. If you just change the partition
+Slurm requires a full restart of the daemons. If you just change the partition
 <I>Priority</I> or <I>Shared</I> setting, this can be updated with
 <I>scontrol reconfig</I>.
 </P>
diff --git a/doc/html/preemption_plugins.shtml b/doc/html/preemption_plugins.shtml
index f74180f827d..df2917b681a 100644
--- a/doc/html/preemption_plugins.shtml
+++ b/doc/html/preemption_plugins.shtml
@@ -5,7 +5,7 @@
 <h2> Overview</h2>
 <p> This document describes Slurm preemption plugins and the API that defines
 them. It is intended as a resource to programmers wishing to write their own
-SLURM preemption plugins. This is version 100 of the API.</p>
+Slurm preemption plugins. This is version 100 of the API.</p>
 
 <p>Slurm preemption plugins are Slurm plugins that identify which jobs
 can be preempted by a pending job. They must conform to the Slurm Plugin
@@ -26,7 +26,7 @@ upon their Quality Of Service values as defined in the Slurm database.</li>
 </ul>
 
 <p>The <span class="commandline">plugin_name</span> and
-<span class="commandline">plugin_version</span> symbols required by the SLURM
+<span class="commandline">plugin_version</span> symbols required by the Slurm
 Plugin API require no specialization for job preemption support.
 Note carefully, however, the versioning discussion below.</p>
 
@@ -56,8 +56,8 @@ should be stubbed.</p>
 <p><b>Note</b>: These init and fini functions are not the same as those
 described in the <span class="commandline">dlopen (3)</span> system library.
 The C run-time system co-opts those symbols for its own initialization.
-The system <span class="commandline">_init()</span> is called before the SLURM
-<span class="commandline">init()</span>, and the SLURM
+The system <span class="commandline">_init()</span> is called before the Slurm
+<span class="commandline">init()</span>, and the Slurm
 <span class="commandline">fini()</span> is called before the system's
 <span class="commandline">_fini()</span>.</p>
 
@@ -96,7 +96,7 @@ preempted, otherwise false</p>
 <h2>Versioning</h2>
 <p> This document describes version 100 of the Slurm Preemption API. Future
 releases of Slurm may revise this API. A preemption plugin conveys its ability
-to implement a particular API version using the mechanism outlined for SLURM
+to implement a particular API version using the mechanism outlined for Slurm
 plugins.</p>
 
 <p class="footer"><a href="#top">top</a></p>
diff --git a/doc/html/priority_plugins.shtml b/doc/html/priority_plugins.shtml
index efafc5d9ede..4c24b7f5af1 100644
--- a/doc/html/priority_plugins.shtml
+++ b/doc/html/priority_plugins.shtml
@@ -5,7 +5,7 @@
 <h2> Overview</h2>
 <p> This document describes Slurm priority plugins and the API that defines
 them. It is intended as a resource to programmers wishing to write their own
-SLURM priority plugins. This is version 100 of the API.</p>
+Slurm priority plugins. This is version 100 of the API.</p>
 
 <p>Slurm priority plugins are Slurm plugins that implement the Slurm priority
 API described herein. They must conform to the Slurm Plugin API with the
@@ -24,7 +24,7 @@ job priority.</li>
 </ul>
 
 <p>The <span class="commandline">plugin_name</span> and
-<span class="commandline">plugin_version</span> symbols required by the SLURM
+<span class="commandline">plugin_version</span> symbols required by the Slurm
 Plugin API require no specialization for job priority support.
 Note carefully, however, the versioning discussion below.</p>
 
@@ -91,8 +91,8 @@ be stubbed.</p>
 <p><b>Note</b>: These init and fini functions are not the same as those
 described in the <span class="commandline">dlopen (3)</span> system library.
 The C run-time system co-opts those symbols for its own initialization.
-The system <span class="commandline">_init()</span> is called before the SLURM
-<span class="commandline">init()</span>, and the SLURM
+The system <span class="commandline">_init()</span> is called before the Slurm
+<span class="commandline">init()</span>, and the Slurm
 <span class="commandline">fini()</span> is called before the system's
 <span class="commandline">_fini()</span>.</p>
 
@@ -141,7 +141,7 @@ containing the requested job priority factors</p>
 <h2>Versioning</h2>
 <p> This document describes version 101 of the Slurm Priority API. Future
 releases of Slurm may revise this API. A priority plugin conveys its ability
-to implement a particular API version using the mechanism outlined for SLURM
+to implement a particular API version using the mechanism outlined for Slurm
 plugins.</p>
 
 <p class="footer"><a href="#top">top</a></p>
diff --git a/doc/html/programmer_guide.shtml b/doc/html/programmer_guide.shtml
index 4cf3f81aa36..4b3e9b633ae 100644
--- a/doc/html/programmer_guide.shtml
+++ b/doc/html/programmer_guide.shtml
@@ -32,7 +32,7 @@ purpose plugin mechanism. A Slurm plugin is a dynamically linked code object tha
 is loaded explicitly at run time by the Slurm libraries. It provides a customized
 implementation of a well-defined API connected to tasks such as authentication,
 interconnect fabric, task scheduling, etc. A set of functions is defined for use
-by all of the different infrastructures of a particular variety. When a SLURM
+by all of the different infrastructures of a particular variety. When a Slurm
 daemon is initiated, it reads the configuration file to determine which of the
 available plugins should be used. A <a href="plugins.html">plugin developer's
 guide</a> is available with general information about plugins. Most plugin
@@ -52,7 +52,7 @@ of subdirectories for each type of file.</p>
 <b>configure.ac</b>, <b>Makefile.am</b>, <b>Make-rpm.mk</b>, <b>META</b>, <b>README</b>,
 <b>slurm.spec.in</b>, and the contents of the <b>auxdir</b> directory. <span class="commandline">autoconf</span>
 and <span class="commandline">make</span> commands are used to build and install
-SLURM in an automated fashion. NOTE: <span class="commandline">autoconf</span>
+Slurm in an automated fashion. NOTE: <span class="commandline">autoconf</span>
 version 2.52 or higher is required to build Slurm. Execute
 <span class="commandline">autoconf -V</span> to check your version number.
 The build process is described in the README file.
@@ -93,7 +93,7 @@ as follows: </p>
 the Slurm code. Used to send and get Slurm information from the central manager.
 These are the functions user applications might utilize.<br>
 <b>common</b>&#151;General purpose functions for widespread use throughout
-SLURM.<br>
+Slurm.<br>
 <b>database</b>&#151;Various database files that support the accounting
  storage plugin.<br>
 <b>plugins</b>&#151;Plugin functions for various infrastructures or optional
@@ -184,7 +184,7 @@ The <b>slurm.conf</b> can be built using a <a href="configurator.html">configura
 See <b>doc/man/man5/slurm.conf.5</b> and the man pages for other configuration files
 for more details.
 <b>init.d.slurm</b> is a script that determines which
-SLURM daemon(s) should execute on any node based upon the configuration file contents.
+Slurm daemon(s) should execute on any node based upon the configuration file contents.
 It will also manage these daemons: starting, signalling, restarting, and stopping them.</p>
 
 <h2>Test Suite</h2>
diff --git a/doc/html/prolog_epilog.shtml b/doc/html/prolog_epilog.shtml
index 9e1eb1680c1..c1ffbe64fb6 100644
--- a/doc/html/prolog_epilog.shtml
+++ b/doc/html/prolog_epilog.shtml
@@ -382,7 +382,7 @@ Name of the job.
 Available in PrologSlurmctld and EpilogSlurmctld only.</li>
 
 <li><b>SLURM_JOB_NODELIST</b>
-Nodes assigned to job. A SLURM hostlist expression.
+Nodes assigned to job. A Slurm hostlist expression.
 "scontrol show hostnames" can be used to convert this to a
 list of individual host names.
 Available in PrologSlurmctld and EpilogSlurmctld only.</li>
diff --git a/doc/html/publications.shtml b/doc/html/publications.shtml
index b39b4c7a6a7..6fa5acf726a 100644
--- a/doc/html/publications.shtml
+++ b/doc/html/publications.shtml
@@ -30,7 +30,7 @@ Chris Samuel (VLSCI)</li>
 
 
 <!--Slurm User Group Meeting 2013-->
-<li><b>Presentations from SLURM User Group Meeting, September 2014</b></li>
+<li><b>Presentations from Slurm User Group Meeting, September 2014</b></li>
 <ul>
 
 <li><a href="http://www.hsiphotography.com/p96779352/h24431f4d#h33c5b153">Group photo</a>
diff --git a/doc/html/quickstart_admin.shtml b/doc/html/quickstart_admin.shtml
index f012a8c5791..188ce340306 100644
--- a/doc/html/quickstart_admin.shtml
+++ b/doc/html/quickstart_admin.shtml
@@ -26,10 +26,10 @@ and <i>--sysconfdir=</i></li>
 header files, etc.</li>
 <li>Build a configuration file using your favorite web browser and
 <i>doc/html/configurator.html</i>.<br>
-NOTE: The <i>SlurmUser</i> must exist prior to starting SLURM
+NOTE: The <i>SlurmUser</i> must exist prior to starting Slurm
 and must exist on all nodes of the cluster.<br>
 NOTE: The parent directories for Slurm's log files, process ID files,
-state save directories, etc. are not created by SLURM.
+state save directories, etc. are not created by Slurm.
 They must be created and made writable by <i>SlurmUser</i> as needed prior to
 starting Slurm daemons.<br>
 NOTE: If any parent directories are created during the installation process
@@ -161,7 +161,7 @@ Some macro definitions that may be used in building Slurm include:
 # .rpmmacros
 # For AIX at LLNL
 # Override some RPM macros from /usr/lib/rpm/macros
-# Set SLURM-specific macros for unconventional file locations
+# Set Slurm-specific macros for unconventional file locations
 #
 %_enable_debug     "--with-debug"
 %_prefix           /admin/llnl
@@ -283,7 +283,7 @@ users and groups must be configured on those hosts.</p>
 <h3>Authentication of Slurm communications</h3>
 <p>All communications between Slurm components are authenticated. The
 authentication infrastructure is provided by a dynamically loaded
-plugin chosen at runtime via the <b>AuthType</b> keyword in the SLURM
+plugin chosen at runtime via the <b>AuthType</b> keyword in the Slurm
 configuration file.  Currently available authentication types include
 <a href="http://www.theether.org/authd/">authd</a>,
 <a href="https://code.google.com/p/munge/">munge</a>, and none.
@@ -384,11 +384,11 @@ This is meant to be exploited by any parallel debugger (notably, TotalView),
 and support is unconditionally compiled into Slurm code.</p>
 
 <p>The following lines should also be added to the global <i>.tvdrc</i> file
-for TotalView to operate with SLURM:
+for TotalView to operate with Slurm:
 <pre>
 dset TV::parallel_configs {
-	name: SLURM;
-	description: SLURM;
+	name: Slurm;
+	description: Slurm;
 	starter: srun %s %p %a;
 	style: manager_process;
 	tasks_option: -n;
@@ -406,7 +406,7 @@ to compute nodes available for download.
 When installed, the Slurm PAM module will prevent users from logging
 into any node that has not be assigned to that user.
 On job termination, any processes initiated by the user outside of
-SLURM's control may be killed using an <i>Epilog</i> script configured
+Slurm's control may be killed using an <i>Epilog</i> script configured
 in <i>slurm.conf</i>.
 An example of such a script is included as <i>etc/slurm.epilog.clean</i>.
 Without these mechanisms any user can login to any compute node,
@@ -432,10 +432,10 @@ In this case &quot;emcri&quot; is the private management network interface
 for the host &quot;mcri&quot;. Port numbers to be used for
 communications are specified as well as various timer values.</p>
 
-<p>The <i>SlurmUser</i> must be created as needed prior to starting SLURM
+<p>The <i>SlurmUser</i> must be created as needed prior to starting Slurm
 and must exist on all nodes in your cluster.
 The parent directories for Slurm's log files, process ID files,
-state save directories, etc. are not created by SLURM.
+state save directories, etc. are not created by Slurm.
 They must be created and made writable by <i>SlurmUser</i> as needed prior to
 starting Slurm daemons.</p>
 
diff --git a/doc/html/resource_limits.shtml b/doc/html/resource_limits.shtml
index 66ff42c558b..d613194b8b1 100644
--- a/doc/html/resource_limits.shtml
+++ b/doc/html/resource_limits.shtml
@@ -45,7 +45,7 @@ for SlurmDBD execution.</p>
 
 <p>Both accounting and scheduling policies are configured based upon
 an <i>association</i>. An <i>association</i> is a 4-tuple consisting
-of the cluster name, bank account, user and (optionally) the SLURM
+of the cluster name, bank account, user and (optionally) the Slurm
 partition.
 In order to enforce scheduling policy, set the value of
 <b>AccountingStorageEnforce</b>.
@@ -217,7 +217,7 @@ specified then no limit will apply.</p>
 </ul>
 
 <p>The <b>MaxNodes</b> and <b>MaxWall</b> options already exist in
-SLURM's configuration on a per-partition basis, but the above options
+Slurm's configuration on a per-partition basis, but the above options
 provide the ability to impose limits on a per-user basis.  The
 <b>MaxJobs</b> option provides an entirely new mechanism for Slurm to
 control the workload any individual may place on a cluster in order to
diff --git a/doc/html/review_release.html b/doc/html/review_release.html
deleted file mode 100644
index 2bb49a043a5..00000000000
--- a/doc/html/review_release.html
+++ /dev/null
@@ -1,76 +0,0 @@
-<html>
-
-<head>
-<title>SLURM Web pages for Review and Release</title>
-<!-- Updated 6 May 2011 -->
-</head>
-
-<body>
-<h1>SLURM Web pages for Review and Release</h1>
-<b>NOTE: Do not follow links.</b>
-<ul>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/slurm_ug_agenda.html">slurm_ug_agenda.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/slurm_ug_registration.html">slurm_ug_registration.html</a></li>
-<!-- 
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/accounting.html">accounting.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/accounting_storageplugins.html">accounting_storageplugins.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/api.html">api.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/authplugins.html">authplugins.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/big_sys.html">big_sys.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/bluegene.html">bluegene.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/checkpoint_blcr.html">checkpoint_blcr.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/checkpoint_plugins.html">checkpoint_plugins.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/configurator.html">configurator.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/cons_res.html">cons_res.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/cons_res_share.html">cons_res_share.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/cray.html">cray.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/crypto_plugins.html">crypto_plugins.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/dist_plane.html">dist_plane.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/documentation.html">documentation.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/download.html">download.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/faq.html">faq.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/gang_scheduling.html">gang_scheduling.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/gres.html">gres.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/gres_design.html">gres_design.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/gres_plugins.html">gres_plugins.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/help.html">help.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/ibm.html">ibm.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/jobacct_gatherplugins.html">jobacct_gatherplugins.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/jobcompplugins.html">jobcompplugins.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/mail.html">mail.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/maui.html">maui.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/mc_support.html">mc_support.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/moab.html">moab.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/mpi_guide.html">mpi_guide.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/mpiplugins.html">mpiplugins.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/news.html">news.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/overview.html">overview.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/platforms.html">platforms.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/plugins.html">plugins.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/power_save.html">power_save.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/preempt.html">preempt.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/priority_multifactor.html">priority_multifactor.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/priority_plugins.html">priority_plugins.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/proctrack_plugins.html">proctrack_plugins.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/programmer_guide.html">programmer_guide.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/publications.html">publications.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/qos.html">qos.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/quickstart_admin.html">quickstart_admin.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/quickstart.html">quickstart.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/reservations.html">reservations.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/resource_limits.html">resource_limits.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/schedplugins.html">schedplugins.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/selectplugins.html">selectplugins.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/slurm.html">slurm.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/sun_const.html">sun_const.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/switchplugins.html">switchplugins.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/taskplugins.html">taskplugins.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/team.html">team.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/testimonials.html">testimonials.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/topology.html">topology.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/topology_plugin.html">topology_plugin.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/troubleshoot.html">troubleshoot.html</a></li> -->
-</ul>
-</body>
-</html>
-
diff --git a/doc/html/sun_const.shtml b/doc/html/sun_const.shtml
index 318565e2d98..a1a780708c6 100644
--- a/doc/html/sun_const.shtml
+++ b/doc/html/sun_const.shtml
@@ -23,7 +23,7 @@ of an appropriate format.</p>
 <i>HAVE_SUN_CONST</i> and <i>SYSTEM_DIMENSIONS=4</i>
 (more on that value later).
 This can be accomplished in several different ways depending upon how
-SLURM is being built.
+Slurm is being built.
 <ol>
 <li>Execute the <i>configure</i> command with the option
 <i>--enable-sun-const</i> <b>OR</b></li>
@@ -45,7 +45,7 @@ case letters for higher values, for up to 36 nodes at a specific coordinate
 (e.g. 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, A, B, C, D, ... Z).
 To avoid confusion, we recommend that the node name prefix consist of
 lower case letters.
-Numerically sequential node names may specified by in SLURM
+Numerically sequential node names may be specified in Slurm
 commands and configuration files using the system name prefix with the
 end-points enclosed in square brackets and separated by an "-".
 For example "tux[0000-000B]" is used to represent the twelve nodes
@@ -150,7 +150,7 @@ In order to locate specific nodes with the <i>sview</i> command, select
 node names.
 The output of other Slurm commands (e.g. <i>sinfo</i> and <i>squeue</i>)
 will use a Slurm hostlist expression with the node names numerically ordered).
-SLURM partitions should contain nodes which are defined sequentially
+Slurm partitions should contain nodes which are defined sequentially
 by that ordering for optimal performance.</p>
 
 <p class="footer"><a href="#top">top</a></p>
diff --git a/doc/html/taskplugins.shtml b/doc/html/taskplugins.shtml
index 7959d61a893..12b56370c3c 100644
--- a/doc/html/taskplugins.shtml
+++ b/doc/html/taskplugins.shtml
@@ -8,7 +8,7 @@ that defines them. It is intended as a resource to programmers wishing
 to write their own Slurm scheduler plugins. This is version 2 of the API.</p>
 
 <p>Slurm task management plugins are Slurm plugins that implement the
-SLURM task management API described herein. They would typically be
+Slurm task management API described herein. They would typically be
 used to control task affinity (i.e. binding tasks to processors).
 They must conform to the Slurm Plugin API with the following
 specifications:</p>
@@ -20,7 +20,7 @@ abbreviation for the type of task management. We recommend, for example:</p>
 The actual mechanism used to task binding is dependent upon the available
 infrastructure as determined by the "configure" program when Slurm is built
 and the value of the <b>TaskPluginParam</b> as defined in the <b>slurm.conf</b>
-(SLURM configuration file).</li>
+(Slurm configuration file).</li>
 <li><b>cgroup</b>&#151;Use Linux cgroups for binding tasks to resources.</li>
 <li><b>none</b>&#151;A plugin that implements the API without providing any
 services. This is the default behavior and provides no task binding.</li>
@@ -63,8 +63,8 @@ be stubbed.</p>
 <p><b>Note</b>: These init and fini functions are not the same as those
 described in the <span class="commandline">dlopen (3)</span> system library.
 The C run-time system co-opts those symbols for its own initialization.
-The system <span class="commandline">_init()</span> is called before the SLURM
-<span class="commandline">init()</span>, and the SLURM
+The system <span class="commandline">_init()</span> is called before the Slurm
+<span class="commandline">init()</span>, and the Slurm
 <span class="commandline">fini()</span> is called before the system's
 <span class="commandline">_fini()</span>.</p>
 
diff --git a/doc/html/topology.shtml b/doc/html/topology.shtml
index c740f5335f1..98d1e066260 100644
--- a/doc/html/topology.shtml
+++ b/doc/html/topology.shtml
@@ -20,7 +20,7 @@ allocated to the job.</p>
 The IBM BlueGene computers is one example of this which has highly
 constrained resource allocation scheme, essentially requiring that
 jobs be allocated a set of nodes logically having a rectangular prism shape.
-SLURM has a plugin specifically written for BlueGene to select appropriate
+Slurm has a plugin specifically written for BlueGene to select appropriate
 nodes for jobs, change network switch routing, boot nodes, etc as described
 in the <a href="bluegene.html">BlueGene User and Administrator Guide</a>.</p>
 
@@ -28,11 +28,11 @@ in the <a href="bluegene.html">BlueGene User and Administrator Guide</a>.</p>
 torus interconnects, but do not require that jobs execute in adjacent nodes.
 On those systems, Slurm only needs to allocate resources to a job which
 are nearby on the network.
-SLURM accomplishes this using a
+Slurm accomplishes this using a
 <a href="http://en.wikipedia.org/wiki/Hilbert_curve">Hilbert curve</a>
 to map the nodes from a three-dimensional space into a one-dimensional
 space.
-SLURM's native best-fit algorithm is thus able to achieve a high degree
+Slurm's native best-fit algorithm is thus able to achieve a high degree
 of locality for jobs.
 For more information, see Slurm's documentation for
 <a href="sun_const.html">Sun Constellation</a> and
@@ -71,7 +71,7 @@ plus a <i>Nodes</i> field to identify the nodes connected to the
 switch.
 Higher-level switch descriptions contain a <i>SwitchName</i> field
 plus a <i>Switches</i> field to identify the child switches.
-SLURM's hostlist expression parser is used, so the node and switch
+Slurm's hostlist expression parser is used, so the node and switch
 names need not be consecutive (e.g. "Nodes=tux[0-3,12,18-20]"
 and "Switches=s[0-2,4-8,12]" will parse fine).
 </p>
@@ -116,11 +116,11 @@ SwitchName=s7 Switches=s[0-3]  LinkSpeed=1800
 <img src=topo_ex2.gif width=600>
 
 <p>As a practical matter, listing every switch connection
-definitely results in a slower scheduling algorithm for SLURM
+definitely results in a slower scheduling algorithm for Slurm
 to optimize job placement.
 The application performance may achieve little benefit from such optimization.
 Listing the leaf switches with their nodes plus one top level switch
-should result in good performance for both applications and SLURM.
+should result in good performance for both applications and Slurm.
 The previous example might be configured as follows:
 <pre>
 # topology.conf
diff --git a/doc/html/topology_plugin.shtml b/doc/html/topology_plugin.shtml
index fb180f44086..dac88f72995 100644
--- a/doc/html/topology_plugin.shtml
+++ b/doc/html/topology_plugin.shtml
@@ -6,7 +6,7 @@
 <p> This document describes Slurm topology plugin and the API that
 defines them.
 It is intended as a resource to programmers wishing to write their own
-SLURM topology plugin.
+Slurm topology plugin.
 This is version 101 of the API.</p>
 
 <p>Slurm topology plugins are Slurm plugins that implement
@@ -75,8 +75,8 @@ Functions which are not implemented should be stubbed.</p>
 <p><b>Note</b>: These init and fini functions are not the same as those
 described in the <span class="commandline">dlopen (3)</span> system library.
 The C run-time system co-opts those symbols for its own initialization.
-The system <span class="commandline">_init()</span> is called before the SLURM
-<span class="commandline">init()</span>, and the SLURM
+The system <span class="commandline">_init()</span> is called before the Slurm
+<span class="commandline">init()</span>, and the Slurm
 <span class="commandline">fini()</span> is called before the system's
 <span class="commandline">_fini()</span>.</p>
 
diff --git a/doc/html/troubleshoot.shtml b/doc/html/troubleshoot.shtml
index ed562ae8ebc..c7cfa8d5ef9 100644
--- a/doc/html/troubleshoot.shtml
+++ b/doc/html/troubleshoot.shtml
@@ -78,7 +78,7 @@ Note: All running jobs and other state information will be lost.</li>
 
 <h2><a name="sched">Jobs are not getting scheduled</a></h2>
 
-<p>This is dependent upon the scheduler used by SLURM.
+<p>This is dependent upon the scheduler used by Slurm.
 Executing the command "<i>scontrol show config | grep SchedulerType</i>"
 to determine this.
 For any scheduler, you can check priorities of jobs using the
@@ -118,7 +118,7 @@ Please refer to its documentation for help.</li>
 <h2><a name="completing">Jobs and nodes are stuck in COMPLETING state</a></h2>
 
 <p>This is typically due to non-killable processes associated with the job.
-SLURM will continue to attempt terminating the processes with SIGKILL, but
+Slurm will continue to attempt terminating the processes with SIGKILL, but
 some jobs may stuck performing I/O and non-killable.
 This is typically due to a file system problem and may be addressed in
 a couple of ways.</p>
diff --git a/doc/man/man1/sacct.1 b/doc/man/man1/sacct.1
index f9cf3059583..e2274b3ba26 100644
--- a/doc/man/man1/sacct.1
+++ b/doc/man/man1/sacct.1
@@ -2,18 +2,18 @@
 
 .SH "NAME"
 sacct \- displays accounting data for all jobs and job steps in the
-SLURM job accounting log or SLURM database
+Slurm job accounting log or Slurm database
 
 .SH "SYNOPSIS"
 \fBsacct\fR [\fIOPTIONS\fR...]
 
 .SH "DESCRIPTION"
 .PP
-Accounting information for jobs invoked with SLURM are either logged
-in the job accounting log file or saved to the SLURM database.
+Accounting information for jobs invoked with Slurm are either logged
+in the job accounting log file or saved to the Slurm database.
 .PP
 The \f3sacct\fP command displays job accounting data stored in the job
-accounting log file or SLURM database in a variety of forms for your
+accounting log file or Slurm database in a variety of forms for your
 analysis.  The \f3sacct\fP command displays information on jobs, job
 steps, status, and exitcodes by default.  You can tailor the output
 with the use of the \f3\-\-format=\fP option to specify the fields to
@@ -43,8 +43,8 @@ needed, it is recommended that a database type of accounting storage be
 configured.
 .TP
 \f3Note: \fP\c
-The content's of SLURM's database are maintained in lower case. This may
-result in some \f3sacct\fP output differing from that of other SLURM commands.
+The contents of Slurm's database are maintained in lower case. This may
+result in some \f3sacct\fP output differing from that of other Slurm commands.
 .TP
 \f3Note: \fP\c
 Much of the data reported by \f3sacct\fP has been generated by
@@ -97,7 +97,7 @@ parameter in the slurm.conf file must be defined to a non-none option.
 
 .TP
 \f3\-D\fP\f3,\fP \f3\-\-duplicates\fP
-If SLURM job ids are reset, some job numbers will probably appear more
+If Slurm job ids are reset, some job numbers will probably appear more
 than once in the accounting log file but refer to different jobs.
 Such jobs can be distinguished by the "submit" time stamp in the data
 records.
@@ -164,7 +164,7 @@ YYYY\-MM\-DD[THH:MM[:SS]]
 .TP
 \f3\-f \fP\f2file\fP\f3,\fP  \f3\-\-file\fP\f3=\fP\f2file\fP
 Causes the \f3sacct\fP command to read job accounting data from the
-named \f2file\fP instead of the current SLURM job accounting log
+named \f2file\fP instead of the current Slurm job accounting log
 file. Only applicable when running the filetxt plugin.
 
 .TP
@@ -805,7 +805,7 @@ along with their corresponding options, are listed below. (Note:
 Commandline options will always override these settings.)
 .TP 20
 \fBSLURM_CONF\fR
-The location of the SLURM configuration file.
+The location of the Slurm configuration file.
 .TP
 \fBSLURM_TIME_FORMAT\fR
 Specify the format used to report time stamps. A value of \fIstandard\fR, the
@@ -954,15 +954,15 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 .br
 Copyright (C) 2010\-2014 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man1/sacctmgr.1 b/doc/man/man1/sacctmgr.1
index d25eb92c336..9bca6b7710b 100644
--- a/doc/man/man1/sacctmgr.1
+++ b/doc/man/man1/sacctmgr.1
@@ -26,8 +26,8 @@ commit the changes and exit.
 
 .TP "7"
 \f3Note: \fP\c
-The content's of SLURM's database are maintained in lower case. This may
-result in some \f3sacctmgr\fP output differing from that of other SLURM
+The contents of Slurm's database are maintained in lower case. This may
+result in some \f3sacctmgr\fP output differing from that of other Slurm
 commands.
 
 .SH "OPTIONS"
@@ -956,7 +956,7 @@ completion.
 .SH "LIST/SHOW JOB FORMAT OPTIONS"
 
 The \fBsacct\fR command is the exclusive command to display job
-records from the SLURM database.
+records from the Slurm database.
 
 .SH "SPECIFICATIONS FOR QOS"
 \fBNOTE:\fR The group limits (GrpJobs, GrpNodes, etc.) are tested when a job is
@@ -1177,8 +1177,8 @@ inside the limit.
 Maximum number of CPUs running jobs are able to be allocated in aggregate for
 this QOS.
 To clear a previously set value use the modify command with a new
-value of \-1.  (NOTE: This limit is not currently enforced in SLURM.
-You can still set this, but have to wait for future versions of SLURM
+value of \-1.  (NOTE: This limit is not currently enforced in Slurm.
+You can still set this, but have to wait for future versions of Slurm
 before it is enforced.)
 
 .TP
@@ -1224,8 +1224,8 @@ value of \-1.
 \fIMaxCPUs\fP
 Maximum number of CPUs each job is able to use.
 To clear a previously set value use the modify command with a new
-value of \-1.  (NOTE: This limit is not currently enforced in SLURM.
-You can still set this, but have to wait for future versions of SLURM
+value of \-1.  (NOTE: This limit is not currently enforced in Slurm.
+You can still set this, but have to wait for future versions of Slurm
 before it is enforced.)
 
 .TP
@@ -1582,10 +1582,10 @@ e.g. format=name%30 will print 30 characters of field name right
 justified.  A \-30 will print 30 characters left justified.
 
 .SH "FLAT FILE DUMP AND LOAD"
-sacctmgr has the capability to load and dump SLURM association data to and
+sacctmgr has the capability to load and dump Slurm association data to and
 from a file.  This method can easily add a new cluster or copy an
 existing clusters associations into a new cluster with similar
-accounts. Each file contains SLURM association data for a single
+accounts. Each file contains Slurm association data for a single
 cluster.  Comments can be put into the file with the # character.
 Each line of information must begin with one of the four titles; \fBCluster, Parent, Account or
 User\fP. Following the title is a space, dash, space, entity value,
@@ -1881,7 +1881,7 @@ environment variables, along with their corresponding options, are listed below.
 (Note: commandline options will always override these settings)
 .TP 20
 \fBSLURM_CONF\fR
-The location of the SLURM configuration file.
+The location of the Slurm configuration file.
 
 .SH "EXAMPLES"
 \fBNOTE:\fR There is an order to set up accounting associations.
@@ -2091,15 +2091,15 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 .br
 Copyright (C) 2010\-2013 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man1/salloc.1 b/doc/man/man1/salloc.1
index 4d6928d3c9b..c1a1f311e13 100644
--- a/doc/man/man1/salloc.1
+++ b/doc/man/man1/salloc.1
@@ -1,14 +1,14 @@
 .TH salloc "1" "Slurm Commands" "April 2015" "Slurm Commands"
 
 .SH "NAME"
-salloc \- Obtain a SLURM job allocation (a set of nodes), execute a command,
+salloc \- Obtain a Slurm job allocation (a set of nodes), execute a command,
 and then release the allocation when the command is finished.
 
 .SH "SYNOPSIS"
 salloc [\fIoptions\fP] [<\fIcommand\fP> [\fIcommand args\fR]]
 
 .SH "DESCRIPTION"
-salloc is used to allocate a SLURM job allocation, which is a set of resources
+salloc is used to allocate a Slurm job allocation, which is a set of resources
 (nodes), possibly with some set of constraints (e.g. number of processors per
 node).  When salloc successfully obtains the requested allocation, it then runs
 the command specified by the user.  Finally, when the user specified command is
@@ -44,7 +44,7 @@ command.
 .TP
 \fB\-\-acctg\-freq\fR
 Define the job accounting and profiling sampling intervals.
-This can be used to override the \fIJobAcctGatherFrequency\fR parameter in SLURM's
+This can be used to override the \fIJobAcctGatherFrequency\fR parameter in Slurm's
 configuration file, \fIslurm.conf\fR.
 The supported format is as follows:
 .RS
@@ -87,7 +87,7 @@ The default value for the task sampling interval
 is 30. The default value for all other intervals is 0.
 An interval of 0 disables sampling of the specified type.
 If the task sampling interval is 0, accounting
-information is collected only at job termination (reducing SLURM
+information is collected only at job termination (reducing Slurm
 interference with the job).
 .br
 .br
@@ -123,7 +123,7 @@ If not specified, the scontrol show job will display 'ReqS:C:T=*:*:*'.
 
 .TP
 \fB\-\-begin\fR=<\fItime\fR>
-Submit the batch script to the SLURM controller immediately, like normal, but
+Submit the batch script to the Slurm controller immediately, like normal, but
 tell the controller to defer the allocation of the job until the specified time.
 
 Time may be of the form \fIHH:MM:SS\fR to run a job at
@@ -138,7 +138,7 @@ a date of the form \fIMMDDYY\fR or \fIMM/DD/YY\fR
 format \fIYYYY\-MM\-DD[THH:MM[:SS]]\fR. You can also
 give times like \fInow + count time\-units\fR, where the time\-units
 can be \fIseconds\fR (default), \fIminutes\fR, \fIhours\fR,
-\fIdays\fR, or \fIweeks\fR and you can tell SLURM to run
+\fIdays\fR, or \fIweeks\fR and you can tell Slurm to run
 the job today with the keyword \fItoday\fR and to run the
 job tomorrow with the keyword \fItomorrow\fR.
 The value may be changed after job submission using the
@@ -155,11 +155,11 @@ For example:
 .PP
 Notes on date/time specifications:
  \- Although the 'seconds' field of the HH:MM:SS time specification is
-allowed by the code, note that the poll time of the SLURM scheduler
+allowed by the code, note that the poll time of the Slurm scheduler
 is not precise enough to guarantee dispatch of the job on the exact
 second.  The job will be eligible to start on the next poll
 following the specified time. The exact poll interval depends on the
-SLURM scheduler (e.g., 60 seconds with the default sched/builtin).
+Slurm scheduler (e.g., 60 seconds with the default sched/builtin).
  \- If no time (HH:MM:SS) is specified, the default is (00:00:00).
  \- If a date is specified without a year (e.g., MM/DD) then the current
 year is assumed, unless the combination of MM/DD and HH:MM:SS has
@@ -179,7 +179,7 @@ An arbitrary comment.
 
 .TP
 \fB\-C\fR, \fB\-\-constraint\fR=<\fIlist\fR>
-Nodes can have \fBfeatures\fR assigned to them by the SLURM administrator.
+Nodes can have \fBfeatures\fR assigned to them by the Slurm administrator.
 Users can specify which of these \fBfeatures\fR are required by their job
 using the constraint option.
 Only nodes having features matching the job constraints will be used to
@@ -241,7 +241,7 @@ above when task/affinity plugin is enabled.
 
 .TP
 \fB\-c\fR, \fB\-\-cpus\-per\-task\fR=<\fIncpus\fR>
-Advise the SLURM controller that ensuing job steps will require \fIncpus\fR
+Advise the Slurm controller that ensuing job steps will require \fIncpus\fR
 number of processors per task.  Without this option, the controller will
 just try to allocate one processor per task.
 
@@ -308,7 +308,7 @@ Much like \-\-nodelist, but the list is contained in a file of name
 \fInode file\fR.  The node names of the list may also span multiple lines
 in the file.    Duplicate node names in the file will be ignored.
 The order of the node names in the list is not important; the node names
-will be sorted by SLURM.
+will be sorted by Slurm.
 
 .TP
 \fB\-\-get\-user\-env\fR[=\fItimeout\fR][\fImode\fR]
@@ -324,7 +324,7 @@ The optional \fImode\fR value control the "su" options.
 With a \fImode\fR value of "S", "su" is executed without the "\-" option.
 With a \fImode\fR value of "L", "su" is executed with the "\-" option,
 replicating the login environment.
-If \fImode\fR not specified, the mode established at SLURM build time
+If \fImode\fR not specified, the mode established at Slurm build time
 is used.
 Example of use include "\-\-get\-user\-env", "\-\-get\-user\-env=10"
 "\-\-get\-user\-env=10L", and "\-\-get\-user\-env=S".
@@ -413,11 +413,11 @@ NOTE: Only valid for user root.
 salloc always runs a user\-specified command once the allocation is
 granted.  salloc will wait indefinitely for that command to exit.
 If you specify the \-\-kill\-command option salloc will send a signal to
-your command any time that the SLURM controller tells salloc that its job
+your command any time that the Slurm controller tells salloc that its job
 allocation has been revoked. The job allocation can be revoked for a
 couple of reasons: someone used \fBscancel\fR to revoke the allocation,
 or the allocation reached its time limit.  If you do not specify a signal
-name or number and SLURM is configured to signal the spawned command at job
+name or number and Slurm is configured to signal the spawned command at job
 termination, the default signal is SIGHUP for interactive and SIGTERM for
 non\-interactive sessions. Since this option's argument is optional,
 for proper parsing the single letter option must be followed
@@ -433,7 +433,7 @@ MPI jobs) on that node will almost certainly suffer a fatal error, but with
 \-\-no\-kill, the job allocation will not be revoked so the user may launch
 new job steps on the remaining nodes in their allocation.
 
-By default SLURM terminates the entire job allocation if any node fails in its
+By default Slurm terminates the entire job allocation if any node fails in its
 range of allocated nodes.
 
 .TP
@@ -611,7 +611,7 @@ If you want greater control, try running a simple test code with the
 options "\-\-mem_bind=verbose,none" to determine
 the specific configuration.
 
-NOTE: To have SLURM always report on the selected memory binding for
+NOTE: To have Slurm always report on the selected memory binding for
 all commands executed in a shell, you can enable verbose mode by
 setting the SLURM_MEM_BIND environment variable value to "verbose".
 
@@ -695,7 +695,7 @@ of "k" (multiplies numeric value by 1,024) or "m" (multiplies numeric value by
 .TP
 \fB\-n\fR, \fB\-\-ntasks\fR=<\fInumber\fR>
 salloc does not launch tasks, it requests an allocation of resources and
-executed some command. This option advises the SLURM controller that job
+executes some command. This option advises the Slurm controller that job
 steps run within this allocation will launch a maximum of \fInumber\fR
 tasks and sufficient resources are allocated to accomplish this.
 The default is one task per node, but note
@@ -845,7 +845,7 @@ of 32 connections are established (2 instances x 2 protocols x 2 networks x
 
 .TP
 \fB\-\-nice\fR[=\fIadjustment\fR]
-Run the job with an adjusted scheduling priority within SLURM.
+Run the job with an adjusted scheduling priority within Slurm.
 With no adjustment value the scheduling priority is decreased
 by 100. The adjustment range is from \-10000 (highest priority)
 to 10000 (lowest priority). Only privileged users can specify
@@ -898,14 +898,14 @@ Silence salloc's use of the terminal bell. Also see the option \fB\-\-bell\fR.
 .TP
 \fB\-\-no\-shell\fR
 immediately exit after allocating resources, without running a
-command. However, the SLURM job will still be created and will remain
+command. However, the Slurm job will still be created and will remain
 active and will own the allocated resources as long as it is active.
-You will have a SLURM job id with no associated processes or
+You will have a Slurm job id with no associated processes or
 tasks. You can submit \fBsrun\fR commands against this resource allocation,
-if you specify the \fB\-\-jobid=\fR option with the job id of this SLURM job.
+if you specify the \fB\-\-jobid=\fR option with the job id of this Slurm job.
 Or, this can be used to temporarily reserve a set of resources so that
 other jobs cannot use them for some period of time.  (Note that the
-SLURM job is subject to the normal constraints on jobs, including time
+Slurm job is subject to the normal constraints on jobs, including time
 limits, so that eventually the job will terminate and the resources
 will be freed, or you can terminate the job manually using the
 \fBscancel\fR command.)
@@ -924,7 +924,7 @@ By specifying \fB\-\-overcommit\fR you are explicitly allowing more than one
 process per CPU. However no more than \fBMAX_TASKS_PER_NODE\fR tasks are
 permitted to execute per node.  NOTE: \fBMAX_TASKS_PER_NODE\fR is
 defined in the file \fIslurm.h\fR and is not a variable, it is set at
-SLURM build time.
+Slurm build time.
 
 .TP
 \fB\-\-priority\fR=<value>
@@ -981,9 +981,9 @@ Suppress informational messages from salloc. Errors will still be displayed.
 .TP
 \fB\-\-qos\fR=<\fIqos\fR>
 Request a quality of service for the job.  QOS values can be defined
-for each user/cluster/account association in the SLURM database.
+for each user/cluster/account association in the Slurm database.
 Users will be limited to their association's defined set of qos's when
-the SLURM configuration parameter, AccountingStorageEnforce, includes
+the Slurm configuration parameter, AccountingStorageEnforce, includes
 "qos" in it's definition.
 
 .TP
@@ -1022,7 +1022,7 @@ CoreSpecCount and use the specialized resources on nodes it is allocated.
 \fB\-\-signal\fR=<\fIsig_num\fR>[@<\fIsig_time\fR>]
 When a job is within \fIsig_time\fR seconds of its end time,
 send it the signal \fIsig_num\fR.
-Due to the resolution of event handling by SLURM, the signal may
+Due to the resolution of event handling by Slurm, the signal may
 be sent up to 60 seconds earlier than specified.
 \fIsig_num\fR may either be a signal number or name (e.g. "10" or "USR1").
 \fIsig_time\fR must have an integer value between 0 and 65535.
@@ -1040,7 +1040,7 @@ task/affinity plugin is enabled.
 \fB\-\-switches\fR=<\fIcount\fR>[@<\fImax\-time\fR>]
 When a tree topology is used, this defines the maximum count of switches
 desired for the job allocation and optionally the maximum time to wait
-for that number of switches. If SLURM finds an allocation containing more
+for that number of switches. If Slurm finds an allocation containing more
 switches than the count specified, the job remains pending until it either finds
 an allocation with desired switch count or the time limit expires.
 It there is no switch count limit, there is no delay in starting the job.
@@ -1059,7 +1059,7 @@ requested time limit exceeds the partition's time limit, the job will
 be left in a PENDING state (possibly indefinitely).  The default time
 limit is the partition's default time limit.  When the time limit is reached,
 each task in each job step is sent SIGTERM followed by SIGKILL.  The
-interval between signals is specified by the SLURM configuration
+interval between signals is specified by the Slurm configuration
 parameter \fBKillWait\fR.  A time limit of zero requests that no time
 limit be imposed.  Acceptable time formats include "minutes",
 "minutes:seconds", "hours:minutes:seconds", "days\-hours",
@@ -1132,7 +1132,7 @@ by the supplied host list, additional resources will be allocated on other
 nodes as needed.
 Duplicate node names in the list will be ignored.
 The order of the node names in the list is not important; the node names
-will be sorted by SLURM.
+will be sorted by Slurm.
 
 .TP
 \fB\-\-wait\-all\-nodes\fR=<\fIvalue\fR>
@@ -1175,7 +1175,7 @@ Default from \fIblugene.conf\fR if not set.
 \fB\-\-conn\-type\fR=<\fItype\fR>
 Require the block connection type to be of a certain type.
 On Blue Gene the acceptable of \fItype\fR are MESH, TORUS and NAV.
-If NAV, or if not set, then SLURM will try to fit a what the
+If NAV, or if not set, then Slurm will try to fit what the
 DefaultConnType is set to in the bluegene.conf if that isn't set the
 default is TORUS.
 You should not normally set this option.
@@ -1316,20 +1316,20 @@ Same as \fB\-\-wckey\fR
 Max time waiting for requested switches. See \fB\-\-switches\fR
 .TP
 \fBSLURM_CONF\fR
-The location of the SLURM configuration file.
+The location of the Slurm configuration file.
 .TP
 \fBSLURM_EXIT_ERROR\fR
-Specifies the exit code generated when a SLURM error occurs
+Specifies the exit code generated when a Slurm error occurs
 (e.g. invalid options).
 This can be used by a script to distinguish application exit codes from
-various SLURM error conditions.
+various Slurm error conditions.
 Also see \fBSLURM_EXIT_IMMEDIATE\fR.
 .TP
 \fBSLURM_EXIT_IMMEDIATE\fR
 Specifies the exit code generated when the \fB\-\-immediate\fR option
 is used and resources are not currently available.
 This can be used by a script to distinguish application exit codes from
-various SLURM error conditions.
+various Slurm error conditions.
 Also see \fBSLURM_EXIT_ERROR\fR.
 
 .SH "OUTPUT ENVIRONMENT VARIABLES"
@@ -1450,15 +1450,15 @@ Copyright (C) 2008\-2010 Lawrence Livermore National Security.
 .br
 Copyright (C) 2010\-2013 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man1/sattach.1 b/doc/man/man1/sattach.1
index 096f4e5e810..451ed9ef469 100644
--- a/doc/man/man1/sattach.1
+++ b/doc/man/man1/sattach.1
@@ -2,14 +2,14 @@
 
 .SH "NAME"
 .LP
-sattach \- Attach to a SLURM job step.
+sattach \- Attach to a Slurm job step.
 .SH "SYNOPSIS"
 .LP
 sattach [\fIoptions\fP] <jobid.stepid>
 .SH "DESCRIPTION"
 .LP
-sattach attaches to a running SLURM job step.  By attaching, it makes available
-the IO streams of all of the tasks of a running SLURM job step.  It also
+sattach attaches to a running Slurm job step.  By attaching, it makes available
+the IO streams of all of the tasks of a running Slurm job step.  It is also
 suitable for use with a parallel debugger like TotalView.
 
 .SH "OPTIONS"
@@ -63,7 +63,7 @@ Display brief usage message and exit.
 
 .TP
 \fB\-V\fR, \fB\-\-version\fR
-Display SLURM version number and exit.
+Display Slurm version number and exit.
 
 .TP
 \fB\-v\fR, \fB\-\-verbose\fR
@@ -79,13 +79,13 @@ variables settings.
 .TP
 .TP 20
 \fBSLURM_CONF\fR
-The location of the SLURM configuration file.
+The location of the Slurm configuration file.
 .TP
 \fBSLURM_EXIT_ERROR\fR
-Specifies the exit code generated when a SLURM error occurs
+Specifies the exit code generated when a Slurm error occurs
 (e.g. invalid options).
 This can be used by a script to distinguish application exit codes from
-various SLURM error conditions.
+various Slurm error conditions.
 
 
 .SH "EXAMPLES"
@@ -102,15 +102,15 @@ Copyright (C) 2008\-2009 Lawrence Livermore National Security.
 .br
 Copyright (C) 2010\-2013 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man1/sbatch.1 b/doc/man/man1/sbatch.1
index e932b1f719c..0cdce3378ff 100644
--- a/doc/man/man1/sbatch.1
+++ b/doc/man/man1/sbatch.1
@@ -1,19 +1,19 @@
 .TH sbatch "1" "Slurm Commands" "April 2015" "Slurm Commands"
 
 .SH "NAME"
-sbatch \- Submit a batch script to SLURM.
+sbatch \- Submit a batch script to Slurm.
 
 .SH "SYNOPSIS"
 sbatch [\fIoptions\fP] \fIscript\fP [\fIargs\fP...]
 
 .SH "DESCRIPTION"
-sbatch submits a batch script to SLURM.  The batch script may be given to
+sbatch submits a batch script to Slurm.  The batch script may be given to
 sbatch through a file name on the command line, or if no file name is specified,
 sbatch will read in a script from standard input. The batch script may contain
 options preceded with "#SBATCH" before any executable commands in the script.
 
 sbatch exits immediately after the script is successfully transferred to the
-SLURM controller and assigned a SLURM job ID.  The batch script is not
+Slurm controller and assigned a Slurm job ID.  The batch script is not
 necessarily granted resources immediately, it may sit in the queue of pending
 jobs for some time before its required resources become available.
 
@@ -22,7 +22,7 @@ the name "slurm\-%j.out", where the "%j" is replaced with the job allocation
 number. The file will be generated on the first node of the job allocation.
 Other than the batch script itself, Slurm does no movement of user files.
 
-When the job allocation is finally granted for the batch script, SLURM
+When the job allocation is finally granted for the batch script, Slurm
 runs a single copy of the batch script on the first node in the set of
 allocated nodes.
 
@@ -60,7 +60,7 @@ command.
 .TP
 \fB\-\-acctg\-freq\fR
 Define the job accounting and profiling sampling intervals.
-This can be used to override the \fIJobAcctGatherFrequency\fR parameter in SLURM's
+This can be used to override the \fIJobAcctGatherFrequency\fR parameter in Slurm's
 configuration file, \fIslurm.conf\fR.
 The supported format is as follows:
 .RS
@@ -103,7 +103,7 @@ The default value for the task sampling interval is 30 seconds.
 The default value for all other intervals is 0.
 An interval of 0 disables sampling of the specified type.
 If the task sampling interval is 0, accounting
-information is collected only at job termination (reducing SLURM
+information is collected only at job termination (reducing Slurm
 interference with the job).
 .br
 .br
@@ -139,7 +139,7 @@ If not specified, the scontrol show job will display 'ReqS:C:T=*:*:*'.
 
 .TP
 \fB\-\-begin\fR=<\fItime\fR>
-Submit the batch script to the SLURM controller immediately, like normal, but
+Submit the batch script to the Slurm controller immediately, like normal, but
 tell the controller to defer the allocation of the job until the specified time.
 
 Time may be of the form \fIHH:MM:SS\fR to run a job at
@@ -154,7 +154,7 @@ a date of the form \fIMMDDYY\fR or \fIMM/DD/YY\fR
 format \fIYYYY\-MM\-DD[THH:MM[:SS]]\fR. You can also
 give times like \fInow + count time\-units\fR, where the time\-units
 can be \fIseconds\fR (default), \fIminutes\fR, \fIhours\fR,
-\fIdays\fR, or \fIweeks\fR and you can tell SLURM to run
+\fIdays\fR, or \fIweeks\fR and you can tell Slurm to run
 the job today with the keyword \fItoday\fR and to run the
 job tomorrow with the keyword \fItomorrow\fR.
 The value may be changed after job submission using the
@@ -171,11 +171,11 @@ For example:
 .PP
 Notes on date/time specifications:
  \- Although the 'seconds' field of the HH:MM:SS time specification is
-allowed by the code, note that the poll time of the SLURM scheduler
+allowed by the code, note that the poll time of the Slurm scheduler
 is not precise enough to guarantee dispatch of the job on the exact
 second.  The job will be eligible to start on the next poll
 following the specified time. The exact poll interval depends on the
-SLURM scheduler (e.g., 60 seconds with the default sched/builtin).
+Slurm scheduler (e.g., 60 seconds with the default sched/builtin).
  \- If no time (HH:MM:SS) is specified, the default is (00:00:00).
  \- If a date is specified without a year (e.g., MM/DD) then the current
 year is assumed, unless the combination of MM/DD and HH:MM:SS has
@@ -205,7 +205,7 @@ special characters.
 
 .TP
 \fB\-C\fR, \fB\-\-constraint\fR=<\fIlist\fR>
-Nodes can have \fBfeatures\fR assigned to them by the SLURM administrator.
+Nodes can have \fBfeatures\fR assigned to them by the Slurm administrator.
 Users can specify which of these \fBfeatures\fR are required by their job
 using the constraint option.
 Only nodes having features matching the job constraints will be used to
@@ -267,7 +267,7 @@ above when task/affinity plugin is enabled.
 
 .TP
 \fB\-c\fR, \fB\-\-cpus\-per\-task\fR=<\fIncpus\fR>
-Advise the SLURM controller that ensuing job steps will require \fIncpus\fR
+Advise the Slurm controller that ensuing job steps will require \fIncpus\fR
 number of processors per task.  Without this option, the controller will
 just try to allocate one processor per task.
 
@@ -324,7 +324,7 @@ to the directory where the command is executed.
 
 .TP
 \fB\-e\fR, \fB\-\-error\fR=<\fIfilename pattern\fR>
-Instruct SLURM to connect the batch script's standard error directly to the
+Instruct Slurm to connect the batch script's standard error directly to the
 file name specified in the "\fIfilename pattern\fR".
 By default both standard output and standard error are directed to the same file.
 For job arrays, the default file name is "slurm-%A_%a.out", "%A" is replaced
@@ -371,7 +371,7 @@ Much like \-\-nodelist, but the list is contained in a file of name
 \fInode file\fR.  The node names of the list may also span multiple lines
 in the file.    Duplicate node names in the file will be ignored.
 The order of the node names in the list is not important; the node names
-will be sorted by SLURM.
+will be sorted by Slurm.
 
 .TP
 \fB\-\-get\-user\-env\fR[=\fItimeout\fR][\fImode\fR]
@@ -388,7 +388,7 @@ The optional \fImode\fR value control the "su" options.
 With a \fImode\fR value of "S", "su" is executed without the "\-" option.
 With a \fImode\fR value of "L", "su" is executed with the "\-" option,
 replicating the login environment.
-If \fImode\fR not specified, the mode established at SLURM build time
+If \fImode\fR not specified, the mode established at Slurm build time
 is used.
 Example of use include "\-\-get\-user\-env", "\-\-get\-user\-env=10"
 "\-\-get\-user\-env=10L", and "\-\-get\-user\-env=S".
@@ -460,7 +460,7 @@ Ignore any "#PBS" options specified in the batch script.
 
 .TP
 \fB\-i\fR, \fB\-\-input\fR=<\fIfilename pattern\fR>
-Instruct SLURM to connect the batch script's standard input
+Instruct Slurm to connect the batch script's standard input
 directly to the file name specified in the "\fIfilename pattern\fR".
 
 By default, "/dev/null" is open on the batch script's standard input and both
@@ -513,7 +513,7 @@ MPI jobs) on that node will almost certainly suffer a fatal error, but with
 \-\-no\-kill, the job allocation will not be revoked so the user may launch
 new job steps on the remaining nodes in their allocation.
 
-By default SLURM terminates the entire job allocation if any node fails in its
+By default Slurm terminates the entire job allocation if any node fails in its
 range of allocated nodes.
 
 .TP
@@ -703,7 +703,7 @@ If you want greater control, try running a simple test code with the
 options "\-\-mem_bind=verbose,none" to determine
 the specific configuration.
 
-NOTE: To have SLURM always report on the selected memory binding for
+NOTE: To have Slurm always report on the selected memory binding for
 all commands executed in a shell, you can enable verbose mode by
 setting the SLURM_MEM_BIND environment variable value to "verbose".
 
@@ -787,7 +787,7 @@ of "k" (multiplies numeric value by 1,024) or "m" (multiplies numeric value by
 .TP
 \fB\-n\fR, \fB\-\-ntasks\fR=<\fInumber\fR>
 sbatch does not launch tasks, it requests an allocation of resources and
-submits a batch script. This option advises the SLURM controller that job
+submits a batch script. This option advises the Slurm controller that job
 steps run within the allocation will launch a maximum of \fInumber\fR
 tasks and to provide for sufficient resources.
 The default is one task per node, but note
@@ -937,7 +937,7 @@ of 32 connections are established (2 instances x 2 protocols x 2 networks x
 
 .TP
 \fB\-\-nice\fR[=\fIadjustment\fR]
-Run the job with an adjusted scheduling priority within SLURM.
+Run the job with an adjusted scheduling priority within Slurm.
 With no adjustment value the scheduling priority is decreased
 by 100. The adjustment range is from \-10000 (highest priority)
 to 10000 (lowest priority). Only privileged users can specify
@@ -1007,11 +1007,11 @@ By specifying \fB\-\-overcommit\fR you are explicitly allowing more than one
 process per CPU. However no more than \fBMAX_TASKS_PER_NODE\fR tasks are
 permitted to execute per node.  NOTE: \fBMAX_TASKS_PER_NODE\fR is
 defined in the file \fIslurm.h\fR and is not a variable, it is set at
-SLURM build time.
+Slurm build time.
 
 .TP
 \fB\-o\fR, \fB\-\-output\fR=<\fIfilename pattern\fR>
-Instruct SLURM to connect the batch script's standard output directly to the
+Instruct Slurm to connect the batch script's standard output directly to the
 file name specified in the "\fIfilename pattern\fR".
 By default both standard output and standard error are directed to the same file.
 For job arrays, the default file name is "slurm-%A_%a.out", "%A" is replaced
@@ -1133,9 +1133,9 @@ Suppress informational messages from sbatch. Errors will still be displayed.
 .TP
 \fB\-\-qos\fR=<\fIqos\fR>
 Request a quality of service for the job.  QOS values can be defined
-for each user/cluster/account association in the SLURM database.
+for each user/cluster/account association in the Slurm database.
 Users will be limited to their association's defined set of qos's when
-the SLURM configuration parameter, AccountingStorageEnforce, includes
+the Slurm configuration parameter, AccountingStorageEnforce, includes
 "qos" in it's definition.
 
 .TP
@@ -1182,7 +1182,7 @@ CoreSpecCount and use the specialized resources on nodes it is allocated.
 \fB\-\-signal\fR=[B:]<\fIsig_num\fR>[@<\fIsig_time\fR>]
 When a job is within \fIsig_time\fR seconds of its end time,
 send it the signal \fIsig_num\fR.
-Due to the resolution of event handling by SLURM, the signal may
+Due to the resolution of event handling by Slurm, the signal may
 be sent up to 60 seconds earlier than specified.
 \fIsig_num\fR may either be a signal number or name (e.g. "10" or "USR1").
 \fIsig_time\fR must have an integer value between 0 and 65535.
@@ -1203,7 +1203,7 @@ task/affinity plugin is enabled.
 \fB\-\-switches\fR=<\fIcount\fR>[@<\fImax\-time\fR>]
 When a tree topology is used, this defines the maximum count of switches
 desired for the job allocation and optionally the maximum time to wait
-for that number of switches. If SLURM finds an allocation containing more
+for that number of switches. If Slurm finds an allocation containing more
 switches than the count specified, the job remains pending until it either finds
 an allocation with desired switch count or the time limit expires.
 It there is no switch count limit, there is no delay in starting the job.
@@ -1222,7 +1222,7 @@ requested time limit exceeds the partition's time limit, the job will
 be left in a PENDING state (possibly indefinitely).  The default time
 limit is the partition's default time limit.  When the time limit is reached,
 each task in each job step is sent SIGTERM followed by SIGKILL.  The
-interval between signals is specified by the SLURM configuration
+interval between signals is specified by the Slurm configuration
 parameter \fBKillWait\fR.  A time limit of zero requests that no time
 limit be imposed.  Acceptable time formats include "minutes",
 "minutes:seconds", "hours:minutes:seconds", "days\-hours",
@@ -1301,7 +1301,7 @@ by the supplied host list, additional resources will be allocated on other
 nodes as needed.
 Duplicate node names in the list will be ignored.
 The order of the node names in the list is not important; the node names
-will be sorted by SLURM.
+will be sorted by Slurm.
 
 .TP
 \fB\-\-wait\-all\-nodes\fR=<\fIvalue\fR>
@@ -1351,7 +1351,7 @@ Default from \fIblugene.conf\fR if not set.
 \fB\-\-conn\-type\fR=<\fItype\fR>
 Require the block connection type to be of a certain type.
 On Blue Gene the acceptable of \fItype\fR are MESH, TORUS and NAV.
-If NAV, or if not set, then SLURM will try to fit a what the
+If NAV, or if not set, then Slurm will try to fit what the
 DefaultConnType is set to in the bluegene.conf if that isn't set the
 default is TORUS.
 You should not normally set this option.
@@ -1447,10 +1447,10 @@ Same as \fB\-m, \-\-distribution\fR
 Same as \fB\-\-exclusive\fR
 .TP
 \fBSLURM_EXIT_ERROR\fR
-Specifies the exit code generated when a SLURM error occurs
+Specifies the exit code generated when a Slurm error occurs
 (e.g. invalid options).
 This can be used by a script to distinguish application exit codes from
-various SLURM error conditions.
+various Slurm error conditions.
 .TP
 \fBSBATCH_EXPORT\fR
 Same as \fB\-\-export\fR
@@ -1542,7 +1542,7 @@ Max time waiting for requested switches. See \fB\-\-switches\fR
 Same as \fB\-\-wckey\fR
 .TP
 \fBSLURM_CONF\fR
-The location of the SLURM configuration file.
+The location of the Slurm configuration file.
 .TP
 \fBSLURM_STEP_KILLED_MSG_NODE_ID\fR=ID
 If set, only the specified node will log when the job or step are killed
@@ -1550,7 +1550,7 @@ by a signal.
 
 .SH "OUTPUT ENVIRONMENT VARIABLES"
 .PP
-The SLURM controller will set the following variables in the environment of
+The Slurm controller will set the following variables in the environment of
 the batch script.
 .TP
 \fBBASIL_RESERVATION_ID\fR
@@ -1761,15 +1761,15 @@ Copyright (C) 2008\-2010 Lawrence Livermore National Security.
 .br
 Copyright (C) 2010\-2013 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man1/sbcast.1 b/doc/man/man1/sbcast.1
index 6d67f6927a2..3f9d3cd7b2e 100644
--- a/doc/man/man1/sbcast.1
+++ b/doc/man/man1/sbcast.1
@@ -1,16 +1,16 @@
 .TH sbcast "1" "Slurm Commands" "April 2015" "Slurm Commands"
 
 .SH "NAME"
-sbcast \- transmit a file to the nodes allocated to a SLURM job.
+sbcast \- transmit a file to the nodes allocated to a Slurm job.
 
 .SH "SYNOPSIS"
 \fBsbcast\fR [\-CfFjpstvV] SOURCE DEST
 
 .SH "DESCRIPTION"
 \fBsbcast\fR is used to transmit a file to all nodes allocated
-to the currently active SLURM job.
-This command should only be executed from within a SLURM batch
-job or within the shell spawned after a SLURM job's resource
+to the currently active Slurm job.
+This command should only be executed from within a Slurm batch
+job or within the shell spawned after a Slurm job's resource
 allocation.
 \fBSOURCE\fR is the name of a file on the current node.
 \fBDEST\fR should be the fully qualified pathname for the
@@ -87,11 +87,11 @@ these settings.)
 \fB\-t\fB \fIseconds\fR, fB\-\-timeout\fR=\fIseconds\fR
 .TP
 \fBSLURM_CONF\fR
-The location of the SLURM configuration file.
+The location of the Slurm configuration file.
 
 .SH "AUTHORIZATION"
 
-When using the SLURM db, users who have AdminLevel's defined (Operator
+When using the Slurm db, users who have AdminLevel's defined (Operator
 or Admin) and users who are account coordinators are given the
 authority to invoke sbcast on other user's jobs.
 
@@ -116,15 +116,15 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 .br
 Copyright (C) 2010\-2013 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man1/scancel.1 b/doc/man/man1/scancel.1
index d8af8a91f63..dea3a1e050b 100644
--- a/doc/man/man1/scancel.1
+++ b/doc/man/man1/scancel.1
@@ -137,7 +137,7 @@ If neither \fB\-\-batch\fR nor \fB\-\-signal\fR are used,
 the entire job will be terminated.
 
 When \fB\-\-batch\fR is used, the batch shell processes will be signaled.
-The child processes of the shell will not be signalled by SLURM, but
+The child processes of the shell will not be signalled by Slurm, but
 the shell may forward the signal.
 
 When \fB\-\-batch\fR is not used but \fB\-\-signal\fR is used,
@@ -184,7 +184,7 @@ environment variables, along with their corresponding options, are listed below.
 \fB\-\-wckey\fR=\fIwckey\fR
 .TP
 \fBSLURM_CONF\fR
-The location of the SLURM configuration file.
+The location of the Slurm configuration file.
 
 .SH "NOTES"
 .LP
@@ -211,7 +211,7 @@ may then be used for post processing.
 
 .SH "AUTHORIZATION"
 
-When using the SLURM db, users who have AdminLevel's defined (Operator
+When using the Slurm db, users who have AdminLevel's defined (Operator
 or Admin) and users who are account coordinators are given the
 authority to invoke scancel on other user's jobs.
 
@@ -248,15 +248,15 @@ Copyright (C) 2008-2011 Lawrence Livermore National Security.
 .br
 Copyright (C) 2010\-2013 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man1/scontrol.1 b/doc/man/man1/scontrol.1
index c170a480c2d..e7dd721ea96 100644
--- a/doc/man/man1/scontrol.1
+++ b/doc/man/man1/scontrol.1
@@ -158,7 +158,7 @@ If the specified partition is in use, the request is denied.
 \fBdetails\fP
 Causes the \fIshow\fP command to provide additional details where available.
 Job information will include CPUs and NUMA memory allocated on each node.
-Note that on computers with hyperthreading enabled and SLURM configured to
+Note that on computers with hyperthreading enabled and Slurm configured to
 allocate cores, each listed CPU represents one physical core.
 Each hyperthread on that core can be allocated a separate task, so a job's
 CPU count and task count may differ.
@@ -211,7 +211,7 @@ Print information one line per record.
 Print the Slurm job id and scheduled termination time corresponding to the
 supplied process id, \fIproc_id\fP, on the current node.  This will work only
 with processes on node on which scontrol is run, and only for those processes
-spawned by SLURM and their descendants.
+spawned by Slurm and their descendants.
 
 .TP
 \fBlistpids\fP [\fIjob_id\fP[.\fIstep_id\fP]] [\fINodeName\fP]
@@ -219,14 +219,14 @@ Print a listing of the process IDs in a job step (if JOBID.STEPID is provided),
 or all of the job steps in a job (if \fIjob_id\fP is provided), or all of the job
 steps in all of the jobs on the local node (if \fIjob_id\fP is not provided
 or \fIjob_id\fP is "*").  This will work only with processes on the node on
-which scontrol is run, and only for those processes spawned by SLURM and
-their descendants. Note that some SLURM configurations
+which scontrol is run, and only for those processes spawned by Slurm and
+their descendants. Note that some Slurm configurations
 (\fIProctrackType\fP value of \fIpgid\fP or \fIaix\fP)
 are unable to identify all processes associated with a job or job step.
 
 Note that the NodeName option is only really useful when you have multiple
 slurmd daemons running on the same host machine.  Multiple slurmd daemons on
-one host are, in general, only used by SLURM developers.
+one host are, in general, only used by Slurm developers.
 
 .TP
 \fBping\fP
@@ -244,7 +244,7 @@ Terminate the execution of scontrol.
 .TP
 \fBreboot_nodes\fP [\fINodeList\fP]
 Reboot all nodes in the system when they become idle using the
-\fBRebootProgram\fP as configured in SLURM's slurm.conf file.
+\fBRebootProgram\fP as configured in Slurm's slurm.conf file.
 Accepts an option list of nodes to reboot. By default all nodes are rebooted.
 NOTE: This command does not prevent additional jobs from being scheduled on
 these nodes, so many jobs can be executed on the nodes prior to them being
@@ -266,7 +266,7 @@ Prolog, SlurmctldLogFile, SlurmdLogFile, etc.).
 The Slurm controller (slurmctld) forwards the request all other daemons
 (slurmd daemon on each compute node). Running jobs continue execution.
 Most configuration parameters can be changed by just running this command,
-however, SLURM daemons should be shutdown and restarted if any of these
+however, Slurm daemons should be shutdown and restarted if any of these
 parameters are to be changed: AuthType, BackupAddr, BackupController,
 ControlAddr, ControlMach, PluginDir, StateSaveLocation, SlurmctldPort
 or SlurmdPort. The slurmctld daemon must be restarted if nodes are added to
@@ -280,12 +280,12 @@ Also see \fBhold\fR.
 
 .TP
 \fBrequeue\fP \fIjob_list\fP
-Requeue a running, suspended or finished SLURM batch job into pending state.
+Requeue a running, suspended or finished Slurm batch job into pending state.
 The job_list argument is a comma separated list of job IDs.
 
 .TP
 \fBrequeuehold\fP \fIjob_list\fP
-Requeue a running, suspended or finished SLURM batch job into pending state,
+Requeue a running, suspended or finished Slurm batch job into pending state,
 moreover the job is put in held state (priority zero).
 The job_list argument is a comma separated list of job IDs.
 A held job can be released using scontrol to reset its priority (e.g.
@@ -407,15 +407,15 @@ If a suspended job is requeued, it will be placed in a held state.
 
 .TP
 \fBtakeover\fP
-Instruct SLURM's backup controller (slurmctld) to take over system control.
-SLURM's backup controller requests control from the primary and waits for
+Instruct Slurm's backup controller (slurmctld) to take over system control.
+Slurm's backup controller requests control from the primary and waits for
 its termination. After that, it switches from backup mode to controller
 mode. If primary controller can not be contacted, it directly switches to
-controller mode. This can be used to speed up the SLURM controller
+controller mode. This can be used to speed up the Slurm controller
 fail\-over mechanism when the primary node is down.
 This can be used to minimize disruption if the computer executing the
-primary SLURM controller is scheduled down.
-(Note: SLURM's primary controller will take the control back at startup.)
+primary Slurm controller is scheduled down.
+(Note: Slurm's primary controller will take the control back at startup.)
 
 .TP
 \fBuhold\fP \fIjob_list\fP
@@ -452,7 +452,7 @@ Display the version number of scontrol being executed.
 .TP
 \fBwait_job\fP \fIjob_id\fP
 Wait until a job andall of its nodes are ready for use or the job has entered
-some termination state. This option is particularly useful in the SLURM Prolog
+some termination state. This option is particularly useful in the Slurm Prolog
 or in the batch script itself if nodes are powered down and restarted
 automatically as needed.
 
@@ -680,18 +680,18 @@ a date of the form \fIMMDDYY\fR or \fIMM/DD/YY\fR or \fIMM.DD.YY\fR,
 or a date and time as \fIYYYY\-MM\-DD[THH:MM[:SS]]\fR.  You can also
 give times like \fInow + count time\-units\fR, where the time\-units
 can be \fIminutes\fR, \fIhours\fR, \fIdays\fR, or \fIweeks\fR
-and you can tell SLURM to run the job today with the keyword
+and you can tell Slurm to run the job today with the keyword
 \fItoday\fR and to run the job tomorrow with the keyword
 \fItomorrow\fR.
 .RS
 .PP
 Notes on date/time specifications:
  \- although the 'seconds' field of the HH:MM:SS time specification is
-allowed by the code, note that the poll time of the SLURM scheduler
+allowed by the code, note that the poll time of the Slurm scheduler
 is not precise enough to guarantee dispatch of the job on the exact
 second.  The job will be eligible to start on the next poll
 following the specified time. The exact poll interval depends on the
-SLURM scheduler (e.g., 60 seconds with the default sched/builtin).
+Slurm scheduler (e.g., 60 seconds with the default sched/builtin).
  \- if no time (HH:MM:SS) is specified, the default is (00:00:00).
  \- if a date is specified without a year (e.g., MM/DD) then the current
 year is assumed, unless the combination of MM/DD and HH:MM:SS has
@@ -700,7 +700,7 @@ already passed for that year, in which case the next year is used.
 .TP
 \fISwitches\fP=<count>[@<max\-time\-to\-wait>]
 When a tree topology is used, this defines the maximum count of switches
-desired for the job allocation. If SLURM finds an allocation containing more
+desired for the job allocation. If Slurm finds an allocation containing more
 switches than the count specified, the job remain pending until it either finds
 an allocation with desired switch count or the time limit expires. By default
 there is no switch count limit and no time limit delay. Set the count
@@ -1085,7 +1085,7 @@ this partition or qos.
 .TP
 \fIHidden\fP=<yes|no>
 Specify if the partition and its jobs should be hidden from view.
-Hidden partitions will by default not be reported by SLURM APIs
+Hidden partitions will by default not be reported by Slurm APIs
 or commands.
 Possible values are "YES" and "NO".
 
@@ -1289,7 +1289,7 @@ a date of the form \fIMMDDYY\fR or \fIMM/DD/YY\fR or \fIMM.DD.YY\fR,
 or a date and time as \fIYYYY\-MM\-DD[THH:MM[:SS]]\fR.  You can also
 give times like \fInow + count time\-units\fR, where the time\-units
 can be \fIminutes\fR, \fIhours\fR, \fIdays\fR, or \fIweeks\fR
-and you can tell SLURM to run the job today with the keyword
+and you can tell Slurm to run the job today with the keyword
 \fItoday\fR and to run the job tomorrow with the keyword
 \fItomorrow\fR. You cannot update the \fIStartTime\fP of a reservation
 in \fIACTIVE\fP state.
@@ -1518,7 +1518,7 @@ Larger values may adversely impact the application performance.
 Same as \fB\-\-clusters\fR
 .TP
 \fBSLURM_CONF\fR
-The location of the SLURM configuration file.
+The location of the Slurm configuration file.
 .TP
 \fBSLURM_TIME_FORMAT\fR
 Specify the format used to report time stamps. A value of \fIstandard\fR, the
@@ -1535,7 +1535,7 @@ A valid strftime() format can also be specified. For example, a value of
 
 .SH "AUTHORIZATION"
 
-When using the SLURM db, users who have AdminLevel's defined (Operator
+When using the Slurm db, users who have AdminLevel's defined (Operator
 or Admin) and users who are account coordinators are given the
 authority to view and modify jobs, reservations, nodes, etc., as
 defined in the following table \- regardless of whether a PrivateData
@@ -1672,15 +1672,15 @@ Copyright (C) 2008\-2010 Lawrence Livermore National Security.
 .br
 Copyright (C) 2010-2014 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man1/sdiag.1 b/doc/man/man1/sdiag.1
index 8de73e68bed..4fa85637a6a 100644
--- a/doc/man/man1/sdiag.1
+++ b/doc/man/man1/sdiag.1
@@ -2,7 +2,7 @@
 
 .SH "NAME"
 .LP
-sdiag \- Scheduling diagnostic tool for SLURM
+sdiag \- Scheduling diagnostic tool for Slurm
 
 .SH "SYNOPSIS"
 .LP
@@ -13,7 +13,7 @@ sview
 sdiag shows information related to slurmctld execution about: threads, agents,
 jobs, and scheduling algorithms. The goal is to obtain data from slurmctld
 behaviour helping to adjust configuration parameters or queues policies. The
-main reason behind is to know SLURM behaviour under systems with a high throughput.
+main reason behind it is to know Slurm behaviour under systems with a high throughput.
 .LP
 It has two execution modes. The default mode \fB\-\-all\fR shows several counters
 and statistics explained later, and there is another execution option
@@ -31,7 +31,7 @@ bottleneck.
 
 .TP
 \fBAgent queue size\fR
-SLURM design has scalability in mind and sending messages to thousands of nodes
+Slurm design has scalability in mind and sending messages to thousands of nodes
 is not a trivial task. The agent mechanism helps to control communication
 between the slurm daemons and the controller for a best effort. If this values
 is close to MAX_AGENT_CNT there could be some delays affecting jobs management.
@@ -231,19 +231,19 @@ environment variables, along with their corresponding options, are listed below.
 (Note: commandline options will always override these settings)
 .TP 20
 \fBSLURM_CONF\fR
-The location of the SLURM configuration file.
+The location of the Slurm configuration file.
 
 .SH "COPYING"
 Copyright (C) 2010-2011 Barcelona Supercomputing Center.
 .br
 Copyright (C) 2010\-2014 SchedMD LLC.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man1/sh5util.1 b/doc/man/man1/sh5util.1
index 1b9811af507..3a3788dd652 100644
--- a/doc/man/man1/sh5util.1
+++ b/doc/man/man1/sh5util.1
@@ -3,7 +3,7 @@
 .SH "NAME"
 .LP
 sh5util \- Tool for merging HDF5 files from the acct_gather_profile
-plugin that gathers detailed data for jobs running under SLURM
+plugin that gathers detailed data for jobs running under Slurm
 
 .SH "SYNOPSIS"
 .LP
@@ -174,12 +174,12 @@ sh5util \-j 42 \-\-series=Energy \-\-data=power
 Copyright (C) 2013 Bull.
 .br
 Copyright (C) 2013 SchedMD LLC.
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man1/sinfo.1 b/doc/man/man1/sinfo.1
index 038e6eb17b3..36697eeb59a 100644
--- a/doc/man/man1/sinfo.1
+++ b/doc/man/man1/sinfo.1
@@ -1,13 +1,13 @@
 .TH sinfo "1" "Slurm Commands" "April 2015" "Slurm Commands"
 
 .SH "NAME"
-sinfo \- view information about SLURM nodes and partitions.
+sinfo \- view information about Slurm nodes and partitions.
 
 .SH "SYNOPSIS"
 \fBsinfo\fR [\fIOPTIONS\fR...]
 .SH "DESCRIPTION"
 \fBsinfo\fR is used to view partition and node information for a
-system running SLURM.
+system running Slurm.
 
 .SH "OPTIONS"
 
@@ -264,7 +264,7 @@ If set only report state information for responding nodes.
 .TP
 \fB\-R\fR, \fB\-\-list\-reasons\fR
 List reasons nodes are in the down, drained, fail or failing state.
-When nodes are in these states SLURM supports optional inclusion
+When nodes are in these states Slurm supports optional inclusion
 of a "reason" string by an administrator.
 This option will display the first 35 characters of the reason
 field and list of nodes with that reason for all nodes that are,
@@ -291,7 +291,7 @@ separated by commas.  The field specifications may be preceded
 by "+" or "\-" for ascending (default) and descending order
 respectively.  The partition field specification, "P", may be
 preceded by a "#" to report partitions in the same order that
-they appear in SLURM's  configuration file, \fBslurm.conf\fR.
+they appear in Slurm's  configuration file, \fBslurm.conf\fR.
 For example, a sort value of "+P,\-m" requests that records
 be printed in order of increasing partition name and within a
 partition by decreasing memory size.  The default value of sort
@@ -315,7 +315,7 @@ used to filtering nodes by the responding flag.
 
 .TP
 \fB\-T\fR, \fB\-\-reservation\fR
-Only display information about SLURM reservations.
+Only display information about Slurm reservations.
 
 .TP
 \fB\-\-usage\fR
@@ -450,16 +450,16 @@ one or more jobs are in the process of COMPLETING.
 \fBCOMPLETING\fR
 All jobs associated with this node are in the process of
 COMPLETING.  This node state will be removed when
-all of the job's processes have terminated and the SLURM
+all of the job's processes have terminated and the Slurm
 epilog program (if any) has terminated. See the \fBEpilog\fR
 parameter description in the \fBslurm.conf\fR man page for
 more information.
 .TP
 \fBDOWN\fR
-The node is unavailable for use. SLURM can automatically
+The node is unavailable for use. Slurm can automatically
 place nodes in this state if some failure occurs. System
 administrators may also explicitly place nodes in this state. If
-a node resumes normal operation, SLURM can automatically
+a node resumes normal operation, Slurm can automatically
 return it to service. See the \fBReturnToService\fR
 and \fBSlurmdTimeout\fR parameter descriptions in the
 \fBslurm.conf\fR(5) man page for more information.
@@ -480,9 +480,9 @@ node\fR command in the \fBscontrol\fR(1) man page or the
 .TP
 \fBERROR\fR
 The node is currently in an error state and not capable of running any jobs.
-SLURM can automatically place nodes in this state if some failure occurs.
+Slurm can automatically place nodes in this state if some failure occurs.
 System administrators may also explicitly place nodes in this state. If
-a node resumes normal operation, SLURM can automatically
+a node resumes normal operation, Slurm can automatically
 return it to service. See the \fBReturnToService\fR
 and \fBSlurmdTimeout\fR parameter descriptions in the
 \fBslurm.conf\fR(5) man page for more information.
@@ -527,7 +527,7 @@ The node is currently in the process of being powered up.
 The node is in an advanced reservation and not generally available.
 .TP
 \fBUNKNOWN\fR
-The SLURM controller has just started and the node's state
+The Slurm controller has just started and the node's state
 has not yet been determined.
 
 .SH "ENVIRONMENT VARIABLES"
@@ -553,7 +553,7 @@ Commandline options will always override these settings.)
 Same as \fB\-\-clusters\fR
 .TP
 \fBSLURM_CONF\fR
-The location of the SLURM configuration file.
+The location of the Slurm configuration file.
 .TP
 \fBSLURM_TIME_FORMAT\fR
 Specify the format used to report time stamps. A value of \fIstandard\fR, the
@@ -641,15 +641,15 @@ Copyright (C) 2008\-2009 Lawrence Livermore National Security.
 .br
 Copyright (C) 2010-2013 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man1/slurm.1 b/doc/man/man1/slurm.1
index 111dae7b514..0d9517740f5 100644
--- a/doc/man/man1/slurm.1
+++ b/doc/man/man1/slurm.1
@@ -6,9 +6,9 @@ Slurm \- Slurm Workload Manager overview.
 .SH "DESCRIPTION"
 The Slurm Workload Manager is an open source,
 fault-tolerant, and highly scalable cluster management and job scheduling system
-for large and small Linux clusters. SLURM requires no kernel modifications for
+for large and small Linux clusters. Slurm requires no kernel modifications for
 its operation and is relatively self-contained. As a cluster resource manager,
-SLURM has three key functions. First, it allocates exclusive and/or non-exclusive
+Slurm has three key functions. First, it allocates exclusive and/or non-exclusive
 access to resources (compute nodes) to users for some duration of time so they
 can perform work. Second, it provides a framework for starting, executing, and
 monitoring work (normally a parallel job) on the set of allocated nodes.
@@ -23,7 +23,7 @@ Slurm has a centralized manager, \fBslurmctld\fR, to monitor resources and
 work. There may also be a backup manager to assume those responsibilities in the
 event of failure. Each compute server (node) has a \fBslurmd\fR daemon, which
 can be compared to a remote shell: it waits for work, executes that work, returns
-status, and waits for more work. An optional \fBslurmdbd\fR (SLURM DataBase Daemon)
+status, and waits for more work. An optional \fBslurmdbd\fR (Slurm DataBase Daemon)
 can be used for accounting purposes and to maintain resource limit information.
 
 Basic user tools include \fBsrun\fR to initiate jobs,
@@ -34,7 +34,7 @@ information. APIs are available for all functions.
 
 Slurm configuration is maintained in the \fBslurm.conf\fR file.
 
-Man pages are available for all SLURM commands, daemons, APIs, plus the
+Man pages are available for all Slurm commands, daemons, APIs, plus the
 \fBslurm.conf\fR file.
 Extensive documentation is also available on the internet at
 \fB<http://slurm.schedmd.com/>\fR.
@@ -47,7 +47,7 @@ Copyright (C) 2008\-2009 Lawrence Livermore National Security.
 .br
 Copyright (C) 2010\-2013 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
 Slurm is free software; you can redistribute it and/or modify it under
diff --git a/doc/man/man1/smap.1 b/doc/man/man1/smap.1
index 2ec354974d1..9970c48bdd4 100644
--- a/doc/man/man1/smap.1
+++ b/doc/man/man1/smap.1
@@ -1,14 +1,14 @@
 .TH smap "1" "Slurm Commands" "April 2015" "Slurm Commands"
 
 .SH "NAME"
-smap \- graphically view information about SLURM jobs, partitions, and set
+smap \- graphically view information about Slurm jobs, partitions, and set
 configurations parameters.
 
 .SH "SYNOPSIS"
 \fBsmap\fR [\fIOPTIONS\fR...]
 .SH "DESCRIPTION"
 \fBsmap\fR is used to graphically view job, partition and node information
-for a system running SLURM.
+for a system running Slurm.
 Note that information about nodes and partitions to which you lack
 access will always be displayed to avoid obvious gaps in the output.
 This is equivalent to the \fB\-\-all\fR option of the \fBsinfo\fR and
@@ -403,16 +403,16 @@ one or more jobs are in the process of COMPLETING.
 \fBCOMPLETING\fR
 All jobs associated with this node are in the process of
 COMPLETING.  This node state will be removed when
-all of the job's processes have terminated and the SLURM
+all of the job's processes have terminated and the Slurm
 epilog program (if any) has terminated. See the \fBEpilog\fR
 parameter description in the \fBslurm.conf\fR man page for
 more information.
 .TP
 \fBDOWN\fR
-The node is unavailable for use. SLURM can automatically
+The node is unavailable for use. Slurm can automatically
 place nodes in this state if some failure occurs. System
 administrators may also explicitly place nodes in this state. If
-a node resumes normal operation, SLURM can automatically
+a node resumes normal operation, Slurm can automatically
 return it to service. See the \fBReturnToService\fR
 and \fBSlurmdTimeout\fR parameter descriptions in the
 \fBslurm.conf\fR(5) man page for more information.
@@ -450,7 +450,7 @@ The node is not allocated to any jobs and is available for use.
 The node is currently in a reservation with a flag value of "maintainence".
 .TP
 \fBUNKNOWN\fR
-The SLURM controller has just started and the node's state
+The Slurm controller has just started and the node's state
 has not yet been determined.
 
 .SH "JOB STATE CODES"
@@ -504,7 +504,7 @@ The following environment variables can be used to override settings
 compiled into smap.
 .TP 20
 \fBSLURM_CONF\fR
-The location of the SLURM configuration file.
+The location of the Slurm configuration file.
 
 .SH "COPYING"
 Copyright (C) 2004\-2007 The Regents of the University of California.
@@ -514,15 +514,15 @@ Copyright (C) 2008\-2009 Lawrence Livermore National Security.
 .br
 Copyright (C) 2010\-2013 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man1/sprio.1 b/doc/man/man1/sprio.1
index d902530e7ad..fe45b37a84d 100644
--- a/doc/man/man1/sprio.1
+++ b/doc/man/man1/sprio.1
@@ -154,7 +154,7 @@ the following environment variables.
 Same as \fB\-\-clusters\fR
 .TP
 \fBSLURM_CONF\fR
-The location of the SLURM configuration file.
+The location of the Slurm configuration file.
 
 .SH "EXAMPLES"
 .eo
@@ -225,15 +225,15 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 .br
 Copyright (C) 2010\-2013 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man1/squeue.1 b/doc/man/man1/squeue.1
index 3d37b56057c..f7a3be4ad3f 100644
--- a/doc/man/man1/squeue.1
+++ b/doc/man/man1/squeue.1
@@ -1,14 +1,14 @@
 .TH squeue "1" "Slurm Commands" "April 2015" "Slurm Commands"
 
 .SH "NAME"
-squeue \- view information about jobs located in the SLURM scheduling queue.
+squeue \- view information about jobs located in the Slurm scheduling queue.
 
 .SH "SYNOPSIS"
 \fBsqueue\fR [\fIOPTIONS\fR...]
 
 .SH "DESCRIPTION"
 \fBsqueue\fR is used to view job and job step information for jobs managed by
-SLURM.
+Slurm.
 
 .SH "OPTIONS"
 
@@ -924,7 +924,7 @@ Any of these options may be explicitly changed as desired by
 combining the \fB\-\-start\fR option with other option values
 (e.g. to use a different output format).
 The expected start time of pending jobs is only available if the
-SLURM is configured to use the backfill scheduling plugin.
+Slurm is configured to use the backfill scheduling plugin.
 
 .TP
 \fB\-t <state_list>\fR, \fB\-\-states=<state_list>\fR
@@ -1074,7 +1074,7 @@ The job is waiting its advanced reservation to become available.
 The job is waiting for resources to become available.
 .TP
 \fBSystemFailure\fR
-Failure of the SLURM system, a file system, the network, etc.
+Failure of the Slurm system, a file system, the network, etc.
 .TP
 \fBTimeLimit\fR
 The job exhausted its time limit.
@@ -1154,7 +1154,7 @@ Larger values may adversely impact the application performance.
 Same as \fB\-\-clusters\fR
 .TP
 \fBSLURM_CONF\fR
-The location of the SLURM configuration file.
+The location of the Slurm configuration file.
 .TP
 \fBSLURM_TIME_FORMAT\fR
 Specify the format used to report time stamps. A value of \fIstandard\fR, the
@@ -1271,15 +1271,15 @@ Copyright (C) 2008\-2010 Lawrence Livermore National Security.
 .br
 Copyright (C) 2010\-2014 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man1/sreport.1 b/doc/man/man1/sreport.1
index 57079a09787..021ddc7d473 100644
--- a/doc/man/man1/sreport.1
+++ b/doc/man/man1/sreport.1
@@ -8,7 +8,7 @@ sreport \- Generate reports from the slurm accounting data.
 
 .SH "DESCRIPTION"
 \fBsreport\fR is used to generate reports of job usage and cluster
-utilization for SLURM jobs saved to the SLURM Database,
+utilization for Slurm jobs saved to the Slurm Database,
 \fBslurmdbd\fR.
 
 .SH "OPTIONS"
@@ -410,7 +410,7 @@ environment variables, along with their corresponding options, are listed below.
 (Note: commandline options will always override these settings)
 .TP 20
 \fBSLURM_CONF\fR
-The location of the SLURM configuration file.
+The location of the Slurm configuration file.
 
 .in 0
 .SH "EXAMPLES"
@@ -441,15 +441,15 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 .br
 Copyright (C) 2010\-2013 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man1/srun.1 b/doc/man/man1/srun.1
index d1ecda05b73..e2ee9775ddf 100644
--- a/doc/man/man1/srun.1
+++ b/doc/man/man1/srun.1
@@ -7,7 +7,7 @@ srun \- Run parallel jobs
 \fBsrun\fR [\fIOPTIONS\fR...]  \fIexecutable \fR[\fIargs\fR...]
 
 .SH "DESCRIPTION"
-Run a parallel job on cluster managed by SLURM.  If necessary, srun will
+Run a parallel job on a cluster managed by Slurm.  If necessary, srun will
 first create a resource allocation in which to run the parallel job.
 
 The following document describes the the influence of various options on the
@@ -28,7 +28,7 @@ command.
 .TP
 \fB\-\-acctg\-freq\fR
 Define the job accounting and profiling sampling intervals.
-This can be used to override the \fIJobAcctGatherFrequency\fR parameter in SLURM's
+This can be used to override the \fIJobAcctGatherFrequency\fR parameter in Slurm's
 configuration file, \fIslurm.conf\fR.
 The supported format is follows:
 .RS
@@ -71,7 +71,7 @@ The default value for the task sampling interval
 is 30. The default value for all other intervals is 0.
 An interval of 0 disables sampling of the specified type.
 If the task sampling interval is 0, accounting
-information is collected only at job termination (reducing SLURM
+information is collected only at job termination (reducing Slurm
 interference with the job).
 .br
 .br
@@ -124,7 +124,7 @@ a date of the form \fIMMDDYY\fR or \fIMM/DD/YY\fR
 format \fIYYYY\-MM\-DD[THH:MM[:SS]]\fR. You can also
 give times like \fInow + count time\-units\fR, where the time\-units
 can be \fIseconds\fR (default), \fIminutes\fR, \fIhours\fR,
-\fIdays\fR, or \fIweeks\fR and you can tell SLURM to run
+\fIdays\fR, or \fIweeks\fR and you can tell Slurm to run
 the job today with the keyword \fItoday\fR and to run the
 job tomorrow with the keyword \fItomorrow\fR.
 The value may be changed after job submission using the
@@ -141,11 +141,11 @@ For example:
 .PP
 Notes on date/time specifications:
  \- Although the 'seconds' field of the HH:MM:SS time specification is
-allowed by the code, note that the poll time of the SLURM scheduler
+allowed by the code, note that the poll time of the Slurm scheduler
 is not precise enough to guarantee dispatch of the job on the exact
 second.  The job will be eligible to start on the next poll
 following the specified time. The exact poll interval depends on the
-SLURM scheduler (e.g., 60 seconds with the default sched/builtin).
+Slurm scheduler (e.g., 60 seconds with the default sched/builtin).
  \- If no time (HH:MM:SS) is specified, the default is (00:00:00).
  \- If a date is specified without a year (e.g., MM/DD) then the current
 year is assumed, unless the combination of MM/DD and HH:MM:SS has
@@ -174,7 +174,7 @@ An arbitrary comment.
 
 .TP
 \fB\-C\fR, \fB\-\-constraint\fR=<\fIlist\fR>
-Nodes can have \fBfeatures\fR assigned to them by the SLURM administrator.
+Nodes can have \fBfeatures\fR assigned to them by the Slurm administrator.
 Users can specify which of these \fBfeatures\fR are required by their job
 using the constraint option.
 Only nodes having features matching the job constraints will be used to
@@ -246,7 +246,7 @@ Used only when the task/affinity or task/cgroup plugin is enabled.
 The configuration parameter \fBTaskPluginParam\fR may override these options.
 For example, if \fBTaskPluginParam\fR is configured to bind to cores,
 your job will not be able to bind tasks to sockets.
-NOTE: To have SLURM always report on the selected CPU binding for all
+NOTE: To have Slurm always report on the selected CPU binding for all
 commands executed in a shell, you can enable verbose mode by setting
 the SLURM_CPU_BIND environment variable value to "verbose".
 
@@ -692,7 +692,7 @@ SLURM_JOB_ID environment variable was set.
 \fB\-K\fR, \fB\-\-kill\-on\-bad\-exit\fR[=0|1]
 Controls whether or not to terminate a job if any task exits with a non\-zero
 exit code. If this option is not specified, the default action will be based
-upon the SLURM configuration parameter of \fBKillOnBadExit\fR. If this option
+upon the Slurm configuration parameter of \fBKillOnBadExit\fR. If this option
 is specified, it will take precedence over \fBKillOnBadExit\fR. An option
 argument of zero will not terminate the job. A non\-zero argument or no
 argument will terminate the job.
@@ -718,7 +718,7 @@ The default action is to terminate the job upon node failure.
 .TP
 \fB\-\-launch-cmd\fR
 Print external launch command instead of running job normally through
-SLURM. This option is only valid if using something other than the
+Slurm. This option is only valid if using something other than the
 \fIlaunch/slurm\fR plugin.
 
 .TP
@@ -911,7 +911,7 @@ If you want greater control, try running a simple test code with the
 options "\-\-cpu_bind=verbose,none \-\-mem_bind=verbose,none" to determine
 the specific configuration.
 
-NOTE: To have SLURM always report on the selected memory binding for
+NOTE: To have Slurm always report on the selected memory binding for
 all commands executed in a shell, you can enable verbose mode by
 setting the SLURM_MEM_BIND environment variable value to "verbose".
 
@@ -970,7 +970,7 @@ Specify a minimum number of logical cpus/processors per node.
 .TP
 \fB\-\-msg\-timeout\fR=<\fIseconds\fR>
 Modify the job launch message timeout.
-The default value is \fBMessageTimeout\fR in the SLURM configuration file slurm.conf.
+The default value is \fBMessageTimeout\fR in the Slurm configuration file slurm.conf.
 Changes to this are typically not recommended, but could be useful to diagnose problems.
 
 .TP
@@ -1201,7 +1201,7 @@ of 32 connections are established (2 instances x 2 protocols x 2 networks x
 
 .TP
 \fB\-\-nice\fR[=\fIadjustment\fR]
-Run the job with an adjusted scheduling priority within SLURM.
+Run the job with an adjusted scheduling priority within Slurm.
 With no adjustment value the scheduling priority is decreased
 by 100. The adjustment range is from \-10000 (highest priority)
 to 10000 (lowest priority). Only privileged users can specify
@@ -1267,7 +1267,7 @@ By specifying \fB\-\-overcommit\fR you are explicitly allowing more than one
 process per CPU. However no more than \fBMAX_TASKS_PER_NODE\fR tasks are
 permitted to execute per node.  NOTE: \fBMAX_TASKS_PER_NODE\fR is
 defined in the file \fIslurm.h\fR and is not a variable, it is set at
-SLURM build time.
+Slurm build time.
 
 .TP
 \fB\-o\fR, \fB\-\-output\fR=<\fImode\fR>
@@ -1417,9 +1417,9 @@ running job.
 .TP
 \fB\-\-qos\fR=<\fIqos\fR>
 Request a quality of service for the job.  QOS values can be defined
-for each user/cluster/account association in the SLURM database.
+for each user/cluster/account association in the Slurm database.
 Users will be limited to their association's defined set of qos's when
-the SLURM configuration parameter, AccountingStorageEnforce, includes
+the Slurm configuration parameter, AccountingStorageEnforce, includes
 "qos" in it's definition.
 
 .TP
@@ -1482,7 +1482,7 @@ CoreSpecCount and use the specialized resources on nodes it is allocated.
 \fB\-\-signal\fR=<\fIsig_num\fR>[@<\fIsig_time\fR>]
 When a job is within \fIsig_time\fR seconds of its end time,
 send it the signal \fIsig_num\fR.
-Due to the resolution of event handling by SLURM, the signal may
+Due to the resolution of event handling by Slurm, the signal may
 be sent up to 60 seconds earlier than specified.
 \fIsig_num\fR may either be a signal number or name (e.g. "10" or "USR1").
 \fIsig_time\fR must have an integer value between 0 and 65535.
@@ -1527,7 +1527,7 @@ task/affinity plugin is enabled.
 \fB\-\-switches\fR=<\fIcount\fR>[@<\fImax\-time\fR>]
 When a tree topology is used, this defines the maximum count of switches
 desired for the job allocation and optionally the maximum time to wait
-for that number of switches. If SLURM finds an allocation containing more
+for that number of switches. If Slurm finds an allocation containing more
 switches than the count specified, the job remains pending until it either finds
 an allocation with desired switch count or the time limit expires.
 It there is no switch count limit, there is no delay in starting the job.
@@ -1563,7 +1563,7 @@ limit is for the job, all job steps are signaled. If the time limit is
 for a single job step within an existing job allocation, only that job
 step will be affected. A job time limit supersedes all job step time
 limits. The interval between SIGTERM and SIGKILL is specified by the
-SLURM configuration parameter \fBKillWait\fR.  A time limit of zero
+Slurm configuration parameter \fBKillWait\fR.  A time limit of zero
 requests that no time limit be imposed.  Acceptable time formats
 include "minutes", "minutes:seconds", "hours:minutes:seconds",
 "days\-hours", "days\-hours:minutes" and "days\-hours:minutes:seconds".
@@ -1701,8 +1701,8 @@ if it contains a "/"character.
 
 .TP
 \fB\-Z\fR, \fB\-\-no\-allocate\fR
-Run the specified tasks on a set of nodes without creating a SLURM
-"job" in the SLURM queue structure, bypassing the normal resource
+Run the specified tasks on a set of nodes without creating a Slurm
+"job" in the Slurm queue structure, bypassing the normal resource
 allocation step.  The list of nodes must be specified with the
 \fB\-w\fR, \fB\-\-nodelist\fR option.  This is a privileged option
 only available for the users "SlurmUser" and "root".
@@ -1725,7 +1725,7 @@ Default from \fIblugene.conf\fR if not set.
 \fB\-\-conn\-type\fR=<\fItype\fR>
 Require the block connection type to be of a certain type.
 On Blue Gene the acceptable of \fItype\fR are MESH, TORUS and NAV.
-If NAV, or if not set, then SLURM will try to fit a what the
+If NAV, or if not set, then Slurm will try to fit what the
 DefaultConnType is set to in the bluegene.conf if that isn't set the
 default is TORUS.
 You should not normally set this option.
@@ -1967,7 +1967,7 @@ large processor counts (and large PMI data sets), higher values
 may be required.
 .TP
 \fBSLURM_CONF\fR
-The location of the SLURM configuration file.
+The location of the Slurm configuration file.
 .TP
 \fBSLURM_ACCOUNT\fR
 Same as \fB\-A, \-\-account\fR
@@ -2005,7 +2005,7 @@ Same as \fB\-c, \-\-cpus\-per\-task\fR
 \fBSLURM_DEBUG\fR
 Same as \fB\-v, \-\-verbose\fR
 .TP
-\fBSLURMD_DEBUG\fR
+\fBSLURMD_DEBUG\fR
 Same as \fB\-d, \-\-slurmd\-debug\fR
 .TP
 \fBSLURM_DEPENDENCY\fR
@@ -2027,17 +2027,17 @@ Same as \fB\-\-epilog\fR
 Same as \fB\-\-exclusive\fR
 .TP
 \fBSLURM_EXIT_ERROR\fR
-Specifies the exit code generated when a SLURM error occurs
+Specifies the exit code generated when a Slurm error occurs
 (e.g. invalid options).
 This can be used by a script to distinguish application exit codes from
-various SLURM error conditions.
+various Slurm error conditions.
 Also see \fBSLURM_EXIT_IMMEDIATE\fR.
 .TP
 \fBSLURM_EXIT_IMMEDIATE\fR
 Specifies the exit code generated when the \fB\-\-immediate\fR option
 is used and resources are not currently available.
 This can be used by a script to distinguish application exit codes from
-various SLURM error conditions.
+various Slurm error conditions.
 Also see \fBSLURM_EXIT_ERROR\fR.
 .TP
 \fBSLURM_GEOMETRY\fR
@@ -2229,7 +2229,7 @@ Name of the cluster on which the job is executing.
 \-\-cpu_bind type (none,rank,map_cpu:,mask_cpu:).
 .TP
 \fBSLURM_CPU_BIND_LIST\fR
-\-\-cpu_bind map or mask list (list of SLURM CPU IDs or masks for this node,
+\-\-cpu_bind map or mask list (list of Slurm CPU IDs or masks for this node,
 CPU_ID = Board_ID x threads_per_board +
 Socket_ID x threads_per_socket +
 Core_ID x threads_per_core + Thread_ID).
@@ -2421,24 +2421,24 @@ MPI use depends upon the type of MPI being used.
 There are three fundamentally different modes of operation used
 by these various MPI implementation.
 
-1. SLURM directly launches the tasks and performs initialization
+1. Slurm directly launches the tasks and performs initialization
 of communications (Quadrics MPI, MPICH2, MPICH-GM, MVAPICH, MVAPICH2
 and some MPICH1 modes). For example: "srun \-n16 a.out".
 
-2. SLURM creates a resource allocation for the job and then
-mpirun launches tasks using SLURM's infrastructure (OpenMPI,
+2. Slurm creates a resource allocation for the job and then
+mpirun launches tasks using Slurm's infrastructure (OpenMPI,
 LAM/MPI, HP-MPI and some MPICH1 modes).
 
-3. SLURM creates a resource allocation for the job and then
-mpirun launches tasks using some mechanism other than SLURM,
+3. Slurm creates a resource allocation for the job and then
+mpirun launches tasks using some mechanism other than Slurm,
 such as SSH or RSH (BlueGene MPI and some MPICH1 modes).
-These tasks initiated outside of SLURM's monitoring
-or control. SLURM's epilog should be configured to purge
+These tasks are initiated outside of Slurm's monitoring
+or control. Slurm's epilog should be configured to purge
 these tasks when the job's allocation is relinquished.
 
 See \fIhttp://slurm.schedmd.com/mpi_guide.html\fR
 for more information on use of these various MPI implementation
-with SLURM.
+with Slurm.
 
 .SH "MULTIPLE PROGRAM CONFIGURATION"
 Comments in the configuration file must have a "#" in column one.
@@ -2645,15 +2645,15 @@ Copyright (C) 2008\-2010 Lawrence Livermore National Security.
 .br
 Copyright (C) 2010\-2013 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man1/srun_cr.1 b/doc/man/man1/srun_cr.1
index a180e790fdf..e89d46619bc 100644
--- a/doc/man/man1/srun_cr.1
+++ b/doc/man/man1/srun_cr.1
@@ -10,7 +10,7 @@ srun_cr \- run parallel jobs with checkpoint/restart support
 The design of \fBsrun_cr\fR is inspired by \fBmpiexec_cr\fR from MVAPICH2 and
 \fBcr_restart\fR form BLCR.
 It is a wrapper around the \fBsrun\fR command to enable batch job
-checkpoint/restart support when used with SLURM's \fBcheckpoint/blcr\fR plugin.
+checkpoint/restart support when used with Slurm's \fBcheckpoint/blcr\fR plugin.
 
 .SH "OPTIONS"
 
@@ -30,7 +30,7 @@ SIGCHLD will be captured to mimic the exit status of \fBsrun\fR when it exits.
 Then \fBsrun_cr\fR loops waiting for termination of tasks being launched
 from \fBsrun\fR.
 
-The step launch logic of SLURM is augmented to check if \fBsrun\fR is running
+The step launch logic of Slurm is augmented to check if \fBsrun\fR is running
 under \fBsrun_cr\fR.
 If true, the environment variable \fBSLURM_SRUN_CR_SOCKET\fR should be present,
 the value of which is the address of a Unix domain socket created and listened
@@ -40,7 +40,7 @@ the job ID, step ID and the nodes allocated to the step to \fBsrun_cr\fR.
 
 Upon checkpoint, \fRsrun_cr\fR checks to see if the tasks have been launched.
 If not \fRsrun_cr\fR first forwards the checkpoint request to the tasks by
-calling the SLURM API \fBslurm_checkpoint_tasks()\fR before dumping its process
+calling the Slurm API \fBslurm_checkpoint_tasks()\fR before dumping its process
 context.
 
 Upon restart, \fBsrun_cr\fR checks to see if the tasks have been previously
@@ -55,15 +55,15 @@ execution of the tasks from the previous checkpoint.
 Copyright (C) 2009 National University of Defense Technology, China.
 Produced at National University of Defense Technology, China (cf, DISCLAIMER).
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man1/sshare.1 b/doc/man/man1/sshare.1
index 86bb3c10e34..51c7bca092d 100644
--- a/doc/man/man1/sshare.1
+++ b/doc/man/man1/sshare.1
@@ -7,12 +7,12 @@ sshare \- Tool for listing the shares of associations to a cluster.
 \fBsshare\fR [\fIOPTIONS\fR...]
 
 .SH "DESCRIPTION"
-\fBsshare\fR is used to view SLURM share information.  This command is
+\fBsshare\fR is used to view Slurm share information.  This command is
 only viable when running with the priority/multifactor plugin.
 The sshare information is derived from a database with the interface
-being provided by \fBslurmdbd\fR (SLURM Database daemon) which is
+being provided by \fBslurmdbd\fR (Slurm Database daemon) which is
 read in from the slurmctld and used to process the shares available
-to a given association.  sshare provides SLURM share information of
+to a given association.  sshare provides Slurm share information of
 Account, User, Raw Shares, Normalized Shares, Raw Usage, Normalized
 Usage, Effective Usage, the Fair-share factor, the GrpCPUMins limit
 and accumulated currently running CPU-minutes for each association.
@@ -157,7 +157,7 @@ environment variables, along with their corresponding options, are listed below.
 (Note: commandline options will always override these settings)
 .TP 20
 \fBSLURM_CONF\fR
-The location of the SLURM configuration file.
+The location of the Slurm configuration file.
 
 .SH "EXAMPLES"
 .eo
@@ -176,15 +176,15 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 .br
 Copyright (C) 2010\-2013 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man1/sstat.1 b/doc/man/man1/sstat.1
index cb55a237ff9..9d69ade14f9 100644
--- a/doc/man/man1/sstat.1
+++ b/doc/man/man1/sstat.1
@@ -10,7 +10,7 @@ of a running job/step.
 
 .SH "DESCRIPTION"
 .PP
-Status information for running jobs invoked with SLURM.
+Status information for running jobs invoked with Slurm.
 .PP
 The
 .BR "sstat "
@@ -236,7 +236,7 @@ environment variables, along with their corresponding options, are listed below.
 (Note: commandline options will always override these settings)
 .TP 20
 \fBSLURM_CONF\fR
-The location of the SLURM configuration file.
+The location of the Slurm configuration file.
 
 .SH "EXAMPLES"
 
@@ -254,15 +254,15 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 .br
 Copyright (C) 2010\-2013 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man1/strigger.1 b/doc/man/man1/strigger.1
index f13786e1fff..c551be0446d 100644
--- a/doc/man/man1/strigger.1
+++ b/doc/man/man1/strigger.1
@@ -285,7 +285,7 @@ environment variables, along with their corresponding options, are listed below.
 (Note: commandline options will always override these settings)
 .TP 20
 \fBSLURM_CONF\fR
-The location of the SLURM configuration file.
+The location of the Slurm configuration file.
 
 .SH "EXAMPLES"
 Execute the program "/usr/sbin/primary_slurmctld_failure" whenever the
@@ -308,7 +308,7 @@ strigger \-\-set \-\-primary_slurmctld_failure \\
 Execute the program "/usr/sbin/slurm_admin_notify" whenever
 any node in the cluster goes down. The subject line will include
 the node names which have entered the down state (passed as an
-argument to the script by SLURM).
+argument to the script by Slurm).
 
 .nf
 > cat /usr/sbin/slurm_admin_notify
@@ -383,15 +383,15 @@ Copyright (C) 2008\-2010 Lawrence Livermore National Security.
 .br
 Copyright (C) 2010\-2013 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man1/sview.1 b/doc/man/man1/sview.1
index 370f6abce78..c990d43bc79 100644
--- a/doc/man/man1/sview.1
+++ b/doc/man/man1/sview.1
@@ -2,7 +2,7 @@
 
 .SH "NAME"
 .LP
-sview \- graphical user interface to view and modify SLURM state.
+sview \- graphical user interface to view and modify Slurm state.
 
 .SH "SYNOPSIS"
 .LP
@@ -10,7 +10,7 @@ sview
 
 .SH "DESCRIPTION"
 .LP
-sview can be used to view SLURM configuration, job,
+sview can be used to view Slurm configuration, job,
 step, node and partitions state information.
 Authorized users can also modify select information.
 .LP
@@ -29,7 +29,7 @@ Right\-click on a line of the display to get more information about the record.
 .LP
 There is an \fIAdmin Mode\fR option which permits the user root to modify many of
 the fields displayed, such as node state or job time limit.
-In the mode, a \fISLURM Reconfigure\fR Action is also available.
+In the mode, a \fISlurm Reconfigure\fR Action is also available.
 It is recommended that \fIAdmin Mode\fR be used only while modifications are
 actively being made.
 Disable \fIAdmin Mode\fR immediately after the changes to avoid possibly making
@@ -42,11 +42,11 @@ environment variables, along with their corresponding options, are listed below.
 (Note: commandline options will always override these settings)
 .TP 20
 \fBSLURM_CONF\fR
-The location of the SLURM configuration file.
+The location of the Slurm configuration file.
 
 .SH "NOTES"
 The sview command can only be build if \fIgtk+\-2.0\fR is installed.
-Systems lacking these libraries will have SLURM installed without
+Systems lacking these libraries will have Slurm installed without
 the sview command.
 
 At least some gtk themes are unable to display large numbers of lines (jobs,
@@ -70,15 +70,15 @@ Copyright (C) 2008\-2011 Lawrence Livermore National Security.
 .br
 Copyright (C) 2010\-2013 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man3/slurm_allocate_resources.3 b/doc/man/man3/slurm_allocate_resources.3
index d7b82005c6a..264f813110c 100644
--- a/doc/man/man3/slurm_allocate_resources.3
+++ b/doc/man/man3/slurm_allocate_resources.3
@@ -197,11 +197,11 @@ Execute this function before issuing a request to submit or modify a job.
 .LP
 \fBslurm_job_will_run\fR Determine if the supplied job description could be executed immediately.
 .LP
-\fBslurm_read_hostfile\fR Read a SLURM hostfile specified by
-"filename".  "filename" must contain a list of SLURM NodeNames, one
+\fBslurm_read_hostfile\fR Read a Slurm hostfile specified by
+"filename".  "filename" must contain a list of Slurm NodeNames, one
 per line.  Reads up to "n" number of hostnames from the file. Returns
 a string representing a hostlist ranged string of the contents
-of the file.  This is a helper function, it does not contact any SLURM
+of the file.  This is a helper function, it does not contact any Slurm
 daemons.
 .LP
 \fBslurm_submit_batch_job\fR Submit a job for later execution. Note that if
@@ -274,7 +274,7 @@ the partition's time limit.
 \fBESLURM_BAD_DIST\fR task distribution specification is invalid.
 .LP
 \fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
-SLURM controller.
+Slurm controller.
 .SH "NON-BLOCKING EXAMPLE"
 .LP
 #include <stdio.h>
@@ -425,15 +425,15 @@ Copyright (C) 2002\-2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man3/slurm_checkpoint_error.3 b/doc/man/man3/slurm_checkpoint_error.3
index e7d97bd55fd..b7c8e7a0bfa 100644
--- a/doc/man/man3/slurm_checkpoint_error.3
+++ b/doc/man/man3/slurm_checkpoint_error.3
@@ -132,10 +132,10 @@ Error message for checkpoint operation. Only the \fIerror_msg\fP value for the h
 \fIimage_dir\fP
 Directory specification for where the checkpoint file should be read from or
 written to. The default value is specified by the \fIJobCheckpointDir\fP
-SLURM configuration parameter.
+Slurm configuration parameter.
 .TP
 \fIjob_id\fP
-SLURM job ID to perform the operation upon.
+Slurm job ID to perform the operation upon.
 .TP
 \fImax_wait\fP
 Maximum time to allow for the operation to complete in seconds.
@@ -147,7 +147,7 @@ Nodes to send the request.
 Time at which last checkpoint operation began (if one is in progress), otherwise zero.
 .TP
 \fIstep_id\fP
-SLURM job step ID to perform the operation upon.
+Slurm job step ID to perform the operation upon.
 May be NO_VAL if the operation is to be performed on all steps of the specified job.
 Specify SLURM_BATCH_SCRIPT to checkpoint a batch job.
 .TP
@@ -262,15 +262,15 @@ Copyright (C) 2008\-2009 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man3/slurm_clear_trigger.3 b/doc/man/man3/slurm_clear_trigger.3
index 33983dd329a..9bf44cf9c98 100644
--- a/doc/man/man3/slurm_clear_trigger.3
+++ b/doc/man/man3/slurm_clear_trigger.3
@@ -104,15 +104,15 @@ Copyright (C) 2010 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 Portions Copyright (C) 2014 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man3/slurm_complete_job.3 b/doc/man/man3/slurm_complete_job.3
index 37848e43c7e..a625a9ed3f1 100644
--- a/doc/man/man3/slurm_complete_job.3
+++ b/doc/man/man3/slurm_complete_job.3
@@ -46,7 +46,7 @@ not be modified.
 \fBESLURM_INTERCONNECT_FAILURE\fR failed to configure the node interconnect.
 .LP
 \fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
-SLURM controller.
+Slurm controller.
 
 .SH "NOTE"
 These functions are included in the libslurm library,
@@ -59,15 +59,15 @@ Copyright (C) 2008\-2009 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man3/slurm_free_ctl_conf.3 b/doc/man/man3/slurm_free_ctl_conf.3
index e94da737702..eacd0eb50ca 100644
--- a/doc/man/man3/slurm_free_ctl_conf.3
+++ b/doc/man/man3/slurm_free_ctl_conf.3
@@ -55,7 +55,7 @@ is not returned.  Otherwise all the configuration. job, node, or partition recor
 are returned.
 .SH "DESCRIPTION"
 .LP
-\fBslurm_api_version\fR Return the SLURM API version number.
+\fBslurm_api_version\fR Return the Slurm API version number.
 .LP
 \fBslurm_free_ctl_conf\fR Release the storage generated by the
 \fBslurm_load_ctl_conf\fR function.
@@ -67,9 +67,9 @@ Slurm configuration records.
 \fBslurm_load_ctl_conf\fR function.
 .SH "RETURN VALUE"
 .LP
-For \fBslurm_api_version\fR the SLURM API version number is returned.
+For \fBslurm_api_version\fR the Slurm API version number is returned.
 All other functions return zero on success and \-1 on error with the
-SLURM error code set appropriately.
+Slurm error code set appropriately.
 .SH "ERRORS"
 .LP
 \fBSLURM_NO_CHANGE_IN_DATA\fR Data has not changed since \fBupdate_time\fR.
@@ -77,7 +77,7 @@ SLURM error code set appropriately.
 \fBSLURM_PROTOCOL_VERSION_ERROR\fR Protocol version has changed, re\-link your code.
 .LP
 \fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
-SLURM controller.
+Slurm controller.
 .SH "EXAMPLE"
 .LP
 #include <stdio.h>
@@ -96,7 +96,7 @@ int main (int argc, char *argv[])
 .br
 	long version = slurm_api_version();
 .LP
-	/* We can use the SLURM version number to determine how
+	/* We can use the Slurm version number to determine how
 .br
 	 * API should be used */
 .br
@@ -152,15 +152,15 @@ Copyright (C) 2002\-2007 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man3/slurm_free_front_end_info_msg.3 b/doc/man/man3/slurm_free_front_end_info_msg.3
index 2f827148275..3561b26fe80 100644
--- a/doc/man/man3/slurm_free_front_end_info_msg.3
+++ b/doc/man/man3/slurm_free_front_end_info_msg.3
@@ -116,7 +116,7 @@ is set appropriately.
 your code.
 .LP
 \fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
-SLURM controller.
+Slurm controller.
 
 .SH "EXAMPLE"
 .LP
@@ -194,15 +194,15 @@ Copyright (C) 2010 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man3/slurm_free_job_info_msg.3 b/doc/man/man3/slurm_free_job_info_msg.3
index 6f05a85bdd3..c6ab172bfae 100644
--- a/doc/man/man3/slurm_free_job_info_msg.3
+++ b/doc/man/man3/slurm_free_job_info_msg.3
@@ -147,7 +147,7 @@ REM_TIME = ISLURM_GET_REM_TIME2()
 .LP
 ISLURM_GET_REM_TIME2() is equivalent to ISLURM_GET_REM_TIME() except
 that the JOBID is taken from the SLURM_JOB_ID environment variable,
-which is set by SLURM for tasks which it launches.
+which is set by Slurm for tasks which it launches.
 Both functions return the number of seconds remaining before the job
 reaches the end of it's allocated time.
 
@@ -246,14 +246,14 @@ allocation response message.
 \fBslurm_load_jobs\fR function.
 .LP
 \fBslurm_get_end_time\fR Returns the expected termination time of a specified
-SLURM job. The time corresponds to the exhaustion of the job\'s or partition\'s
+Slurm job. The time corresponds to the exhaustion of the job\'s or partition\'s
 time limit. NOTE: The data is cached locally and only retrieved from the
-SLURM controller once per minute.
+Slurm controller once per minute.
 .LP
 \fBslurm_get_rem_time\fR Returns the number of seconds remaining before the
-expected termination time of a specified SLURM job id. The time corresponds
+expected termination time of a specified Slurm job id. The time corresponds
 to the exhaustion of the job\'s or partition\'s time limit. NOTE: The data is
-cached locally and only retrieved from the SLURM controller once per minute.
+cached locally and only retrieved from the Slurm controller once per minute.
 .LP
 \fBslurm_job_cpus_allocated_on_node\fR and 
 \fBslurm_job_cpus_allocated_on_node_id\fR return the number of CPUs allocated
@@ -302,7 +302,7 @@ your code.
 \fBESLURM_INVALID_JOB_ID\fR Request for information about a non\-existent job.
 .LP
 \fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
-SLURM controller.
+Slurm controller.
 .LP
 \fBINVAL\fR Invalid function argument.
 
@@ -421,7 +421,7 @@ Some data structures contain index values to cross\-reference each other.
 If the \fIshow_flags\fP argument is not set to SHOW_ALL when getting this
 data, these index values will be invalid.
 .LP
-The \fBslurm_hostlist_\fR functions can be used to convert SLURM node list
+The \fBslurm_hostlist_\fR functions can be used to convert Slurm node list
 expressions into a collection of individual node names.
 
 .SH "COPYING"
@@ -430,15 +430,15 @@ Copyright (C) 2008\-2010 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man3/slurm_free_job_step_info_response_msg.3 b/doc/man/man3/slurm_free_job_step_info_response_msg.3
index 2e54956a0a8..05011d5e091 100644
--- a/doc/man/man3/slurm_free_job_step_info_response_msg.3
+++ b/doc/man/man3/slurm_free_job_step_info_response_msg.3
@@ -118,7 +118,7 @@ code is set appropriately.
 \fBSLURM_PROTOCOL_VERSION_ERROR\fR Protocol version has changed, re\-link your code.
 .LP
 \fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
-SLURM controller.
+Slurm controller.
 .SH "EXAMPLE"
 .LP
 #include <stdio.h>
@@ -200,7 +200,7 @@ Some data structures contain index values to cross\-reference each other.
 If the \fIshow_flags\fP argument is not set to SHOW_ALL when getting this
 data, these index values will be invalid.
 .LP
-The \fBslurm_hostlist_\fR functions can be used to convert SLURM node list
+The \fBslurm_hostlist_\fR functions can be used to convert Slurm node list
 expressions into a collection of individual node names.
 
 .SH "COPYING"
@@ -208,15 +208,15 @@ Copyright (C) 2002\-2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man3/slurm_free_node_info.3 b/doc/man/man3/slurm_free_node_info.3
index 1049e42fad7..6970b51b919 100644
--- a/doc/man/man3/slurm_free_node_info.3
+++ b/doc/man/man3/slurm_free_node_info.3
@@ -144,7 +144,7 @@ is set appropriately.
 your code.
 .LP
 \fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
-SLURM controller.
+Slurm controller.
 .SH "EXAMPLE"
 .LP
 #include <stdio.h>
@@ -277,15 +277,15 @@ Copyright (C) 2002\-2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man3/slurm_free_partition_info.3 b/doc/man/man3/slurm_free_partition_info.3
index 30d688690fe..65bc23ed2d1 100644
--- a/doc/man/man3/slurm_free_partition_info.3
+++ b/doc/man/man3/slurm_free_partition_info.3
@@ -104,7 +104,7 @@ On success, zero is returned. On error, \-1 is returned, and Slurm error code is
 your code.
 .LP
 \fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
-SLURM controller.
+Slurm controller.
 .SH "EXAMPLE"
 .LP
 #include <stdio.h>
@@ -186,7 +186,7 @@ Some data structures contain index values to cross\-reference each other.
 If the \fIshow_flags\fP argument is not set to SHOW_ALL when getting this
 data, these index values will be invalid.
 .LP
-The \fBslurm_hostlist_\fR functions can be used to convert SLURM node list
+The \fBslurm_hostlist_\fR functions can be used to convert Slurm node list
 expressions into a collection of individual node names.
 
 .SH "COPYING"
@@ -194,15 +194,15 @@ Copyright (C) 2002\-2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man3/slurm_get_errno.3 b/doc/man/man3/slurm_get_errno.3
index 85f4008fa70..db857d3ba4a 100644
--- a/doc/man/man3/slurm_get_errno.3
+++ b/doc/man/man3/slurm_get_errno.3
@@ -77,15 +77,15 @@ Copyright (C) 2002 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man3/slurm_hostlist_create.3 b/doc/man/man3/slurm_hostlist_create.3
index 99f5ae05cde..79621da49b4 100644
--- a/doc/man/man3/slurm_hostlist_create.3
+++ b/doc/man/man3/slurm_hostlist_create.3
@@ -110,15 +110,15 @@ Copyright (C) 2002\-2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man3/slurm_job_step_create.3 b/doc/man/man3/slurm_job_step_create.3
index e84fe861c52..975bf61669e 100644
--- a/doc/man/man3/slurm_job_step_create.3
+++ b/doc/man/man3/slurm_job_step_create.3
@@ -65,7 +65,7 @@ This is indicative of the job being suspended. Retry the call as desired.
 \fBESLURM_BAD_DIST\fR task distribution specification is invalid.
 .LP
 \fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
-SLURM controller.
+Slurm controller.
 
 .SH "NOTE"
 These functions are included in the libslurm library,
@@ -77,15 +77,15 @@ Copyright (C) 2002-2007 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man3/slurm_kill_job.3 b/doc/man/man3/slurm_kill_job.3
index 355a23f826a..7842a7fa1cd 100644
--- a/doc/man/man3/slurm_kill_job.3
+++ b/doc/man/man3/slurm_kill_job.3
@@ -107,7 +107,7 @@ On success, zero is returned. On error, \-1 is returned, and Slurm error code is
 \fBESLURM_INTERCONNECT_FAILURE\fR failed to configure the node interconnect.
 .LP
 \fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
-SLURM controller.
+Slurm controller.
 
 .SH "NOTE"
 These functions are included in the libslurm library,
@@ -119,15 +119,15 @@ Copyright (C) 2002 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man3/slurm_load_reservations.3 b/doc/man/man3/slurm_load_reservations.3
index 1dc80e05217..a83cf43ed1c 100644
--- a/doc/man/man3/slurm_load_reservations.3
+++ b/doc/man/man3/slurm_load_reservations.3
@@ -108,7 +108,7 @@ is set appropriately.
 your code.
 .LP
 \fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
-SLURM controller.
+Slurm controller.
 .SH "EXAMPLE"
 .LP
 #include <stdio.h>
@@ -186,7 +186,7 @@ These functions are included in the libslurm library,
 which must be linked to your process for use
 (e.g. "cc \-lslurm myprog.c").
 .LP
-The \fBslurm_hostlist_\fR functions can be used to convert SLURM node list
+The \fBslurm_hostlist_\fR functions can be used to convert Slurm node list
 expressions into a collection of individual node names.
 
 .SH "COPYING"
@@ -194,15 +194,15 @@ Copyright (C) 2002\-2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man3/slurm_reconfigure.3 b/doc/man/man3/slurm_reconfigure.3
index 0d745b4662a..826b38dfa45 100644
--- a/doc/man/man3/slurm_reconfigure.3
+++ b/doc/man/man3/slurm_reconfigure.3
@@ -246,7 +246,7 @@ node state transition is not valid.
 \fBESLURM_INVALID_PARTITION_NAME\fR The requested partition name is not valid.
 .LP
 \fBESLURM_INVALID_AUTHTYPE_CHANGE\fR The \fBAuthType\fR parameter can
-not be changed using the \fBslurm_reconfigure\fR function, but all SLURM
+not be changed using the \fBslurm_reconfigure\fR function, but all Slurm
 daemons and commands must be restarted. See \fBslurm.conf\fR(5) for more
 information.
 .LP
@@ -257,14 +257,14 @@ parameters may also be required. See \fBslurm.conf\fR(5) for more information.
 .LP
 \fBESLURM_INVALID_SWITCHTYPE_CHANGE\fR The \fBSwitchType\fR parameter can
 not be changed using the \fBslurm_reconfigure\fR function, but all
-SLURM daemons and commands must be restarted. All previously running
+Slurm daemons and commands must be restarted. All previously running
 jobs will be lost. See \fBslurm.conf\fR(5) for more information.
 .LP
 \fBESLURM_ACCESS_DENIED\fR The requesting user lacks authorization for
 the requested action (e.g. trying to delete or modify another user's job).
 .LP
 \fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
-SLURM controller.
+Slurm controller.
 .LP
 \fBESLURM_RESERVATION_ACCESS\fR  Requestor is not authorized to access the
 reservation.
@@ -396,15 +396,15 @@ Copyright (C) 2008\-2010 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man3/slurm_slurmd_status.3 b/doc/man/man3/slurm_slurmd_status.3
index 3b080f3b95e..c1102fda44d 100644
--- a/doc/man/man3/slurm_slurmd_status.3
+++ b/doc/man/man3/slurm_slurmd_status.3
@@ -54,15 +54,15 @@ Copyright (C) 2008 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man3/slurm_step_ctx_create.3 b/doc/man/man3/slurm_step_ctx_create.3
index 3eb594e6472..f00c9a6cbde 100644
--- a/doc/man/man3/slurm_step_ctx_create.3
+++ b/doc/man/man3/slurm_step_ctx_create.3
@@ -222,7 +222,7 @@ This is indicative of the job being suspended. Retry the call as desired.
 \fBESLURM_BAD_DIST\fR task distribution specification is invalid.
 .LP
 \fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
-SLURM controller.
+Slurm controller.
 .SH "EXAMPLE
 .LP
 SEE \fBslurm_step_launch\fR(3) man page for an example of slurm_step_ctx_create
@@ -238,15 +238,15 @@ Copyright (C) 2004-2007 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man3/slurm_step_launch.3 b/doc/man/man3/slurm_step_launch.3
index 2e2bedc4193..8e5704669b1 100644
--- a/doc/man/man3/slurm_step_launch.3
+++ b/doc/man/man3/slurm_step_launch.3
@@ -143,7 +143,7 @@ or SLURM_ERROR if the job step is aborted during launch.
 \fBESLURM_BAD_DIST\fR task distribution specification is invalid.
 .LP
 \fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
-SLURM controller.
+Slurm controller.
 
 .SH "EXAMPLE
 .LP
@@ -232,15 +232,15 @@ Copyright (C) 2008 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man5/acct_gather.conf.5 b/doc/man/man5/acct_gather.conf.5
index 9b643c7a289..1da92cf20da 100644
--- a/doc/man/man5/acct_gather.conf.5
+++ b/doc/man/man5/acct_gather.conf.5
@@ -17,12 +17,12 @@ Any text following a "#" in the configuration file is treated
 as a comment through the end of that line.
 The size of each line in the file is limited to 1024 characters.
 Changes to the configuration file take effect upon restart of
-SLURM daemons, daemon receipt of the SIGHUP signal, or execution
+Slurm daemons, daemon receipt of the SIGHUP signal, or execution
 of the command "scontrol reconfigure" unless otherwise noted.
 
 .LP
 The following acct_gather.conf parameters are defined to control the general
-behavior of various plugins in SLURM.
+behavior of various plugins in Slurm.
 
 .LP
 The acct_gather.conf file is different than other Slurm .conf files.  Each
@@ -158,15 +158,15 @@ InfinibandOFEDPort=1
 Copyright (C) 2012-2013 Bull.
 Produced at Bull (cf, DISCLAIMER).
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man5/bluegene.conf.5 b/doc/man/man5/bluegene.conf.5
index cd07ced6d84..d9393354cfb 100644
--- a/doc/man/man5/bluegene.conf.5
+++ b/doc/man/man5/bluegene.conf.5
@@ -1,11 +1,11 @@
 .TH "bluegene.conf" "5" "Slurm Configuration File" "April 2015" "Slurm Configuration File"
 
 .SH "NAME"
-bluegene.conf \- SLURM configuration file for BlueGene systems
+bluegene.conf \- Slurm configuration file for BlueGene systems
 
 .SH "DESCRIPTION"
 \fBbluegene.conf\fP is an ASCII file which describes IBM BlueGene specific
-SLURM configuration information. This includes specifications for bgblock
+Slurm configuration information. This includes specifications for bgblock
 layout, configuration, logging, etc.
 The file location can be modified at system build time using the
 DEFAULT_SLURM_CONF parameter or at execution time by setting the SLURM_CONF
@@ -106,12 +106,12 @@ NOTE - There is a current limitation for sub-block jobs and how the system
 network device hardware has cutoff registers to prevent packets from flowing
 outside of the sub-block. Unfortunately, when the sub-block has a size 3,
 the job can attempt to send user packets outside of its sub-block. This causes
-it to be terminated by signal 36.  To prevent this from happening SLURM does
+it to be terminated by signal 36.  To prevent this from happening Slurm does
 not allow a sub-block to be used with any dimension of 3.
 
 NOTE - In the current IBM API it does not allow wrapping inside a midplane.
 Meaning you can not create a sub-block of 2 with nodes in the 0 and 3 position.
-SLURM will support this in the future when the underlying system allows it.
+Slurm will support this in the future when the underlying system allows it.
 
 .TP
 \fBRebootQOSList\fR
@@ -182,7 +182,7 @@ BlueGene/L.
 
 .TP
 \fBLayoutMode\fR
-Describes how SLURM should create bgblocks.
+Describes how Slurm should create bgblocks.
 .RS
 .TP 10
 \fBSTATIC\fR:
@@ -344,15 +344,15 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 .br
 Copyright (C) 2010\-2013 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man5/cgroup.conf.5 b/doc/man/man5/cgroup.conf.5
index 3e6338c7cda..28d1dc49d58 100644
--- a/doc/man/man5/cgroup.conf.5
+++ b/doc/man/man5/cgroup.conf.5
@@ -16,7 +16,7 @@ Parameter names are case insensitive.
 Any text following a "#" in the configuration file is treated
 as a comment through the end of that line.
 Changes to the configuration file take effect upon restart of
-SLURM daemons, daemon receipt of the SIGHUP signal, or execution
+Slurm daemons, daemon receipt of the SIGHUP signal, or execution
 of the command "scontrol reconfigure" unless otherwise noted.
 
 .LP
@@ -178,15 +178,15 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 .br
 Copyright (C) 2010\-2013 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man5/cray.conf.5 b/doc/man/man5/cray.conf.5
index 8070123de2b..6cf7e0dd783 100644
--- a/doc/man/man5/cray.conf.5
+++ b/doc/man/man5/cray.conf.5
@@ -17,7 +17,7 @@ Parameter names are case insensitive.
 Any text following a "#" in the configuration file is treated
 as a comment through the end of that line.
 Changes to the configuration file take effect upon restart of
-SLURM daemons, daemon receipt of the SIGHUP signal, or execution
+Slurm daemons, daemon receipt of the SIGHUP signal, or execution
 of the command "scontrol reconfigure" unless otherwise noted.
 .LP
 The configuration parameters available include:
@@ -91,7 +91,7 @@ cases the user will be charged for the entire node.
 
 .TP
 \fBSyncTimeout\fR=<seconds>
-SLURM does not normally schedule jobs while its job or node state information
+Slurm does not normally schedule jobs while its job or node state information
 is out of synchronization with that of ALPS. This parameter specifies a maximum
 time to defer job scheduling while waiting for consistent state.  The
 inconsistent state might be caused by a variety of hardware or software
@@ -118,15 +118,15 @@ SDBdb=XT5istanbul
 .SH "COPYING"
 Copyright (C) 2011-2013 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man5/ext_sensors.conf.5 b/doc/man/man5/ext_sensors.conf.5
index 086cf257f3e..4e5dba5a7bd 100644
--- a/doc/man/man5/ext_sensors.conf.5
+++ b/doc/man/man5/ext_sensors.conf.5
@@ -17,7 +17,7 @@ Any text following a "#" in the configuration file is treated
 as a comment through the end of that line.
 The size of each line in the file is limited to 1024 characters.
 Changes to the configuration file take effect upon restart of
-SLURM daemons, daemon receipt of the SIGHUP signal, or execution
+Slurm daemons, daemon receipt of the SIGHUP signal, or execution
 of the command "scontrol reconfigure" unless otherwise noted.
 
 .LP
@@ -95,15 +95,15 @@ ColdDoorData=temp
 .SH "COPYING"
 Copyright (C) 2013 Bull
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man5/gres.conf.5 b/doc/man/man5/gres.conf.5
index 49d348afd50..39a103aa718 100644
--- a/doc/man/man5/gres.conf.5
+++ b/doc/man/man5/gres.conf.5
@@ -6,7 +6,7 @@ gres.conf \- Slurm configuration file for generic resource management.
 .SH "DESCRIPTION"
 \fBgres.conf\fP is an ASCII file which describes the configuration
 of generic resources on each compute node. Each node must contain a
-gres.conf file if generic resources are to be scheduled by SLURM.
+gres.conf file if generic resources are to be scheduled by Slurm.
 The file location can be modified at system build time using the
 DEFAULT_SLURM_CONF parameter or at execution time by setting the SLURM_CONF
 environment variable. The file will always be located in the
@@ -17,7 +17,7 @@ Parameter names are case insensitive.
 Any text following a "#" in the configuration file is treated
 as a comment through the end of that line.
 Changes to the configuration file take effect upon restart of
-SLURM daemons, daemon receipt of the SIGHUP signal, or execution
+Slurm daemons, daemon receipt of the SIGHUP signal, or execution
 of the command "scontrol reconfigure" unless otherwise noted.
 .LP
 The overall configuration parameters available include:
@@ -44,10 +44,10 @@ resource; an attempt to use other CPUs will not be honored.
 If not specified, then any CPU can be used with the resources, which also
 increases the speed of Slurm's scheduling algorithm.
 If any CPU can be effectively used with the resources, then do not specify the
-\fBCPUs\fR option for improved speed in the SLURM scheduling logic.
+\fBCPUs\fR option for improved speed in the Slurm scheduling logic.
 
-Since SLURM must be able to perform resource management on heterogeneous
-clusters having various CPU ID numbering schemes, use the SLURM CPU index
+Since Slurm must be able to perform resource management on heterogeneous
+clusters having various CPU ID numbering schemes, use the Slurm CPU index
 numbers here
 (CPU_ID = Board_ID x threads_per_board +
 Socket_ID x threads_per_socket +
@@ -70,8 +70,8 @@ device counts.
 Use the \fBFile\fR parameter only if the \fBCount\fR is not sufficient for
 tracking purposes.
 NOTE: If you specify the \fBFile\fR parameter for a resource on some node,
-the option must be specified on all nodes and SLURM will track the assignment
-of each specific resource on each node. Otherwise SLURM will only track a
+the option must be specified on all nodes and Slurm will track the assignment
+of each specific resource on each node. Otherwise Slurm will only track a
 count of allocated resources rather than the state of each individual device
 file.
 
@@ -112,7 +112,7 @@ If \fBType\fR is specified, then \fBCount\fR is limited in size (currently 1024)
 .br
 ##################################################################
 .br
-# SLURM's Generic Resource (GRES) configuration file
+# Slurm's Generic Resource (GRES) configuration file
 .br
 ##################################################################
 .br
@@ -132,7 +132,7 @@ Name=bandwidth Count=20M
 .br
 ##################################################################
 .br
-# SLURM's Generic Resource (GRES) configuration file
+# Slurm's Generic Resource (GRES) configuration file
 .br
 # Use a single gres.conf file for all compute nodes
 .br
@@ -148,15 +148,15 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 .br
 Copyright (C) 2010\-2014 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man5/nonstop.conf.5 b/doc/man/man5/nonstop.conf.5
index 3eeab3cceda..34fa95c574b 100644
--- a/doc/man/man5/nonstop.conf.5
+++ b/doc/man/man5/nonstop.conf.5
@@ -19,7 +19,7 @@ Parameter names are case insensitive.
 Any text following a "#" in the configuration file is treated
 as a comment through the end of that line.
 Changes to the configuration file take effect upon restart of
-SLURM daemons, daemon receipt of the SIGHUP signal, or execution
+Slurm daemons, daemon receipt of the SIGHUP signal, or execution
 of the command "scontrol reconfigure" unless otherwise noted.
 The configuration parameters available include:
 
diff --git a/doc/man/man5/slurm.conf.5 b/doc/man/man5/slurm.conf.5
index de57ba096c5..864766104ac 100644
--- a/doc/man/man5/slurm.conf.5
+++ b/doc/man/man5/slurm.conf.5
@@ -4,7 +4,7 @@
 slurm.conf \- Slurm configuration file
 
 .SH "DESCRIPTION"
-\fBslurm.conf\fP is an ASCII file which describes general SLURM
+\fBslurm.conf\fP is an ASCII file which describes general Slurm
 configuration information, the nodes to be managed, information about
 how those nodes are grouped into partitions, and various scheduling
 parameters associated with those partitions. This file should be
@@ -12,7 +12,7 @@ consistent across all nodes in the cluster.
 .LP
 The file location can be modified at system build time using the
 DEFAULT_SLURM_CONF parameter or at execution time by setting the SLURM_CONF
-environment variable. The SLURM daemons also allow you to override
+environment variable. The Slurm daemons also allow you to override
 both the built\-in and environment\-provided location using the "\-f"
 option on the command line.
 .LP
@@ -20,7 +20,7 @@ The contents of the file are case insensitive except for the names of nodes
 and partitions. Any text following a "#" in the configuration file is treated
 as a comment through the end of that line.
 Changes to the configuration file take effect upon restart of
-SLURM daemons, daemon receipt of the SIGHUP signal, or execution
+Slurm daemons, daemon receipt of the SIGHUP signal, or execution
 of the command "scontrol reconfigure" unless otherwise noted.
 .LP
 If a line begins with the word "Include" followed by whitespace
@@ -31,14 +31,14 @@ MODIFIERS for more details).
 .LP
 Note on file permissions:
 .LP
-The \fIslurm.conf\fR file must be readable by all users of SLURM, since it
-is used by many of the SLURM commands.  Other files that are defined
+The \fIslurm.conf\fR file must be readable by all users of Slurm, since it
+is used by many of the Slurm commands.  Other files that are defined
 in the \fIslurm.conf\fR file, such as log files and job accounting files,
 may need to be created/owned by the user "SlurmUser" to be successfully
 accessed.  Use the "chown" and "chmod" commands to set the ownership
 and permissions appropriately.
 See the section \fBFILE AND DIRECTORY PERMISSIONS\fR for information
-about the various files and directories used by SLURM.
+about the various files and directories used by Slurm.
 
 .SH "PARAMETERS"
 .LP
@@ -104,7 +104,7 @@ Also see \fBDefaultStorageLoc\fR.
 \fBAccountingStoragePass\fR
 The password used to gain access to the database to store the
 accounting data.  Only used for database type storage plugins, ignored
-otherwise.  In the case of SLURM DBD (Database Daemon) with MUNGE
+otherwise.  In the case of Slurm DBD (Database Daemon) with MUNGE
 authentication this can be configured to use a MUNGE daemon
 specifically configured to provide authentication between clusters
 while the default MUNGE daemon provides authentication within a
@@ -131,7 +131,7 @@ will be written to the file specified by the
 value indicates that accounting records will be written to a MySQL or
 MariaDB database specified by the \fBAccountingStorageLoc\fR parameter.
 The "accounting_storage/slurmdbd" value indicates that accounting records
-will be written to the SLURM DBD, which manages an underlying MySQL
+will be written to the Slurm DBD, which manages an underlying MySQL
 database. See "man slurmdbd" for more information.  The
 default value is "accounting_storage/none" and indicates that account
 records are not maintained.
@@ -276,7 +276,7 @@ If not specified, the default authentication information will be used.
 
 .TP
 \fBAuthType\fR
-The authentication method for communications between SLURM
+The authentication method for communications between Slurm
 components.
 Acceptable values at present include "auth/none", "auth/authd",
 and "auth/munge".
@@ -288,10 +288,10 @@ This may be fine for testing purposes, but
 "http://www.theether.org/authd/" for more information. Note that
 authd is no longer actively supported).
 "auth/munge" indicates that LLNL's MUNGE is to be used
-(this is the best supported authentication mechanism for SLURM,
+(this is the best supported authentication mechanism for Slurm,
 see "http://munge.googlecode.com/" for more information).
-All SLURM daemons and commands must be terminated prior to changing
-the value of \fBAuthType\fR and later restarted (SLURM jobs can be
+All Slurm daemons and commands must be terminated prior to changing
+the value of \fBAuthType\fR and later restarted (Slurm jobs can be
 preserved).
 
 .TP
@@ -306,7 +306,7 @@ By default the \fBBackupAddr\fR will be identical in value to
 
 .TP
 \fBBackupController\fR
-The name of the machine where SLURM control functions are to be
+The name of the machine where Slurm control functions are to be
 executed in the event that \fBControlMachine\fR fails. This node
 may also be used as a compute server if so desired. It will come into service
 as a controller only upon the failure of ControlMachine and will revert
@@ -357,10 +357,10 @@ for IBM AIX systems only
 .TP
 \fBcheckpoint/blcr\fR
 Berkeley Lab Checkpoint Restart (BLCR).
-NOTE: If a file is found at sbin/scch (relative to the SLURM installation
+NOTE: If a file is found at sbin/scch (relative to the Slurm installation
 location), it will be executed upon completion of the checkpoint. This can
 be a script used for managing the checkpoint files.
-NOTE: SLURM's BLCR logic only supports batch jobs.
+NOTE: Slurm's BLCR logic only supports batch jobs.
 .TP
 \fBcheckpoint/none\fR
 no checkpoint support (default)
@@ -389,7 +389,7 @@ This option specified the fully qualified pathname of the chos command
 
 .TP
 \fBClusterName\fR
-The name by which this SLURM managed cluster is known in the
+The name by which this Slurm managed cluster is known in the
 accounting database.  This is needed distinguish accounting records
 when multiple clusters report to the same database. Because of limitations
 in some databases, any upper case letters in the name will be silently mapped
@@ -425,7 +425,7 @@ By default the \fBControlAddr\fR will be identical in value to
 
 .TP
 \fBControlMachine\fR
-The short hostname of the machine where SLURM control functions are
+The short hostname of the machine where Slurm control functions are
 executed (i.e. the name returned by the command "hostname \-s", use
 "tux001" rather than "tux001.my.com").
 This value must be specified.
@@ -580,7 +580,7 @@ Job container plugin details
 License management details
 .TP
 \fBNO_CONF_HASH\fR
-Do not log when the slurm.conf files differs between SLURM daemons
+Do not log when the slurm.conf files differs between Slurm daemons
 .TP
 \fBPriority\fR
 Job prioritization
@@ -791,7 +791,7 @@ Consider which value you want to be used for scheduling purposes.
 .TP 5
 \fB0\fR
 Base scheduling decisions upon the actual configuration of each individual
-node except that the node's processor count in SLURM's configuration must
+node except that the node's processor count in Slurm's configuration must
 match the actual hardware configuration if \fBSchedulerType=sched/gang\fR
 or \fBSelectType=select/cons_res\fR are configured (both of those plugins
 maintain resource allocation information using bitmaps for the cores in the
@@ -812,7 +812,7 @@ This option is generally only useful for testing purposes.
 
 .TP
 \fBFirstJobId\fR
-The job id to be used for the first submitted to SLURM without a
+The job id to be used for the first submitted to Slurm without a
 specific requested value. Job id values generated will incremented by 1
 for each subsequent job. This may be used to provide a meta\-scheduler
 with a job id space which is disjoint from the interactive jobs.
@@ -985,7 +985,7 @@ The default value for task sampling interval
 is 30 seconds. The default value for all other intervals is 0.
 An interval of 0 disables sampling of the specified type.
 If the task sampling interval is 0, accounting
-information is collected only at job termination (reducing SLURM
+information is collected only at job termination (reducing Slurm
 interference with the job).
 .br
 .br
@@ -1089,13 +1089,13 @@ used for all other system types
 .TP
 \fBJobCredentialPrivateKey\fR
 Fully qualified pathname of a file containing a private key used for
-authentication by SLURM daemons.
+authentication by Slurm daemons.
 This parameter is ignored if \fBCryptoType=crypto/munge\fR.
 
 .TP
 \fBJobCredentialPublicCertificate\fR
 Fully qualified pathname of a file containing a public key used for
-authentication by SLURM daemons.
+authentication by Slurm daemons.
 This parameter is ignored if \fBCryptoType=crypto/munge\fR.
 
 .TP
@@ -1125,9 +1125,9 @@ These are intended to be site\-specific plugins which can be used to set
 default job parameters and/or logging events.
 Sample plugins available in the distribution include "all_partitions", "cnode",
 "defaults", "logging", "lua", and "partition".
-For examples of use, see the SLURM code in "src/plugins/job_submit" and
+For examples of use, see the Slurm code in "src/plugins/job_submit" and
 "contribs/lua/job_submit*.lua" then modify the code to satisfy your needs.
-SLURM can be configured to use multiple job_submit plugins if desired,
+Slurm can be configured to use multiple job_submit plugins if desired,
 however the lua plugin will only execute one lua script named "job_submit.lua"
 located in the default script directory (typically the subdirectory "etc" of
 the installation directory).
@@ -1181,9 +1181,9 @@ License names can optionally be followed by a colon
 and count with a default count of one.
 Multiple license names should be comma separated (e.g.
 "Licenses=foo:4,bar").
-Note that SLURM prevents jobs from being scheduled if their
+Note that Slurm prevents jobs from being scheduled if their
 required license specification is not available.
-SLURM does not prevent jobs from using licenses that are
+Slurm does not prevent jobs from using licenses that are
 not explicitly listed in the job submission specification.
 
 .TP
@@ -1234,7 +1234,7 @@ It only takes effect upon restart of the slurmctld daemon.
 
 .TP
 \fBMaxJobId\fR
-The maximum job id to be used for jobs submitted to SLURM without a
+The maximum job id to be used for jobs submitted to Slurm without a
 specific requested value. Job id values generated will incremented by 1
 for each subsequent job. This may be used to provide a meta\-scheduler
 with a job id space which is disjoint from the interactive jobs.
@@ -1277,7 +1277,7 @@ The default value is 40000 steps.
 
 .TP
 \fBMaxTasksPerNode\fR
-Maximum number of tasks SLURM will allow a job step to spawn
+Maximum number of tasks Slurm will allow a job step to spawn
 on a single node. The default \fBMaxTasksPerNode\fR is 128.
 May not exceed 65533.
 
@@ -1300,7 +1300,7 @@ necessitate higher values.
 .TP
 \fBMinJobAge\fR
 The minimum age of a completed job before its record is purged from
-SLURM's active database. Set the values of \fBMaxJobCount\fR and
+Slurm's active database. Set the values of \fBMaxJobCount\fR and
   to insure the slurmctld daemon does not exhaust
 its memory or other resources. The default value is 300 seconds.
 A value of zero prevents any job record purging.
@@ -1346,14 +1346,14 @@ A value of "UNLIMITED" is also supported.
 
 .TP
 \fBPluginDir\fR
-Identifies the places in which to look for SLURM plugins.
+Identifies the places in which to look for Slurm plugins.
 This is a colon\-separated list of directories, like the PATH
 environment variable.
 The default value is "/usr/local/lib/slurm".
 
 .TP
 \fBPlugStackConfig\fR
-Location of the config file for SLURM stackable plugins that use
+Location of the config file for Slurm stackable plugins that use
 the Stackable Plugin Architecture for Node job (K)control (SPANK).
 This provides support for a highly configurable set of plugins to
 be called before and/or after execution of each task spawned as
@@ -1428,7 +1428,7 @@ This is not compatible with \fBPreemptMode=OFF\fR.
 .TP
 \fBpreempt/qos\fR
 Job preemption rules are specified by Quality Of Service (QOS) specifications
-in the SLURM database.
+in the Slurm database.
 This is not compatible with \fBPreemptMode=OFF\fR or \fBPreemptMode=SUSPEND\fR
 (i.e. preempted jobs must be removed from the resources).
 .RE
@@ -1612,7 +1612,7 @@ Acceptable values include:
 .RS
 .TP
 \fBaccounts\fR
-(NON-SLURMDBD ACCOUNTING ONLY) Prevents users from viewing any account
+(NON-SlurmDBD ACCOUNTING ONLY) Prevents users from viewing any account
 definitions unless they are coordinators of them.
 .TP
 \fBcloud\fR
@@ -1620,7 +1620,7 @@ Powered down nodes in the cloud are visible.
 .TP
 \fBjobs\fR
 Prevents users from viewing jobs or job steps belonging
-to other users. (NON-SLURMDBD ACCOUNTING ONLY) Prevents users from viewing
+to other users. (NON-SlurmDBD ACCOUNTING ONLY) Prevents users from viewing
 job records belonging to other users unless they are coordinators of
 the association running the job when using sacct.
 .TP
@@ -1635,11 +1635,11 @@ Prevents regular users from viewing reservations which they can not use.
 .TP
 \fBusage\fR
 Prevents users from viewing usage of any other user, this applies to sshare.
-(NON-SLURMDBD ACCOUNTING ONLY) Prevents users from viewing
+(NON-SlurmDBD ACCOUNTING ONLY) Prevents users from viewing
 usage of any other user, this applies to sreport.
 .TP
 \fBusers\fR
-(NON-SLURMDBD ACCOUNTING ONLY) Prevents users from viewing
+(NON-SlurmDBD ACCOUNTING ONLY) Prevents users from viewing
 information of any user other than themselves, this also makes it so users can
 only see associations they deal with.
 Coordinators can see associations of all users they are coordinator of,
@@ -1795,7 +1795,7 @@ the default action is to propagate all limits.
 Only one of the parameters, either
 \fBPropagateResourceLimits\fR or \fBPropagateResourceLimitsExcept\fR,
 may be specified.
-The following limit names are supported by SLURM (although some
+The following limit names are supported by Slurm (although some
 options may not be supported on some systems):
 .RS
 .TP 10
@@ -1903,7 +1903,7 @@ set to the number of times the job has been restarted.
 
 .TP
 \fBResumeProgram\fR
-SLURM supports a mechanism to reduce power consumption on nodes that
+Slurm supports a mechanism to reduce power consumption on nodes that
 remain idle for an extended period of time.
 This is typically accomplished by reducing voltage and frequency or powering
 the node down.
@@ -1915,13 +1915,13 @@ If \fBResumeProgram\fR is unable to restore a node to service, it should
 requeue any node associated with the node and set the node state to DRAIN.
 The program executes as \fBSlurmUser\fR.
 The argument to the program will be the names of nodes to
-be removed from power savings mode (using SLURM's hostlist
+be removed from power savings mode (using Slurm's hostlist
 expression format).
 By default no program is run.
 Related configuration options include \fBResumeTimeout\fR, \fBResumeRate\fR,
 \fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendTimeout\fR, \fBSuspendProgram\fR,
 \fBSuspendExcNodes\fR, and \fBSuspendExcParts\fR.
-More information is available at the SLURM web site
+More information is available at the Slurm web site
 ( http://slurm.schedmd.com/power_save.html ).
 
 .TP
@@ -1947,7 +1947,7 @@ The default value is 60 seconds.
 Related configuration options include \fBResumeProgram\fR, \fBResumeRate\fR,
 \fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendTimeout\fR, \fBSuspendProgram\fR,
 \fBSuspendExcNodes\fR and \fBSuspendExcParts\fR.
-More information is available at the SLURM web site
+More information is available at the Slurm web site
 ( http://slurm.schedmd.com/power_save.html ).
 
 .TP
@@ -2555,14 +2555,14 @@ Setting a value for \fBDefMemPerCPU\fR is strongly recommended.
 The name of the user that the \fBslurmctld\fR daemon executes as.
 For security purposes, a user other than "root" is recommended.
 This user must exist on all nodes of the cluster for authentication
-of communications between SLURM components.
+of communications between Slurm components.
 The default value is "root".
 
 .TP
 \fBSlurmdUser\fR
 The name of the user that the \fBslurmd\fR daemon executes as.
 This user must exist on all nodes of the cluster for authentication
-of communications between SLURM components.
+of communications between Slurm components.
 The default value is "root".
 
 .TP
@@ -2620,13 +2620,13 @@ The default value is "/var/run/slurmctld.pid".
 
 .TP
 \fBSlurmctldPlugstack\fR
-A comma delimited list of SLURM controller plugins to be started when the
+A comma delimited list of Slurm controller plugins to be started when the
 daemon begins and terminated when it ends.
 Only the plugin's init and fini functions are called.
 
 .TP
 \fBSlurmctldPort\fR
-The port number that the SLURM controller, \fBslurmctld\fR, listens
+The port number that the Slurm controller, \fBslurmctld\fR, listens
 to for work. The default value is SLURMCTLD_PORT as established at system
 build time. If none is explicitly specified, it will be set to 6817.
 \fBSlurmctldPort\fR may also be configured to support a range of port
@@ -2687,7 +2687,7 @@ logs are written.
 The default value is none (performs logging via syslog).
 Any "%h" within the name is replaced with the hostname on which the
 \fBslurmd\fR is running.
-Any "%n" within the name is replaced with the SLURM node name on which the
+Any "%n" within the name is replaced with the Slurm node name on which the
 \fBslurmd\fR is running.
 .br
 See the section \fBLOGGING\fR if a pathname is specified.
@@ -2698,19 +2698,19 @@ Fully qualified pathname of a file into which the  \fBslurmd\fR daemon may write
 its process id. This may be used for automated signal processing.
 Any "%h" within the name is replaced with the hostname on which the
 \fBslurmd\fR is running.
-Any "%n" within the name is replaced with the SLURM node name on which the
+Any "%n" within the name is replaced with the Slurm node name on which the
 \fBslurmd\fR is running.
 The default value is "/var/run/slurmd.pid".
 
 .TP
 \fBSlurmdPlugstack\fR
-A comma delimited list of SLURM compute node plugins to be started when the
+A comma delimited list of Slurm compute node plugins to be started when the
 daemon begins and terminated when it ends.
 Only the plugin's init and fini functions are called.
 
 .TP
 \fBSlurmdPort\fR
-The port number that the SLURM compute node daemon, \fBslurmd\fR, listens
+The port number that the Slurm compute node daemon, \fBslurmd\fR, listens
 to for work. The default value is SLURMD_PORT as established at system
 build time. If none is explicitly specified, its value will be 6818.
 NOTE: Either slurmctld and slurmd daemons must not execute
@@ -2726,19 +2726,19 @@ is local to each node (reference a local file system). The default value
 is "/var/spool/slurmd".
 Any "%h" within the name is replaced with the hostname on which the
 \fBslurmd\fR is running.
-Any "%n" within the name is replaced with the SLURM node name on which the
+Any "%n" within the name is replaced with the Slurm node name on which the
 \fBslurmd\fR is running.
 
 .TP
 \fBSlurmdTimeout\fR
-The interval, in seconds, that the SLURM controller waits for \fBslurmd\fR
+The interval, in seconds, that the Slurm controller waits for \fBslurmd\fR
 to respond before configuring that node's state to DOWN.
 A value of zero indicates the node will not be tested by \fBslurmctld\fR to
 confirm the state of \fBslurmd\fR, the node will not be automatically set to
 a DOWN state indicating a non\-responsive \fBslurmd\fR, and some other tool
 will take responsibility for monitoring the state of each compute node
 and its \fBslurmd\fR daemon.
-SLURM's hierarchical communication mechanism is used to ping the \fBslurmd\fR
+Slurm's hierarchical communication mechanism is used to ping the \fBslurmd\fR
 daemons in order to minimize system noise and overhead.
 The default value is 300 seconds.
 The value may not exceed 65533 seconds.
@@ -2808,9 +2808,9 @@ executing.
 
 .TP
 \fBStateSaveLocation\fR
-Fully qualified pathname of a directory into which the SLURM controller,
+Fully qualified pathname of a directory into which the Slurm controller,
 \fBslurmctld\fR, saves its state (e.g. "/usr/local/slurm/checkpoint").
-SLURM state will saved here to recover from system failures.
+Slurm state will be saved here to recover from system failures.
 \fBSlurmUser\fR must be able to create files in this directory.
 If you have a \fBBackupController\fR configured, this location should be
 readable and writable by both systems.
@@ -2824,7 +2824,7 @@ into this directory.
 \fBSuspendExcNodes\fR
 Specifies the nodes which are to not be placed in power save mode, even
 if the node remains idle for an extended period of time.
-Use SLURM's hostlist expression to identify nodes.
+Use Slurm's hostlist expression to identify nodes.
 By default no nodes are excluded.
 Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR,
 \fBResumeRate\fR, \fBSuspendProgram\fR, \fBSuspendRate\fR, \fBSuspendTime\fR,
@@ -2849,7 +2849,7 @@ This can be used to reduce the frequency and voltage of a node or
 completely power the node off.
 The program executes as \fBSlurmUser\fR.
 The argument to the program will be the names of nodes to
-be placed into power savings mode (using SLURM's hostlist
+be placed into power savings mode (using Slurm's hostlist
 expression format).
 By default, no program is run.
 Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR,
@@ -2886,7 +2886,7 @@ The default value is 30 seconds.
 Related configuration options include \fBResumeProgram\fR, \fBResumeRate\fR,
 \fBResumeTimeout\fR, \fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendProgram\fR,
 \fBSuspendExcNodes\fR and \fBSuspendExcParts\fR.
-More information is available at the SLURM web site
+More information is available at the Slurm web site
 ( http://slurm.schedmd.com/power_save.html ).
 
 .TP
@@ -2898,7 +2898,7 @@ Acceptable values include
 or termination (Myrinet, Ethernet, and InfiniBand) and
 "switch/nrt" for IBM's Network Resource Table API.
 The default value is "switch/none".
-All SLURM daemons, commands and running jobs must be restarted for a
+All Slurm daemons, commands and running jobs must be restarted for a
 change in \fBSwitchType\fR to take effect.
 If running jobs exist at the time \fBslurmctld\fR is restarted with a new
 value of \fBSwitchType\fR, records of all jobs in any state may be lost.
@@ -3068,7 +3068,7 @@ systems, best\-fit logic over three\-dimensional topology
 .TP
 \fBtopology/node_rank\fR
 orders nodes based upon information a node_rank field in the node record
-as generated by a select plugin. SLURM performs a best\-fit algorithm over
+as generated by a select plugin. Slurm performs a best\-fit algorithm over
 those ordered nodes
 .TP
 \fBtopology/none\fR
@@ -3111,7 +3111,7 @@ By default no program is run.
 
 .TP
 \fBUnkillableStepTimeout\fR
-The length of time, in seconds, that SLURM will wait before deciding that
+The length of time, in seconds, that Slurm will wait before deciding that
 processes in a job step are unkillable (after they have been signaled with
 SIGKILL) and execute \fBUnkillableStepProgram\fR as described above.
 The default timeout value is 60 seconds.
@@ -3125,7 +3125,7 @@ limits. Changing the upper bound of a resource limit will not alter the limits
 of running jobs, only jobs started after a change has been made will pick up
 the new limits.
 The default value is 0 (not to enable PAM support).
-Remember that PAM also needs to be configured to support SLURM as a service.
+Remember that PAM also needs to be configured to support Slurm as a service.
 For sites using PAM's directory based configuration option, a configuration
 file named \fBslurm\fR should be created. The module\-type, control\-flags, and
 module\-path names that should be included in the file are:
@@ -3166,7 +3166,7 @@ The default value is 0, which disables this feature.
 May not exceed 65533 seconds.
 
 .LP
-The configuration of nodes (or machines) to be managed by SLURM is
+The configuration of nodes (or machines) to be managed by Slurm is
 also specified in \fB/etc/slurm.conf\fR.
 Changes in node configuration (e.g. adding nodes, changing their
 processor count, etc.) require restarting both the slurmctld daemon
@@ -3180,7 +3180,7 @@ especially if the cluster is heterogeneous.
 Nodes which register to the system with less than the configured resources
 (e.g. too little memory), will be placed in the "DOWN" state to
 avoid scheduling jobs on them.
-Establishing baseline configurations will also speed SLURM's
+Establishing baseline configurations will also speed Slurm's
 scheduling process by permitting it to compare job requirements
 against these (relatively few) configuration parameters and
 possibly avoid having to check job requirements
@@ -3218,7 +3218,7 @@ in a DOWN, DRAIN or FAILING state without altering permanent
 configuration information.
 A job step's tasks are allocated to nodes in order the nodes appear
 in the configuration file. There is presently no capability within
-SLURM to arbitrarily order a job step's tasks.
+Slurm to arbitrarily order a job step's tasks.
 .LP
 Multiple node names may be comma separated (e.g. "alpha,beta,gamma")
 and/or a simple node range expression may optionally be used to
@@ -3245,7 +3245,7 @@ The node configuration specified the following information:
 
 .TP
 \fBNodeName\fR
-Name that SLURM uses to refer to a node (or base partition for
+Name that Slurm uses to refer to a node (or base partition for
 BlueGene systems).
 Typically this would be the string that "/bin/hostname \-s" returns.
 It may also be the fully qualified domain name as returned by "/bin/hostname \-f"
@@ -3386,7 +3386,7 @@ Limit on combined real memory allocation for compute node daemons
 
 .TP
 \fBPort\fR
-The port number that the SLURM compute node daemon, \fBslurmd\fR, listens
+The port number that the Slurm compute node daemon, \fBslurmd\fR, listens
 to for work on this particular node. By default there is a single port number
 for all \fBslurmd\fR daemons on all compute nodes as defined by the
 \fBSlurmdPort\fR configuration parameter. Use of this option is not generally
@@ -3443,7 +3443,7 @@ Also see the \fBDownNodes\fR parameter below.
 \fBCLOUD\fP
 Indicates the node exists in the cloud.
 It's initial state will be treated as powered down.
-The node will be available for use after it's state is recovered from SLURM's
+The node will be available for use after its state is recovered from Slurm's
 state save file or the slurmd daemon starts on the compute node.
 .TP
 \fBDOWN\fP
@@ -3464,11 +3464,11 @@ to any new jobs.
 .TP
 \fBFUTURE\fP
 Indicates the node is defined for future use and need not
-exist when the SLURM daemons are started. These nodes can be made available
+exist when the Slurm daemons are started. These nodes can be made available
 for use simply by updating the node state using the scontrol command rather
 than restarting the slurmctld daemon. After these nodes are made available,
 change their \fRState\fR in the slurm.conf file. Until these nodes are made
-available, they will not be seen using any SLURM commands or nor will
+available, they will not be seen using any Slurm commands nor will
 any attempt be made to contact them.
 .TP
 \fBUNKNOWN\fP
@@ -3481,7 +3481,7 @@ The default value is "UNKNOWN".
 .TP
 \fBThreadsPerCore\fR
 Number of logical threads in a single physical core (e.g. "2").
-Note that the SLURM can allocate resources to jobs down to the
+Note that Slurm can allocate resources to jobs down to the
 resolution of a core. If your system is configured with more than
 one thread per core, execution of a different job on each thread
 is not supported unless you configure \fBSelectTypeParameters=CR_CPU\fR
@@ -3621,7 +3621,7 @@ May not be used with the \fBAllowUsers\fR option.
 
 .TP
 \fBFrontendName\fR
-Name that SLURM uses to refer to a frontend node.
+Name that Slurm uses to refer to a frontend node.
 Typically this would be the string that "/bin/hostname \-s" returns.
 It may also be the fully qualified domain name as returned by "/bin/hostname \-f"
 (e.g. "foo1.bar.com"), or any valid domain name associated with the host
@@ -3657,7 +3657,7 @@ By default, the \fBFrontendAddr\fR will be identical in value to
 
 .TP
 \fBPort\fR
-The port number that the SLURM compute node daemon, \fBslurmd\fR, listens
+The port number that the Slurm compute node daemon, \fBslurmd\fR, listens
 to for work on this particular frontend node. By default there is a single port
 number for all \fBslurmd\fR daemons on all frontend nodes as defined by the
 \fBSlurmdPort\fR configuration parameter. Use of this option is not generally
@@ -3743,14 +3743,14 @@ If user root attempts to execute a job as another user (e.g. using
 srun's \-\-uid option), this other user must be in one of groups
 identified by AllowGroups for the job to successfully execute.
 The default value is "ALL".
-\fBNOTE:\fR For performance reasons, SLURM maintains a list of user IDs
+\fBNOTE:\fR For performance reasons, Slurm maintains a list of user IDs
 allowed to use each partition and this is checked at job submission time.
 This list of user IDs is updated when the \fBslurmctld\fR daemon is restarted,
 reconfigured (e.g. "scontrol reconfig") or the partition's \fBAllowGroups\fR
 value is reset, even if is value is unchanged
 (e.g. "scontrol update PartitionName=name AllowGroups=group").
 For a user's access to a partition to change, both his group membership must
-change and SLURM's internal user ID list must change using one of the methods
+change and Slurm's internal user ID list must change using one of the methods
 described above.
 
 .TP
@@ -3840,7 +3840,7 @@ this partition.
 .TP
 \fBHidden\fR
 Specifies if the partition and its jobs are to be hidden by default.
-Hidden partitions will by default not be reported by the SLURM APIs or commands.
+Hidden partitions will by default not be reported by the Slurm APIs or commands.
 Possible values are "YES" and "NO".
 The default value is "NO".
 Note that partitions that a user lacks access to by virtue of the
@@ -4189,7 +4189,7 @@ Name of the job.
 Available in \fBPrologSlurmctld\fR and \fBEpilogSlurmctld\fR only.
 .TP
 \fBSLURM_JOB_NODELIST\fR
-Nodes assigned to job. A SLURM hostlist expression.
+Nodes assigned to job. A Slurm hostlist expression.
 "scontrol show hostnames" can be used to convert this to a
 list of individual host names.
 Available in \fBPrologSlurmctld\fR and \fBEpilogSlurmctld\fR only.
@@ -4205,15 +4205,15 @@ User ID of the job's owner.
 User name of the job's owner.
 
 .SH "NETWORK TOPOLOGY"
-SLURM is able to optimize job allocations to minimize network contention.
-Special SLURM logic is used to optimize allocations on systems with a
+Slurm is able to optimize job allocations to minimize network contention.
+Special Slurm logic is used to optimize allocations on systems with a
 three\-dimensional interconnect (BlueGene, Sun Constellation, etc.)
 and information about configuring those systems are available on
 web pages available here: <http://slurm.schedmd.com/>.
-For a hierarchical network, SLURM needs to have detailed information
+For a hierarchical network, Slurm needs to have detailed information
 about how nodes are configured on the network switches.
 .LP
-Given network topology information, SLURM allocates all of a job's
+Given network topology information, Slurm allocates all of a job's
 resources onto a single leaf of the network (if possible) using a best\-fit
 algorithm.
 Otherwise it will allocate a job's resources onto multiple leaf switches
@@ -4230,7 +4230,7 @@ upon information contained in a topology.conf file,
 see "man topology.conf" for more information).
 Future plugins may gather topology information directly from the network.
 The topology information is optional.
-If not provided, SLURM will perform a best\-fit algorithm assuming the
+If not provided, Slurm will perform a best\-fit algorithm assuming the
 nodes are in a one\-dimensional array as configured and the communications
 cost is related to the node distance in this array.
 
@@ -4240,13 +4240,13 @@ will be out of service for an extended period of time, it may be
 desirable to relocate them.
 In order to do so, follow this procedure:
 .LP
-1. Stop the SLURM daemons
+1. Stop the Slurm daemons
 .br
 2. Modify the slurm.conf file appropriately
 .br
 3. Distribute the updated slurm.conf file to all nodes
 .br
-4. Restart the SLURM daemons
+4. Restart the Slurm daemons
 .LP
 There should be no loss of any running or pending jobs.
 Insure that any nodes added to the cluster have the current
@@ -4510,7 +4510,7 @@ The file must be accessible by the primary and backup control machines.
 
 .SH "LOGGING"
 .LP
-Note that while SLURM daemons create log files and other files as needed,
+Note that while Slurm daemons create log files and other files as needed,
 it treats the lack of parent directories as a fatal error.
 This prevents the daemons from running if critical file systems are
 not mounted and will minimize the risk of cold\-starting (starting
@@ -4521,7 +4521,7 @@ may need to be created/owned by the "SlurmUser" uid to be successfully
 accessed.  Use the "chown" and "chmod" commands to set the ownership
 and permissions appropriately.
 See the section \fBFILE AND DIRECTORY PERMISSIONS\fR for information
-about the various files and directories used by SLURM.
+about the various files and directories used by Slurm.
 .LP
 It is recommended that the logrotate utility be used to insure that
 various log files do not become too large.
@@ -4534,7 +4534,7 @@ See the \fBlogrotate\fR man page for more details.
 .LP
 ##
 .br
-# SLURM Logrotate Configuration
+# Slurm Logrotate Configuration
 .br
 ##
 .br
@@ -4581,15 +4581,15 @@ Copyright (C) 2008\-2010 Lawrence Livermore National Security.
 .br
 Copyright (C) 2010-2013 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man5/slurmdbd.conf.5 b/doc/man/man5/slurmdbd.conf.5
index 55844519b0b..35602f04072 100644
--- a/doc/man/man5/slurmdbd.conf.5
+++ b/doc/man/man5/slurmdbd.conf.5
@@ -110,7 +110,7 @@ mechanism being used.
 
 .TP
 \fBAuthType\fR
-Define the authentication method for communications between SLURM
+Define the authentication method for communications between Slurm
 components.
 Acceptable values at present include "auth/none", "auth/authd",
 and "auth/munge".
@@ -121,7 +121,7 @@ This may be fine for testing purposes, but
 "auth/authd" indicates that Brett Chun's authd is to be used (see
 "http://www.theether.org/authd/" for more information).
 "auth/munge" indicates that LLNL's Munge system is to be used
-(this is the best supported authentication mechanism for SLURM,
+(this is the best supported authentication mechanism for Slurm,
 see "https://code.google.com/p/munge/" for more information).
 SlurmDBD must be terminated prior to changing the value of \fBAuthType\fR
 and later restarted.
@@ -283,7 +283,7 @@ The default value is "/var/run/slurmdbd.pid".
 
 .TP
 \fBPluginDir\fR
-Identifies the places in which to look for SLURM plugins.
+Identifies the places in which to look for Slurm plugins.
 This is a colon\-separated list of directories, like the PATH
 environment variable.
 The default value is "/usr/local/lib/slurm".
@@ -508,15 +508,15 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 .br
 Copyright (C) 2010\-2014 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man5/topology.conf.5 b/doc/man/man5/topology.conf.5
index 2aea3f13910..e837c1919ca 100644
--- a/doc/man/man5/topology.conf.5
+++ b/doc/man/man5/topology.conf.5
@@ -15,12 +15,12 @@ Parameter names are case insensitive.
 Any text following a "#" in the configuration file is treated
 as a comment through the end of that line.
 Changes to the configuration file take effect upon restart of
-SLURM daemons, daemon receipt of the SIGHUP signal, or execution
+Slurm daemons, daemon receipt of the SIGHUP signal, or execution
 of the command "scontrol reconfigure" unless otherwise noted.
 .LP
 The network topology configuration one one line defining a switch name and
 its children, either node names or switch names.
-SLURM's hostlist expression parser is used, so the node and switch
+Slurm's hostlist expression parser is used, so the node and switch
 names need not be consecutive (e.g. "Nodes=tux[0\-3,12,18\-20]"
 and "Switches=s[0\-2,4\-8,12]" will parse fine).
 An optional link speed may also be specified.
@@ -33,7 +33,7 @@ The overall configuration parameters available include:
 
 .TP
 \fBSwitchName\fR
-The name of a switch. This name is internal to SLURM and arbitrary.
+The name of a switch. This name is internal to Slurm and arbitrary.
 Each switch should have a unique name.
 This field must be specified.
 .TP
@@ -55,7 +55,7 @@ It may be used in the future to optimize resource allocations.
 .br
 ##################################################################
 .br
-# SLURM's network topology configuration file for use with the
+# Slurm's network topology configuration file for use with the
 .br
 # topology/tree plugin
 .br
@@ -75,15 +75,15 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 .br
 Copyright (C) 2010\-2013 SchedMD LLC.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man5/wiki.conf.5 b/doc/man/man5/wiki.conf.5
index 96bfa481bf0..a8eb9022595 100644
--- a/doc/man/man5/wiki.conf.5
+++ b/doc/man/man5/wiki.conf.5
@@ -4,7 +4,7 @@
 wiki.conf \- Slurm configuration file for wiki and wiki2 scheduler plugins
 .SH "DESCRIPTION"
 \fBwiki.conf\fP is an ASCII file which describes wiki and wiki2
-scheduler specific SLURM configuration information.
+scheduler specific Slurm configuration information.
 The file location can be modified at system build time using the
 DEFAULT_SLURM_CONF parameter or at execution time by setting the SLURM_CONF
 environment variable. The file will always be located in the
@@ -14,7 +14,7 @@ Parameter names are case insensitive.
 Any text following a "#" in the configuration file is treated
 as a comment through the end of that line.
 Changes to the configuration file take effect upon restart of
-SLURM daemons, daemon receipt of the SIGHUP signal, or execution
+Slurm daemons, daemon receipt of the SIGHUP signal, or execution
 of the command "scontrol reconfigure" unless otherwise noted.
 .LP
 The overall configuration parameters available include:
@@ -44,7 +44,7 @@ Not applicable to wiki plugin, only the wiki2 plugin.
 
 .TP
 \fBEPort\fR
-Port to be used to notify Moab of events (job submitted to SLURM,
+Port to be used to notify Moab of events (job submitted to Slurm,
 job terminates, etc.).
 This numeric value should match EPORT configured in the
 \fBmoab.cnf\fR file.
@@ -53,25 +53,25 @@ Not applicable to wiki plugin, only the wiki2 plugin.
 .TP
 \fBExcludePartitions\fR
 Identifies partitions whose jobs are to be scheduled directly
-by SLURM rather than Moab/Maui.
-This only effects jobs which are submitted using SLURM
+by Slurm rather than Moab/Maui.
+This only affects jobs which are submitted using Slurm
 commands (i.e. srun, salloc or sbatch, NOT msub from Moab).
 These jobs will be scheduled on a First\-Come\-First\-Served
-basis directly by SLURM.
-Note that SLURM recognizes jobs submitted via msub based
+basis directly by Slurm.
+Note that Slurm recognizes jobs submitted via msub based
 upon the value \fBFirstJobId\fR configured in \fIslurm.conf\fR.
 Set the values \fBMINJOBID\fR and \fBMAXJOBID\fR in \fImoab.cfg\fR
 accordingly.
 For example MINJOBID=1, MAXJOBID=65535 and FirstJobId=65536.
 Jobs submitted using msub will have job ID values in the range
-of 1 and 65535 while jobs submitted directly using SLURM commands
+of 1 and 65535 while jobs submitted directly using Slurm commands
 will have a job ID of 65536 or higher.
 Moab/Maui controls for resource reservation, fair share
 scheduling, etc. will not apply to the initiation of these jobs.
 While Moab/Maui will not control the initiation of jobs in these
 partitions, it will account for and report the jobs.
 If more than one partition is to be scheduled directly by
-SLURM, use a comma separator between their names.
+Slurm, use a comma separator between their names.
 This may provide faster response times than Moab/Maui scheduling.
 
 .TP
@@ -79,7 +79,7 @@ This may provide faster response times than Moab/Maui scheduling.
 Identifies partitions whose jobs are not to be reported to Moab/Maui.
 These jobs will not be accounted for or otherwise visible to Moab/Maui.
 Any partitions listed here must also be listed in \fBExcludePartitions\fR.
+This only affects jobs which are submitted using Slurm commands (i.e.
+This only effects jobs which are submitted using Slurm commands (i.e.
 \fIsrun\fR, \fIsalloc\fR or \fIsbatch\fR, NOT \fImsub\fR from Moab).
 If more than one partition is to have its jobs hidden, use a comma
 separator between their names.
@@ -96,7 +96,7 @@ separator between their names.
 
 .TP
 \fBHostFormat\fR
-Controls the format of host lists exchanged between SLURM and Moab.
+Controls the format of host lists exchanged between Slurm and Moab.
 The default value is "0".
 Not applicable to wiki plugin, only the wiki2 plugin.
 
@@ -106,12 +106,12 @@ Not applicable to wiki plugin, only the wiki2 plugin.
 No data compression. Each host name is listed individually.
 .TP
 \fB1\fR
-SLURM hostlist expressions are exchanged with task counts
+Slurm hostlist expressions are exchanged with task counts
 (e.g. "tux[0\-16]*2") in job state information and job
 initiation requests.
 .TP
 \fB2\fR
-SLURM hostlist expressions are used to report node state information.
+Slurm hostlist expressions are used to report node state information.
 .RE
 
 .TP
@@ -134,7 +134,7 @@ Not applicable to wiki plugin, only the wiki2 plugin.
 .RS
 .TP
 \fBrun\fR
-Job permitted to run directly under SLURM's control
+Job permitted to run directly under Slurm's control
 .TP
 \fBhold\fR
 Hold all incoming jobs until Moab or Maui tell them to run
@@ -145,7 +145,7 @@ Hold all incoming jobs until Moab or Maui tell them to run
 .br
 ##################################################################
 .br
-# SLURM's configuration file for sched/wiki plugin
+# Slurm's configuration file for sched/wiki plugin
 .br
 ##################################################################
 .br
@@ -188,15 +188,15 @@ Copyright (C) 2006-2007 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man8/slurmctld.8 b/doc/man/man8/slurmctld.8
index 7e62ec694a6..7cd1e5b76bb 100644
--- a/doc/man/man8/slurmctld.8
+++ b/doc/man/man8/slurmctld.8
@@ -71,7 +71,7 @@ The following environment variables can be used to override settings
 compiled into slurmctld.
 .TP 20
 \fBSLURM_CONF\fR
-The location of the SLURM configuration file. This is overridden by
+The location of the Slurm configuration file. This is overridden by
 explicitly naming a configuration file on the command line.
 
 .SH "CORE FILE LOCATION"
@@ -102,15 +102,15 @@ Copyright (C) 2008\-2010 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man8/slurmd.8 b/doc/man/man8/slurmd.8
index 7b497bd30e1..2b6df127cc5 100644
--- a/doc/man/man8/slurmd.8
+++ b/doc/man/man8/slurmd.8
@@ -1,7 +1,7 @@
 .TH slurmd "8" "Slurm Daemon" "April 2015" "Slurm Daemon"
 
 .SH "NAME"
-slurmd \- The compute node daemon for SLURM.
+slurmd \- The compute node daemon for Slurm.
 
 .SH "SYNOPSIS"
 \fBslurmd\fR [\fIOPTIONS\fR...]
@@ -54,7 +54,7 @@ Also note the \fBPropagatePrioProcess\fR configuration parameter.
 .TP
 \fB\-N <hostname>\fR
 Run the daemon with the given hostname. Used to emulate a larger system
-with more than one slurmd daemon per node. Requires that SLURM be built using
+with more than one slurmd daemon per node. Requires that Slurm be built using
 the \-\-enable\-multiple\-slurmd configure option.
 
 .TP
@@ -69,7 +69,7 @@ The following environment variables can be used to override settings
 compiled into slurmd.
 .TP 20
 \fBSLURM_CONF\fR
-The location of the SLURM configuration file.  This is overridden by
+The location of the Slurm configuration file.  This is overridden by
 explicitly naming a configuration file on the command line.
 
 .SH "CORE FILE LOCATION"
@@ -98,15 +98,15 @@ Copyright (C) 2008\-2010 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man8/slurmdbd.8 b/doc/man/man8/slurmdbd.8
index 2b7ef3c9e9d..7242f500da3 100644
--- a/doc/man/man8/slurmdbd.8
+++ b/doc/man/man8/slurmdbd.8
@@ -52,15 +52,15 @@ Copyright (C) 2008 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man8/slurmstepd.8 b/doc/man/man8/slurmstepd.8
index d36dc49e025..c8258c032ed 100644
--- a/doc/man/man8/slurmstepd.8
+++ b/doc/man/man8/slurmstepd.8
@@ -1,11 +1,11 @@
 .TH slurmstepd "8" "Slurm Component" "April 2015" "Slurm Component"
 
 .SH "NAME"
-slurmstepd \- The job step manager for SLURM.
+slurmstepd \- The job step manager for Slurm.
 .SH "SYNOPSIS"
 \fBslurmstepd\fR
 .SH "DESCRIPTION"
-\fBslurmstepd\fR is a job step manager for SLURM.
+\fBslurmstepd\fR is a job step manager for Slurm.
 It is spawned by the \fBslurmd\fR daemon when a job step is launched
 and terminates when the job step does.
 It is responsible for managing input and output (stdin, stdout and stderr)
@@ -17,15 +17,15 @@ Copyright (C) 2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
diff --git a/doc/man/man8/spank.8 b/doc/man/man8/spank.8
index 62c5dfdc2e5..956f006e06b 100644
--- a/doc/man/man8/spank.8
+++ b/doc/man/man8/spank.8
@@ -1,22 +1,22 @@
 .TH SPANK "8" "Slurm Component" "April 2015" "Slurm Component"
 
 .SH "NAME"
-\fBSPANK\fR \- SLURM Plug\-in Architecture for Node and job (K)control
+\fBSPANK\fR \- Slurm Plug\-in Architecture for Node and job (K)control
 
 .SH "DESCRIPTION"
-This manual briefly describes the capabilities of the SLURM Plug\-in
+This manual briefly describes the capabilities of the Slurm Plug\-in
 architecture for Node and job Kontrol (\fBSPANK\fR) as well as the \fBSPANK\fR
 configuration file: (By default: \fBplugstack.conf\fP.)
 .LP
 \fBSPANK\fR provides a very generic interface for stackable plug\-ins
 which may be used to dynamically modify the job launch code in
-SLURM. \fBSPANK\fR plugins may be built without access to SLURM source
-code. They need only be compiled against SLURM's \fBspank.h\fR header file,
+Slurm. \fBSPANK\fR plugins may be built without access to Slurm source
+code. They need only be compiled against Slurm's \fBspank.h\fR header file,
 added to the \fBSPANK\fR config file \fBplugstack.conf\fR,
 and they will be loaded at runtime during the next job launch. Thus,
 the \fBSPANK\fR infrastructure provides administrators and other developers
 a low cost, low effort ability to dynamically modify the runtime
-behavior of SLURM job launch.
+behavior of Slurm job launch.
 .LP
 \fBNote\fR: \fBSPANK\fR plugins using the Slurm APIs need to be recompiled when
 upgrading Slurm to a new major release.
@@ -24,7 +24,7 @@ upgrading Slurm to a new major release.
 
 .SH "SPANK PLUGINS"
 \fBSPANK\fR plugins are loaded in up to five separate contexts during a
-\fBSLURM\fR job. Briefly, the three contexts are:
+\fBSlurm\fR job. Briefly, the five contexts are:
 .TP 8
 \fBlocal\fB
 In \fBlocal\fR context, the plugin is loaded by \fBsrun\fR. (i.e. the "local"
@@ -65,7 +65,7 @@ Plugins may query the context in which they are running with the
 \fBspank_context\fR and \fBspank_remote\fR functions defined in
 \fB<slurm/spank.h>\fR.
 .LP
-\fBSPANK\fR plugins may be called from multiple points during the SLURM job
+\fBSPANK\fR plugins may be called from multiple points during the Slurm job
 launch. A plugin may define the following functions:
 .TP 2
 \fBslurm_spank_init\fR
@@ -111,7 +111,7 @@ tasks have completed fork (2), this call is guaranteed to run before
 the user task is executed. (remote context only)
 .TP
 \fBslurm_spank_task_exit\fR
-Called for each task as its exit status is collected by SLURM.
+Called for each task as its exit status is collected by Slurm.
 (remote context only)
 .TP
 \fBslurm_spank_exit\fR
@@ -132,7 +132,7 @@ All of these functions have the same prototype, for example:
 .fi
 .LP
 Where \fBspank\fR is the \fBSPANK\fR handle which must be passed back to
-SLURM when the plugin calls functions like \fBspank_get_item\fR and
+Slurm when the plugin calls functions like \fBspank_get_item\fR and
 \fBspank_getenv\fR. Configured arguments (See \fBCONFIGURATION\fR
 below) are passed in the argument vector \fBargv\fR with argument
 count \fBac\fR.
@@ -150,7 +150,7 @@ which has the following prototype:
 .LP
 The return value is 1 if the symbol is supported, 0 if not.
 .LP
-\fBSPANK\fR plugins do not have direct access to internally defined SLURM
+\fBSPANK\fR plugins do not have direct access to internally defined Slurm
 data structures. Instead, information about the currently executing
 job is obtained via the \fBspank_get_item\fR function call.
 .nf
@@ -208,13 +208,13 @@ the standard process environment using \fBsetenv\fR (3), \fBgetenv\fR (3),
 and \fBunsetenv\fR (3) may be used in local context.
 .LP
 Functions are also available from within the \fBSPANK\fR plugins to
-establish environment variables to be exported to the SLURM
+establish environment variables to be exported to the Slurm
 \fBPrologSlurmctld\fR, \fBProlog\fR, \fBEpilog\fR and \fBEpilogSlurmctld\fR
 programs (the so-called \fBjob control\fR environment).
 The name of environment variables established by these calls will be prepended
 with the string \fISPANK_\fR in order to avoid any security implications
 of arbitrary environment variable control. (After all, the job control
-scripts do run as root or the SLURM user.).
+scripts do run as root or the Slurm user.)
 .LP
 These functions are available from \fBlocal\fR context only.
 .nf
@@ -246,13 +246,13 @@ string representation.
 .LP
 SPANK plugins also have an interface through which they may define
 and implement extra job options. These options are made available to
-the user through SLURM commands such as \fBsrun\fR(1), \fBsalloc\fR(1),
+the user through Slurm commands such as \fBsrun\fR(1), \fBsalloc\fR(1),
 and \fBsbatch\fR(1). if the option is specified by the user, its value is
 forwarded and registered with the plugin in slurmd when the job is run.
 In this way, \fBSPANK\fR plugins may dynamically provide new options and
-functionality to SLURM.
+functionality to Slurm.
 .LP
-Each option registered by a plugin to SLURM takes the form of
+Each option registered by a plugin to Slurm takes the form of
 a \fBstruct spank_option\fR which is declared in \fB<slurm/spank.h>\fR as
 .nf
 
@@ -289,7 +289,7 @@ A plugin\-local value to return to the option callback function.
 .TP
 .I cb
 A callback function that is invoked when the plugin option is
-registered with SLURM. \fBspank_opt_cb_f\fR is typedef'd in
+registered with Slurm. \fBspank_opt_cb_f\fR is typedef'd in
 \fB<slurm/spank.h>\fR as
 .nf
 
@@ -302,7 +302,7 @@ struct, \fIoptarg\fR is the supplied argument if applicable, and \fIremote\fR
 is 0 if the function is being called from the "local" host
 (e.g. \fBsrun\fR) or 1 from the "remote" host (\fBslurmd\fR).
 .LP
-Plugin options may be registered with SLURM using
+Plugin options may be registered with Slurm using
 the \fBspank_option_register\fR function. This function is only valid
 when called from the plugin's \fBslurm_spank_init\fR handler, and
 registers one option at a time. The prototype is
@@ -329,7 +329,7 @@ If, however, the option is used in all contexts, the \fBspank_option_register\fR
 needs to be called everywhere.
 .LP
 In addition to \fBspank_option_register\fR, plugins may also export options
-to SLURM by defining a table of \fBstruct spank_option\fR with the
+to Slurm by defining a table of \fBstruct spank_option\fR with the
 symbol name \fBspank_options\fR. This method, however, is not supported
 for use with \fBsbatch\fR and \fBsalloc\fR (allocator context), thus
 the use of \fBspank_option_register\fR is preferred. When using the
@@ -337,7 +337,7 @@ the use of \fBspank_option_register\fR is preferred. When using the
 filled with zeros. A \fBSPANK_OPTIONS_TABLE_END\fR macro is provided
 in \fB<slurm/spank.h>\fR for this purpose.
 .LP
-When an option is provided by the user on the local side, \fBSLURM\fR will
+When an option is provided by the user on the local side, \fBSlurm\fR will
 immediately invoke the option's callback with \fIremote\fR=0. This
 is meant for the plugin to do local sanity checking of the option before
 the value is sent to the remote side during job launch. If the argument
@@ -348,7 +348,7 @@ On the remote side, options and their arguments are registered just
 after \fBSPANK\fR plugins are loaded and before the \fBspank_init\fR
 handler is called. This allows plugins to modify behavior of all plugin
 functionality based on the value of user\-provided options.
-(See EXAMPLES below for a plugin that registers an option with \fBSLURM\fR).
+(See EXAMPLES below for a plugin that registers an option with \fBSlurm\fR).
 .LP
 As an alternative to use of an option callback and global variable,
 plugins can use the \fBspank_option_getopt\fR option to check for
@@ -370,7 +370,7 @@ options in \fBjob_script\fR context (\fBslurm_spank_job_prolog\fR and
 .LP
 The default \fBSPANK\fR plug\-in stack configuration file is
 \fBplugstack.conf\fR in the same directory as \fBslurm.conf\fR(5),
-though this may be changed via the SLURM config parameter
+though this may be changed via the Slurm config parameter
 \fIPlugStackConfig\fR.  Normally the \fBplugstack.conf\fR file
 should be identical on all nodes of the cluster.
 The config file lists \fBSPANK\fR plugins,
@@ -464,7 +464,7 @@ priority may be established via a "min_prio" parameter in \fBplugstack.conf\fR
 
 /*
  * All spank plugins must define this macro for the
- * SLURM plugin loader.
+ * Slurm plugin loader.
  */
 SPANK_PLUGIN(renice, 1);
 
@@ -612,20 +612,20 @@ Copyright (C) 2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
-This file is part of SLURM, a resource management program.
+This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
 .LP
-SLURM is free software; you can redistribute it and/or modify it under
+Slurm is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation; either version 2 of the License, or (at your option)
 any later version.
 .LP
-SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 .SH "FILES"
-\fB/etc/slurm/slurm.conf\fR \- SLURM configuration file.
+\fB/etc/slurm/slurm.conf\fR \- Slurm configuration file.
 .br
 \fB/etc/slurm/plugstack.conf\fR \- SPANK configuration file.
 .br
-- 
GitLab