diff --git a/doc/bgl.report/Makefile b/doc/bgl.report/Makefile
index 7fe7a35ec07bed12f83b11f0ca8fc0bd38e73ccd..0a293267ef0cb7b5748cdd6a6a7e1e4a65ffd11e 100644
--- a/doc/bgl.report/Makefile
+++ b/doc/bgl.report/Makefile
@@ -19,12 +19,12 @@ FIGS = $(FIGDIR)/arch.eps \
        $(FIGDIR)/interactive-job-init.eps \
        $(FIGDIR)/slurm-arch.eps
 
-PLOTS = $(FIGDIR)/times.eps 
+PLOTS = $(FIGDIR)/times.eps
 
 BIB = ../common/project.bib
 
 %.eps: %.dia
-	dia --nosplash -e $@ $< 
+	dia --nosplash -e $@ $<
 %.eps: %.gpl
 	gnuplot $<
 %.eps: %.fig
@@ -34,9 +34,9 @@ BIB = ../common/project.bib
 %.ps: %.dvi
 	dvips -K -t letter -o $(@F) $(<F)
 %.pdf: %.dvi
-	dvipdf $< $@ 
+	dvipdf $< $@
 
-all: $(REPORT).ps 
+all: $(REPORT).ps
 
 
 $(REPORT).dvi: $(TEX) $(FIGS) $(PLOTS) $(BIB) smap.output
@@ -51,6 +51,6 @@ $(REPORT).dvi: $(TEX) $(FIGS) $(PLOTS) $(BIB) smap.output
 view: $(REPORT).ps
 	ghostview $(REPORT) &
 
-clean: 
+clean:
 	rm -f *~ *.dvi *.log *.aux $(REPORT).pdf $(REPORT).ps *.blg *.bbl #*.eps #*.gif *.ps
-	      
+
diff --git a/doc/bgl.report/report.tex b/doc/bgl.report/report.tex
index f4871acae1363e629bdb9ffb8c22a7275df8ea7d..c7f4f96d04aa1b2086a1d3a2eeada5a5db75a99c 100644
--- a/doc/bgl.report/report.tex
+++ b/doc/bgl.report/report.tex
@@ -1,30 +1,30 @@
-% Presenter info: 
+% Presenter info:
 % http://www.linuxclustersinstitute.org/Linux-HPC-Revolution/presenterinfo.html
 %
 % Main Text Layout
-% Set the main text in 10 point Times Roman or Times New Roman (normal), 
-% (no boldface), using single line spacing. All text should be in a single 
-% column and justified. 
+% Set the main text in 10 point Times Roman or Times New Roman (normal),
+% (no boldface), using single line spacing. All text should be in a single
+% column and justified.
 %
 % Opening Style (First Page)
-% This includes the title of the paper, the author names, organization and 
+% This includes the title of the paper, the author names, organization and
 % country, the abstract, and the first part of the paper.
-% * Start the title 35mm down from the top margin in Times Roman font, 16 
-%   point bold, range left. Capitalize only the first letter of the first 
+% * Start the title 35mm down from the top margin in Times Roman font, 16
+%   point bold, range left. Capitalize only the first letter of the first
 %   word and proper nouns.
-% * On a new line, type the authors' names, organizations, and country only 
-%   (not the full postal address, although you may add the name of your 
+% * On a new line, type the authors' names, organizations, and country only
+%   (not the full postal address, although you may add the name of your
 %   department), in Times Roman, 11 point italic, range left.
-% * Start the abstract with the heading two lines below the last line of the 
+% * Start the abstract with the heading two lines below the last line of the
 %   address. Set the abstract in Times Roman, 12 point bold.
-% * Leave one line, then type the abstract in Times Roman 10 point, justified 
+% * Leave one line, then type the abstract in Times Roman 10 point, justified
 %   with single line spacing.
 %
 % Other Pages
-% For the second and subsequent pages, use the full 190 x 115mm area and type 
-% in one column beginning at the upper right of each page, inserting tables 
+% For the second and subsequent pages, use the full 190 x 115mm area and type
+% in one column beginning at the upper right of each page, inserting tables
 % and figures as required.
-% 
+%
 % We're recommending the Lecture Notes in Computer Science styles from
 % Springer Verlag --- google on Springer Verlag LaTeX.  These work nicely,
 % *except* that it does not work with the hyperref package. Sigh.
@@ -92,18 +92,18 @@
 \vskip1\baselineskip
 % Abstract itself - 10pt
 \noindent\normalsize
-The Blue Gene/L (BGL) system is a highly scalable computer developed 
-by IBM and deployed Lawrence Livermore National Laboratory (LLNL). 
-The current system has over 131,000 processors interconnected by a 
-three-dimensional toroidal network with complex rules for managing 
+The Blue Gene/L (BGL) system is a highly scalable computer developed
+by IBM and deployed at Lawrence Livermore National Laboratory (LLNL).
+The current system has over 131,000 processors interconnected by a
+three-dimensional toroidal network with complex rules for managing
 the network and allocating resources to jobs.
-SLURM (Simple Linux Utility for Resource Management ) was selected to 
-fulfull this role. 
-SLURM is an open source, fault-tolerant, and highly scalable cluster 
+SLURM (Simple Linux Utility for Resource Management) was selected to
+fulfill this role.
+SLURM is an open source, fault-tolerant, and highly scalable cluster
 management and job scheduling system in widespread use on Linux clusters.
 This paper presents overviews of BGL resource management issues and
 SLURM architecture.
-It also presents a description of how SLURM provides resource 
+It also presents a description of how SLURM provides resource
 management for BGL and preliminary performance results.
 
 % define some additional macros for the body
@@ -119,98 +119,98 @@ management for BGL and preliminary performance results.
 
 \section{Overview}
 
-The BlueGene/L (BGL) system offers a unique cell-based design in which 
-the capacity can be expanded without introducing bottlenecks 
+The BlueGene/L (BGL) system offers a unique cell-based design in which
+the capacity can be expanded without introducing bottlenecks
 \cite{BlueGeneWeb,BlueGeneL2002}.
-The Blue Gene/L system delivered to LLNL consists of 
-131,072 processors and 33TB of memory \cite{BlueGene2002}.  
+The Blue Gene/L system delivered to LLNL consists of
+131,072 processors and 33TB of memory \cite{BlueGene2002}.
 The peak computational rate will exceed 360 TeraFLOPs.
 
-Simple Linux Utility for Resource Management (SLURM)\footnote{A tip of 
+Simple Linux Utility for Resource Management (SLURM)\footnote{A tip of
 the hat to Matt Groening and creators of {\em Futurama},
-where Slurm is the most popular carbonated beverage in the universe.} 
-is a resource management system suitable for use on both small and 
-very large clusters. 
+where Slurm is the most popular carbonated beverage in the universe.}
+is a resource management system suitable for use on both small and
+very large clusters.
 SLURM was developed by Lawrence Livermore National Laboratory
-(LLNL), Linux NetworX and HP. 
-It has been deployed on hundreds of Linux clusters world-wide and has 
+(LLNL), Linux NetworX and HP.
+It has been deployed on hundreds of Linux clusters world-wide and has
 proven both highly reliable and highly scalalble.
 
 \section{Architecture of Blue Gene/L}
 
-The basic building-blocks of BGL are c-nodes. 
+The basic building-blocks of BGL are c-nodes.
 Each c-node consists
-of two processors based upon the PowerPC 550GX, 512 MB of memory 
+of two processors based upon the PowerPC 550GX, 512 MB of memory
 and support for five separate networks on a single chip.
-One of the processors may be used for computations and the 
-second used exclusively for communications. 
-Alternately, both processors may be used for computations. 
-These c-nodes are subsequently grouped into base partitions, each consisting 
-of 512 c-nodes in an eight by eight by eight array with the same 
+One of the processors may be used for computations and the
+second used exclusively for communications.
+Alternately, both processors may be used for computations.
+These c-nodes are subsequently grouped into base partitions, each consisting
+of 512 c-nodes in an eight by eight by eight array with the same
 network support.
-The BGL system delivered to LLNL consists of 128 base 
+The BGL system delivered to LLNL consists of 128 base
 partitions organized in an eight by four by four array.
-The minimal resource allocation unit for applications is one 
-base partition so that at most 128 simultaneous jobs may execute. 
+The minimal resource allocation unit for applications is one
+base partition so that at most 128 simultaneous jobs may execute.
 
-The c-nodes execute a custom micro-kernel. 
-System calls that can not directly be processed by the c-node 
-micro-kernel are routed to one of the systems I/O nodes. 
-There are 1024 I/O nodes running the Linux operating system, 
+The c-nodes execute a custom micro-kernel.
+System calls that can not directly be processed by the c-node
+micro-kernel are routed to one of the systems I/O nodes.
+There are 1024 I/O nodes running the Linux operating system,
 each of which service the requests from 64 c-nodes.
 
-Three distinct communications networks are supported: 
+Three distinct communications networks are supported:
 a three-dimensional torus with direct nearest-neighbor connections;
-a global tree network for broadcast and reduction operations; and 
-a barrier network for synchronization. 
-The torus network connects each node to 
-its nearest neighbors in the X, Y and Z directions for a 
-total of six of these connections for each node. 
-
-Only parallel user applications execute on the c-node. 
-BGL has eight front-end nodes for other user tasks. 
-Users can login to the front-end nodes, compile and 
+a global tree network for broadcast and reduction operations; and
+a barrier network for synchronization.
+The torus network connects each node to
+its nearest neighbors in the X, Y and Z directions for a
+total of six of these connections for each node.
+
+Only parallel user applications execute on the c-node.
+BGL has eight front-end nodes for other user tasks.
+Users can login to the front-end nodes, compile and
 launch parallel applications. Front-end nodes can also
 be used for pre- and post-processing of data files.
 
-BGL system administrative functions are performed on a 
-computer known as the service node, which also maintains 
+BGL system administrative functions are performed on a
+computer known as the service node, which also maintains
 a DB2 database used for many BGL management functions.
 
 TO DO: Mesh vs. Torus.  Wiring rules.
 
 TO DO: Overhead of starting a new job (e.g. reboot nodes).
 
-NOTE: Be careful not to use non-public information (don't use 
+NOTE: Be careful not to use non-public information (don't use
 information directly from the "IBM Confidential" documents).
 
 \section{Architecture of SLURM}
 
-Only a brief description of SLURM architecture and implemenation is provided 
-here. 
-A more thorough treatment of the SLURM design and implementation is 
+Only a brief description of SLURM architecture and implementation is provided
+here.
+A more thorough treatment of the SLURM design and implementation is
 available from several sources \cite{SLURM2003,SlurmWeb}.
 
-Several SLURM features make it well suited to serve as a resource manager 
+Several SLURM features make it well suited to serve as a resource manager
 for Blue Gene/L.
 
 \begin{itemize}
 
-\item {\tt Scalability}: 
-The SLURM daemons are highly parallel  with independent read and write 
-locks on the various data structures. 
-SLURM presently manages several Linux clusters with over 1000 nodes 
+\item {\tt Scalability}:
+The SLURM daemons are highly parallel with independent read and write
+locks on the various data structures.
+SLURM presently manages several Linux clusters with over 1000 nodes
 and executes full-system parallel jobs on these systems in a few seconds.
 
-\item {\tt Portability}: 
-SLURM is written in the C language, with a GNU {\em autoconf} configuration engine.  
-While initially written for Linux, other Unix-like operating systems including 
+\item {\tt Portability}:
+SLURM is written in the C language, with a GNU {\em autoconf} configuration engine.
+While initially written for Linux, other Unix-like operating systems including
 AIX have proven easy porting targets.
-SLURM also supports a general purpose ``plugin'' mechanism, which 
+SLURM also supports a general purpose ``plugin'' mechanism, which
 permits a variety of different infrastructures to be easily supported.
-The SLURM configuration file specifies which set of plugin modules 
-should be used. 
-For example, plugins are used for interfacing with different authentication 
+The SLURM configuration file specifies which set of plugin modules
+should be used.
+For example, plugins are used for interfacing with different authentication
 mechanisms and interconnects.
 
 \item {\tt Fault Tolerance}: SLURM can handle a variety of failure
@@ -218,12 +218,12 @@ modes without terminating workloads, including crashes of the node
 running the SLURM controller.  User jobs may be configured to continue
 execution despite the failure of one or more nodes on which they are
 executing.  The user command controlling a job, {\tt srun}, may detach
-and reattach from the parallel tasks at any time.  
+and reattach from the parallel tasks at any time.
 
 \item {\tt System Administrator Friendly}: SLURM utilizes
 a simple configuration file and minimizes distributed state.
 Its configuration may be changed at any time without impacting running
-jobs.  SLURM interfaces are usable by scripts and its behavior is 
+jobs.  SLURM interfaces are usable by scripts and its behavior is
 highly deterministic.
 
 \end{itemize}
@@ -245,31 +245,31 @@ pending work.
 As shown in Figure~\ref{arch}, SLURM consists of a \slurmd\ daemon
 running on each compute node, a central \slurmctld\ daemon running
 on a management node (with optional fail-over twin), and five command
-line utilities: \srun\, \scancel, \sinfo\, \squeue\, and \scontrol\, 
+line utilities: \srun\, \scancel, \sinfo\, \squeue\, and \scontrol\,
 which can run anywhere in the cluster.
 
 The central controller daemon, \slurmctld\, maintains the global
 state and directs operations.
 \slurmctld\ monitors the state of nodes (through {\tt slurmd}),
-groups nodes into partitions with various contraints,  
+groups nodes into partitions with various constraints,
 manages a queue of pending work, and
 allocates resouces to pending jobs and job steps.
-\slurmctld\ does not directly execute any user jobs, but 
+\slurmctld\ does not directly execute any user jobs, but
 provides overall management of jobs and resources.
-  
-Compute nodes simply run a \slurmd\ daemon (similar to a remote 
+
+Compute nodes simply run a \slurmd\ daemon (similar to a remote
 shell daemon) to export control to SLURM.
-Each \slurmd\ monitors machine status, 
-performs remote job execution, manages the job's I/O, and otherwise 
+Each \slurmd\ monitors machine status,
+performs remote job execution, manages the job's I/O, and otherwise
 manages the jobs and job steps for its execution host.
 
-Users interact with SLURM through four command line utilities: 
-\srun\ for submitting a job for execution and optionally controlling 
-it interactively, 
+Users interact with SLURM through four command line utilities:
+\srun\ for submitting a job for execution and optionally controlling
+it interactively,
 \scancel\ for signalling or terminating a pending or running job,
-\squeue\ for monitoring job queues, and 
-\sinfo\ for monitoring partition and overall system state.  
-System administrators perform privileged operations through an 
+\squeue\ for monitoring job queues, and
+\sinfo\ for monitoring partition and overall system state.
+System administrators perform privileged operations through an
 additional command line utility, {\tt scontrol}.
 
 The entities managed by these SLURM daemons include {\em nodes}, the
@@ -278,9 +278,9 @@ logical disjoint sets, {\em jobs}, or allocations of resources assigned
 to a user for a specified amount of time, and {\em job steps}, which
 are sets of (possibly parallel) tasks within a job.  Each job in the
 priority-ordered queue is allocated nodes within a single partition.
-Once a job is assigned a set of nodes, the user is able to initiate 
-parallel work in the form of job steps in any configuration within 
-the job's allocation. 
+Once a job is assigned a set of nodes, the user is able to initiate
+parallel work in the form of job steps in any configuration within
+the job's allocation.
 For instance, a single job step may be started that utilizes all nodes
 allocated to the job, or several job steps may independently use a
 portion of the allocation.
@@ -306,9 +306,9 @@ by the SLURM libraries.  A plugin provides a customized implementation
 of a well-defined API connected to tasks such as authentication,
 or job scheduling.  A common set of functions is defined
 for use by all of the different infrastructures of a particular variety.
-For example, the authentication plugin must define functions such as 
+For example, the authentication plugin must define functions such as
 {\tt slurm\_auth\_create} to create a credential, {\tt slurm\_auth\_verify}
-to verify a credential to approve or deny authentication, etc.  
+to verify a credential to approve or deny authentication, etc.
 When a SLURM command or daemon is initiated, it
 reads the configuration file to determine which of the available plugins
 should be used.  For example {\em AuthType=auth/munge} says to use the
@@ -317,91 +317,91 @@ identifies the directory in which to find the plugin.
 
 \section {Blue Gene/L Specific Resource Management Issues}
 
-Since a BGL base partition is the minimum allocation unit for a job, 
-it was natural to consider each one as an independent SLURM node. 
-This meant SLURM would manage a very reasonable 128 nodes 
+Since a BGL base partition is the minimum allocation unit for a job,
+it was natural to consider each one as an independent SLURM node.
+This meant SLURM would manage a very reasonable 128 nodes
 rather than tens of thousands of individual c-nodes.
-The \slurmd\ daemon was originally designed to execute on each 
-SLURM node to monitor the status of that node, launch job steps, etc. 
-Unfortunately BGL prohibited the execute of SLURM daemons within 
-the base partitions on any of the c-nodes. 
-SLURM was compelled to execute \slurmd\ on one or more 
+The \slurmd\ daemon was originally designed to execute on each
+SLURM node to monitor the status of that node, launch job steps, etc.
+Unfortunately BGL prohibited the execution of SLURM daemons within
+the base partitions on any of the c-nodes.
+SLURM was compelled to execute \slurmd\ on one or more
 front-end nodes.
-In addition,  the typical Unix mechanism used to interact with a 
-compute host (e.g. getting memory size or processor count) do not 
-function normally with BGL base partitions. 
-This issue was addressed by adding a SLURM parameter to 
-indicate when it is running with a front-end node, in which case 
-there is assumed to be a single \slurmd\ for the entire system. 
-We anticipate changing this in the future to support multiple 
+In addition, the typical Unix mechanisms used to interact with a
+compute host (e.g. getting memory size or processor count) do not
+function normally with BGL base partitions.
+This issue was addressed by adding a SLURM parameter to
+indicate when it is running with a front-end node, in which case
+there is assumed to be a single \slurmd\ for the entire system.
+We anticipate changing this in the future to support multiple
 \slurmd\ daemons on the front-end nodes.
 
 SLURM was originally designed to address a one-dimensional topology
-and this impacted a variety of areas from naming convensions to 
-node selection. 
-SLURM provides resource management on several Linux clusters 
-exceeding 1000 nodes and it is impractical to display or otherwise 
-work with hundreds of individual node names. 
-SLURM addresses this by using a compressed hostlist range format to indicate 
-ranges of node names. 
-For example, "linux[0-1023]" was used to represent 1024 nodes 
-with names having a prefix of "linux" and a numeric suffic ranging 
-from "0" to "1023" (e.g. "linux0" through "linux1023"). 
-The most reasonable way to name the BGL nodes seemed to be 
-using a three digit suffix, but rather than indicate a monotonically 
-increasing number, each digit would represent the base partition's 
-location in the X, Y and Z dimensions (the value of X ranges 
+and this impacted a variety of areas from naming conventions to
+node selection.
+SLURM provides resource management on several Linux clusters
+exceeding 1000 nodes and it is impractical to display or otherwise
+work with hundreds of individual node names.
+SLURM addresses this by using a compressed hostlist range format to indicate
+ranges of node names.
+For example, "linux[0-1023]" was used to represent 1024 nodes
+with names having a prefix of "linux" and a numeric suffix ranging
+from "0" to "1023" (e.g. "linux0" through "linux1023").
+The most reasonable way to name the BGL nodes seemed to be
+using a three digit suffix, but rather than indicate a monotonically
+increasing number, each digit would represent the base partition's
+location in the X, Y and Z dimensions (the value of X ranges
 from 0 to 7, Y from 0 to 3, and Z from 0 to 3 on the LLNL system).
 For example, "bgl012" would represent the base partition at
 the position X=0, Y=1 and Z=2.
-Since BGL resources naturally tend to be rectangular prisms in 
-shape, we modified the hostlist range format to indicate the two 
-extreme base partition locations. 
-The name prefix is always "bgl". 
+Since BGL resources naturally tend to be rectangular prisms in
+shape, we modified the hostlist range format to indicate the two
+extreme base partition locations.
+The name prefix is always "bgl".
 Within the brackets one lists the base partition with the smallest
-X, Y and Z coordinates followed by a "x" followed by the base 
+X, Y and Z coordinates followed by a "x" followed by the base
 partition with the highest X, Y and Z coordinates.
-For example, "bgl[200x311]" represents the following eight base 
+For example, "bgl[200x311]" represents the following eight base
 partitions: bgl200, bgl201, bgl210, bgl211, bgl300, bgl301, bgl310
 and bgl311.
-Note that this method does can not accomodate blocks of base 
-partitions that wrap over the torus boundaries particularly well, 
-although a hostlist range format of this sort is supported: 
+Note that this method can not accommodate blocks of base
+partitions that wrap over the torus boundaries particularly well,
+although a hostlist range format of this sort is supported:
 "bgl[000x-011,700x711]".
 
-The node selection functionality is another topology aware 
-SLURM component. 
-Rather than embedding BGL-specific logic into a multitude of 
-locations, all of this logic was put into a single plugin. 
-The pre-existing node selection logic was put into a plugin 
-supporting typical Linux clusters with node names based 
-upon a one-dimensional array. 
-The BGL-specific plugin not only selects nodes for pending jobs 
-based upon BGL topography, but issues the BGL-specific APIs 
-to monitor the system health (draining nodes with any failure 
+The node selection functionality is another topology aware
+SLURM component.
+Rather than embedding BGL-specific logic into a multitude of
+locations, all of this logic was put into a single plugin.
+The pre-existing node selection logic was put into a plugin
+supporting typical Linux clusters with node names based
+upon a one-dimensional array.
+The BGL-specific plugin not only selects nodes for pending jobs
+based upon BGL topology, but issues the BGL-specific APIs
+to monitor the system health (draining nodes with any failure
 mode) and perform initialization and termination sequences for the job.
 
-BGL's topology requirement necessitated the addition of several 
-\srun\ options: {\em --geometry} to specify the dimension required by 
+BGL's topology requirement necessitated the addition of several
+\srun\ options: {\em --geometry} to specify the dimension required by
 the job,
- {\em --no-rotate} to indicate of the geometry specification could rotate 
+ {\em --no-rotate} to indicate if the geometry specification could rotate
 in three-dimensions,
 {\em --comm-type} to indicate the communctions type being mesh or torus,
-{\em --node-use} to specify if the second process on a c-node should 
-be used to execute the user application or be used for communications. 
-While \srun\ accepts these new options on all computer systems, 
-the node selection plugin logic is used to manage this data in an 
-opaque data type. 
-Since these new data types are unused on non-BGL systems, the 
-functions to manage them perform no work. 
-Other computers with other topology requiremens will be able to 
+{\em --node-use} to specify if the second process on a c-node should
+be used to execute the user application or be used for communications.
+While \srun\ accepts these new options on all computer systems,
+the node selection plugin logic is used to manage this data in an
+opaque data type.
+Since these new data types are unused on non-BGL systems, the
+functions to manage them perform no work.
+Other computers with other topology requirements will be able to
 take advantage of this plugin infrastructure with minimal effort.
 
-In order to provide users with a clear view of the BGL topology, a new 
+In order to provide users with a clear view of the BGL topology, a new
 tools was developed.
-\smap\ presents the same type of information as the \sinfo\ and \squeue\ 
-commands, but graphically displays the location of SLURM nodes 
-(BGL base partitions) assigned to partitions or partitions as shown in 
+\smap\ presents the same type of information as the \sinfo\ and \squeue\
+commands, but graphically displays the location of SLURM nodes
+(BGL base partitions) assigned to partitions or partitions as shown in
 Table ~\ref{smap_out}.
 
 \begin{table}[t]
@@ -409,10 +409,10 @@ Table ~\ref{smap_out}.
 
 \begin{tabular}[c]{c}
 \\
-\fbox{ 
+\fbox{
    \begin{minipage}[c]{1.0\linewidth}
-     {\scriptsize \verbatiminput{smap.output} } 
-   \end{minipage} 
+     {\scriptsize \verbatiminput{smap.output} }
+   \end{minipage}
 }
 \\
 \end{tabular}
@@ -420,17 +420,17 @@ Table ~\ref{smap_out}.
 \end{center}
 \end{table}
 
-Rather than modifying SLURM to initiate and manage the parallel 
-tasks for BGL jobs, we decided utilize existing software from IBM. 
-This eliminated a multitude of software integration issues. 
-SLURM will manage resources, select resources for the job, 
-set an environment variable BGL\_PARTITION\_ID, and spawn 
-a script. 
+Rather than modifying SLURM to initiate and manage the parallel
+tasks for BGL jobs, we decided to utilize existing software from IBM.
+This eliminated a multitude of software integration issues.
+SLURM will manage resources, select resources for the job,
+set an environment variable BGL\_PARTITION\_ID, and spawn
+a script.
 The job will initiate its parallel tasks through the use of {\em mpirun}.
-{\em mpirun} uses BGL-specific APIs to launch and manage the 
-tasks. 
-We disabled SLURM's job step support for normal users to 
-mitigate the possible impact of users inadvertently attempting 
+{\em mpirun} uses BGL-specific APIs to launch and manage the
+tasks.
+We disabled SLURM's job step support for normal users to
+mitigate the possible impact of users inadvertently attempting
 to initiate job steps through SLURM.
 
 \section{Blue Gene/L Network Wiring Issues}
diff --git a/doc/clusterworld/Makefile b/doc/clusterworld/Makefile
index 282b42f89fdfff3c3ca69d839bfb401d76858e4d..1863cb2280eaa5b434befcae3f93beab4cc84a9b 100644
--- a/doc/clusterworld/Makefile
+++ b/doc/clusterworld/Makefile
@@ -10,7 +10,7 @@
 
 REPORT = report
 
-TEX = ../common/llnlCoverPage.tex $(REPORT).tex 
+TEX = ../common/llnlCoverPage.tex $(REPORT).tex
 
 FIGDIR = ../figures
 FIGS = $(FIGDIR)/allocate-init.eps \
@@ -22,12 +22,12 @@ FIGS = $(FIGDIR)/allocate-init.eps \
        $(FIGDIR)/slurm-arch.eps \
        $(FIGDIR)/times.eps
 
-PLOTS = $(FIGDIR)/times.eps 
+PLOTS = $(FIGDIR)/times.eps
 
 BIB = ../common/project.bib
 
 %.eps: %.dia
-	dia --nosplash -e $@ $< 
+	dia --nosplash -e $@ $<
 %.eps: %.gpl
 	gnuplot $<
 %.eps: %.fig
@@ -37,9 +37,9 @@ BIB = ../common/project.bib
 %.ps: %.dvi
 	dvips -K -t letter -o $(@F) $(<F)
 %.pdf: %.dvi
-	dvipdf $< $@ 
+	dvipdf $< $@
 
-all: $(REPORT).ps 
+all: $(REPORT).ps
 
 
 $(REPORT).dvi: $(TEX) $(FIGS) $(PLOTS) $(BIB)
@@ -54,6 +54,6 @@ $(REPORT).dvi: $(TEX) $(FIGS) $(PLOTS) $(BIB)
 view: $(REPORT).ps
 	ghostview $(REPORT) &
 
-clean: 
+clean:
 	rm -f *~ *.dvi *.log *.aux $(REPORT).ps *.blg *.bbl #*.eps #*.gif *.ps
-	      
+
diff --git a/doc/clusterworld/bio.txt b/doc/clusterworld/bio.txt
index f8155e3cf66f9a6cda7373955c0e6b1648eb5fbd..256cf4af04cc3a59d97bfef73c0556b9488db72e 100644
--- a/doc/clusterworld/bio.txt
+++ b/doc/clusterworld/bio.txt
@@ -1,9 +1,9 @@
-Morris Jette is a computer scientist with the Integrated 
-Computational Resource Management Group at Lawrence Livermore 
-National Laboratory. His primary research interest computer 
-scheduling, from individual tasks and processors to distributed 
+Morris Jette is a computer scientist with the Integrated
+Computational Resource Management Group at Lawrence Livermore
+National Laboratory. His primary research interest computer
+scheduling, from individual tasks and processors to distributed
 applications running across a computational grid.
 
-Mark Grondona is a computer scientist with the Production 
-Linux Group at Lawrence Livermore National Laboratory, where 
+Mark Grondona is a computer scientist with the Production
+Linux Group at Lawrence Livermore National Laboratory, where
 he works on cluster administration tools and resource management.
diff --git a/doc/clusterworld/report.tex b/doc/clusterworld/report.tex
index 0b3d5e66c0a64e09c2218d64c78f1d643223fa3e..592383e7f269b8ebb4bfd8c0804c7d80efe14c17 100644
--- a/doc/clusterworld/report.tex
+++ b/doc/clusterworld/report.tex
@@ -1,30 +1,30 @@
-% Presenter info: 
+% Presenter info:
 % http://www.linuxclustersinstitute.org/Linux-HPC-Revolution/presenterinfo.html
 %
 % Main Text Layout
-% Set the main text in 10 point Times Roman or Times New Roman (normal), 
-% (no boldface), using single line spacing. All text should be in a single 
-% column and justified. 
+% Set the main text in 10 point Times Roman or Times New Roman (normal),
+% (no boldface), using single line spacing. All text should be in a single
+% column and justified.
 %
 % Opening Style (First Page)
-% This includes the title of the paper, the author names, organization and 
+% This includes the title of the paper, the author names, organization and
 % country, the abstract, and the first part of the paper.
-% * Start the title 35mm down from the top margin in Times Roman font, 16 
-%   point bold, range left. Capitalize only the first letter of the first 
+% * Start the title 35mm down from the top margin in Times Roman font, 16
+%   point bold, range left. Capitalize only the first letter of the first
 %   word and proper nouns.
-% * On a new line, type the authors' names, organizations, and country only 
-%   (not the full postal address, although you may add the name of your 
+% * On a new line, type the authors' names, organizations, and country only
+%   (not the full postal address, although you may add the name of your
 %   department), in Times Roman, 11 point italic, range left.
-% * Start the abstract with the heading two lines below the last line of the 
+% * Start the abstract with the heading two lines below the last line of the
 %   address. Set the abstract in Times Roman, 12 point bold.
-% * Leave one line, then type the abstract in Times Roman 10 point, justified 
+% * Leave one line, then type the abstract in Times Roman 10 point, justified
 %   with single line spacing.
 %
 % Other Pages
-% For the second and subsequent pages, use the full 190 x 115mm area and type 
-% in one column beginning at the upper right of each page, inserting tables 
+% For the second and subsequent pages, use the full 190 x 115mm area and type
+% in one column beginning at the upper right of each page, inserting tables
 % and figures as required.
-% 
+%
 % We're recommending the Lecture Notes in Computer Science styles from
 % Springer Verlag --- google on Springer Verlag LaTeX.  These work nicely,
 % *except* that it does not work with the hyperref package. Sigh.
@@ -113,13 +113,13 @@ architecture and functionality.
 
 \section{Overview}
 
-Simple Linux Utility for Resource Management (SLURM)\footnote{A tip of 
+Simple Linux Utility for Resource Management (SLURM)\footnote{A tip of
 the hat to Matt Groening and creators of {\em Futurama},
-where Slurm is the most popular carbonated beverage in the universe.} 
-is a resource management system suitable for use on large and small Linux 
-clusters.  After surveying \cite{Jette2002} resource managers available 
-for Linux and finding none that were simple, highly scalable, and portable 
-to different cluster architectures and interconnects, the authors set out 
+where Slurm is the most popular carbonated beverage in the universe.}
+is a resource management system suitable for use on large and small Linux
+clusters.  After surveying \cite{Jette2002} resource managers available
+for Linux and finding none that were simple, highly scalable, and portable
+to different cluster architectures and interconnects, the authors set out
 to design a new system.
 
 The resulting design is a resource management system with the following general
@@ -127,25 +127,25 @@ characteristics:
 
 \begin{itemize}
 \item {\tt Simplicity}: SLURM is simple enough to allow motivated end users
-to understand its source code and add functionality.  The authors will 
-avoid the temptation to add features unless they are of general appeal. 
+to understand its source code and add functionality.  The authors will
+avoid the temptation to add features unless they are of general appeal.
 
-\item {\tt Open Source}: SLURM is available to everyone and will remain free. 
-Its source code is distributed under the GNU General Public 
+\item {\tt Open Source}: SLURM is available to everyone and will remain free.
+Its source code is distributed under the GNU General Public
 License \cite{GPL2002}.
 
-\item {\tt Portability}: SLURM is written in the C language, with a GNU 
-{\em autoconf} configuration engine.  
-While initially written for Linux, other Unix-like operating systems 
+\item {\tt Portability}: SLURM is written in the C language, with a GNU
+{\em autoconf} configuration engine.
+While initially written for Linux, other Unix-like operating systems
 should be easy porting targets.
-SLURM also supports a general purpose ``plugin'' mechanism, which 
-permits a variety of different infrastructures to be easily supported. 
-The SLURM configuration file specifies which set of plugin modules 
-should be used. 
+SLURM also supports a general purpose ``plugin'' mechanism, which
+permits a variety of different infrastructures to be easily supported.
+The SLURM configuration file specifies which set of plugin modules
+should be used.
 
 \item {\tt Interconnect Independence}: SLURM currently supports UDP/IP-based
-communication and the Quadrics Elan3 interconnect.  Adding support for 
-other interconnects, including topography constraints, is straightforward 
+communication and the Quadrics Elan3 interconnect.  Adding support for
+other interconnects, including topography constraints, is straightforward
 and utilizes the plugin mechanism described above.
 
 \item {\tt Scalability}: SLURM is designed for scalability to clusters of
@@ -162,7 +162,7 @@ executing.  The user command controlling a job, {\tt srun}, may detach
 and reattach from the parallel tasks at any time.  Nodes allocated to
 a job are available for reuse as soon as the job(s) allocated to that
 node terminate.  If some nodes fail to complete job termination in a
-timely fashion because of hardware or software problems, only the 
+timely fashion because of hardware or software problems, only the
 scheduling of those tardy nodes will be affected.
 
 \item {\tt Security}: SLURM employs crypto technology to authenticate
@@ -278,7 +278,7 @@ explained in more detail below.
 
 \slurmd\ is a multi-threaded daemon running on each compute node and
 can be compared to a remote shell daemon: it reads the common SLURM
-configuration file and saved state information, 
+configuration file and saved state information,
 notifies the controller that it is active, waits
 for work, executes the work, returns status, then waits for more work.
 Because it initiates jobs for other users, it must run as user {\em root}.
@@ -302,7 +302,7 @@ a process may include terminating all members of a process group and
 executing an epilog program.
 
 \item {\tt Stream Copy Service}: Allow handling of stderr, stdout, and
-stdin of remote tasks. Job input may be redirected 
+stdin of remote tasks. Job input may be redirected
 from a single file or multiple files (one per task), an
 \srun\ process, or /dev/null.  Job output may be saved into local files or
 returned to the \srun\ command. Regardless of the location of stdout/err,
@@ -319,7 +319,7 @@ requests to any set of locally managed processes.
 Most SLURM state information exists in {\tt slurmctld}, also known as
 the controller.  \slurmctld\ is multi-threaded with independent read
 and write locks for the various data structures to enhance scalability.
-When \slurmctld\ starts, it reads the SLURM configuration file and 
+When \slurmctld\ starts, it reads the SLURM configuration file and
 any previously saved state information.  Full controller state
 information is written to disk periodically, with incremental changes
 written to disk immediately for fault tolerance.  \slurmctld\ runs in
@@ -365,9 +365,9 @@ scheduling cycle as described above.
 
 \subsection{Command Line Utilities}
 
-The command line utilities offer users access to remote execution and 
-job control. They also permit administrators to dynamically change 
-the system configuration. These commands use SLURM APIs that are 
+The command line utilities offer users access to remote execution and
+job control. They also permit administrators to dynamically change
+the system configuration. These commands use SLURM APIs that are
 directly available for more sophisticated applications.
 
 \begin{itemize}
@@ -412,9 +412,9 @@ by the SLURM libraries.  A plugin provides a customized implementation
 of a well-defined API connected to tasks such as authentication,
 interconnect fabric, and task scheduling.  A common set of functions is defined
 for use by all of the different infrastructures of a particular variety.
-For example, the authentication plugin must define functions such as 
+For example, the authentication plugin must define functions such as
 {\tt slurm\_auth\_create} to create a credential, {\tt slurm\_auth\_verify}
-to verify a credential to approve or deny authentication, 
+to verify a credential to approve or deny authentication,
 {\tt slurm\_auth\_get\_uid} to get the uid associated with a specific
 credential, etc.  It also must define the data structure used, a plugin
 type, a plugin version number, etc.  When a SLURM daemon is initiated, it
@@ -450,7 +450,7 @@ SLURM has a simple security model: any user of the cluster may submit
 parallel jobs to execute and cancel his own jobs.  Any user may view
 SLURM configuration and state information.  Only privileged users
 may modify the SLURM configuration, cancel any job, or perform other
-restricted activities.  Privileged users in SLURM include the users 
+restricted activities.  Privileged users in SLURM include the users
 {\em root} and {\em SlurmUser} (as defined in the SLURM configuration file).
 If permission to modify SLURM configuration is required by others, set-uid
 programs may be used to grant specific permissions to specific users.
@@ -461,7 +461,7 @@ Historically, inter-node authentication has been accomplished via the use
 of reserved ports and set-uid programs. In this scheme, daemons check the
 source port of a request to ensure that it is less than a certain value
 and thus only accessible by {\em root}. The communications over that
-connection are then implicitly trusted.  Because reserved ports are a 
+connection are then implicitly trusted.  Because reserved ports are a
 limited resource and set-uid programs are a possible security concern,
 we have employed a credential-based authentication scheme that
 does not depend on reserved ports. In this design, a SLURM authentication
@@ -472,12 +472,12 @@ and gid from the credential as the verified identity of the sender.
 
 The actual implementation of the SLURM authentication credential is
 relegated to an ``auth'' plugin.  We presently have implemented three
-functional authentication plugins: authd\cite{Authd2002}, 
+functional authentication plugins: authd\cite{Authd2002},
 Munge, and none.  The ``none'' authentication type employs a null
 credential and is only suitable for testing and networks where security
 is not a concern. Both the authd and Munge implementations employ
 cryptography to generate a credential for the requesting user that
-may then be authoritatively verified on any remote nodes. However, 
+may then be authoritatively verified on any remote nodes. However,
 authd assumes a secure network and Munge does not.  Other authentication
 implementations, such as a credential based on Kerberos, should be easy
 to develop using the auth plugin API.
@@ -512,7 +512,7 @@ Unix groups using a {\em AllowGroups} specification.
 
 In this example a user wishes to run a job in batch mode, in which \srun\
 returns immediately and the job executes in the background when resources
-are available.  The job is a two-node run of script containing {\em mping}, 
+are available.  The job is a two-node run of script containing {\em mping},
 a simple MPI application.  The user submits the job:
 
 \begin{verbatim}
@@ -531,17 +531,17 @@ current working directory, and command line option information. By
 default, stdout and stderr are sent to files in the current working
 directory and stdin is copied from {\tt /dev/null}.
 
-The controller consults the Partition Manager to test whether the job 
+The controller consults the Partition Manager to test whether the job
 will ever be able to run.  If the user has requested a non-existent partition,
-a non-existent constraint, 
+a non-existent constraint,
 etc., the Partition Manager returns an error and the request is discarded.
-The failure is reported to \srun\, which informs the user and exits, for 
+The failure is reported to \srun\, which informs the user and exits, for
 example:
 \begin{verbatim}
 srun: error: Unable to allocate resources: Invalid partition name
 \end{verbatim}
 
-On successful submission, the controller assigns the job a unique 
+On successful submission, the controller assigns the job a unique
 {\em SLURM job id}, adds it to the job queue, and returns the job's
 job id to \srun\, which reports this to user and exits, returning
 success to the user's shell:
@@ -587,20 +587,20 @@ copied to a file in the current working directory by \srun :
 /path/to/cwd/slurm-42.out
 \end{verbatim}
 
-The user may examine output files at any time if they reside 
+The user may examine output files at any time if they reside
 in a globally accessible directory. In this example
-{\tt slurm-42.out} would  contain the output of the job script's two 
+{\tt slurm-42.out} would  contain the output of the job script's two
 commands (hostname and mping):
 
 \begin{verbatim}
 dev6
 dev7
-  1 pinged   0:        1 bytes      5.38 uSec     0.19 MB/s                     
-  1 pinged   0:        2 bytes      5.32 uSec     0.38 MB/s                     
-  1 pinged   0:        4 bytes      5.27 uSec     0.76 MB/s                     
-  1 pinged   0:        8 bytes      5.39 uSec     1.48 MB/s                     
+  1 pinged   0:        1 bytes      5.38 uSec     0.19 MB/s
+  1 pinged   0:        2 bytes      5.32 uSec     0.38 MB/s
+  1 pinged   0:        4 bytes      5.27 uSec     0.76 MB/s
+  1 pinged   0:        8 bytes      5.39 uSec     1.48 MB/s
   ...
-  1 pinged   0:  1048576 bytes   4682.97 uSec   223.91 MB/s              
+  1 pinged   0:  1048576 bytes   4682.97 uSec   223.91 MB/s
 \end{verbatim}
 
 When the tasks complete execution, \srun\ is notified by \slurmd\ of
@@ -609,14 +609,14 @@ Manager and exits.  \slurmd\ detects when the job script terminates and
 notifies the Job Manager of its exit status and begins cleanup.  The Job
 Manager directs the {\tt slurmd}s formerly assigned to the job to run
 the SLURM epilog program (if one is configured) as user {\em root}.
-Finally, the Job Manager releases the resources allocated to job {\em 42} 
+Finally, the Job Manager releases the resources allocated to job {\em 42}
 and updates the job status to {\em complete}. The record of a job's
 existence is eventually purged.
 
 \subsection{Example:  Executing an Interactive Job}
 
-In this example a user wishes to run the same {\em mping} command 
-in interactive mode, in which \srun\ blocks while the job executes 
+In this example a user wishes to run the same {\em mping} command
+in interactive mode, in which \srun\ blocks while the job executes
 and stdout/stderr of the job are copied onto stdout/stderr of {\tt srun}.
 The user submits the job, this time without the {\tt batch} option:
 
@@ -636,12 +636,12 @@ job script. In this case, the user sees the program output on stdout of
 {\tt srun}:
 
 \begin{verbatim}
-  1 pinged   0:        1 bytes      5.38 uSec     0.19 MB/s                     
-  1 pinged   0:        2 bytes      5.32 uSec     0.38 MB/s                     
-  1 pinged   0:        4 bytes      5.27 uSec     0.76 MB/s                     
-  1 pinged   0:        8 bytes      5.39 uSec     1.48 MB/s                     
+  1 pinged   0:        1 bytes      5.38 uSec     0.19 MB/s
+  1 pinged   0:        2 bytes      5.32 uSec     0.38 MB/s
+  1 pinged   0:        4 bytes      5.27 uSec     0.76 MB/s
+  1 pinged   0:        8 bytes      5.39 uSec     1.48 MB/s
   ...
-  1 pinged   0:  1048576 bytes   4682.97 uSec   223.91 MB/s              
+  1 pinged   0:  1048576 bytes   4682.97 uSec   223.91 MB/s
 \end{verbatim}
 
 When the job terminates, \srun\ receives an EOF (End Of File) on each
@@ -651,11 +651,11 @@ complete and terminates. The controller contacts all {\tt slurmd}s allocated
 to the terminating job and issues a request to run the SLURM epilog,
 then releases the job's resources.
 
-Most signals received by \srun\ while the job is executing are 
+Most signals received by \srun\ while the job is executing are
 transparently forwarded to the remote tasks. SIGINT (generated by
-Control-C) is a special case and only causes \srun\ to report 
+Control-C) is a special case and only causes \srun\ to report
 remote task status unless two SIGINTs are received in rapid succession.
-SIGQUIT (Control-$\backslash$) is another special case. SIGQUIT forces 
+SIGQUIT (Control-$\backslash$) is another special case. SIGQUIT forces
 termination of the running job.
 
 \section{Slurmctld Design}
@@ -745,7 +745,7 @@ component.  Data associated with a partition includes:
 
 \begin{itemize}
 \item Name
-\item RootOnly flag to indicate that only users {\em root} or 
+\item RootOnly flag to indicate that only users {\em root} or
 {\tt SlurmUser} may allocate resources in this partition (for any user)
 \item List of associated nodes
 \item State of partition (UP or DOWN)
@@ -813,7 +813,7 @@ which has an associated bit map. Usable node configuration bitmaps would
 be ANDed with the selected partitions bit map ANDed with the UP node
 bit map and possibly ANDed with the IDLE node bit map (this last test
 depends on the desire to share resources).  This method can eliminate
-tens of thousands of individual node configuration comparisons that 
+tens of thousands of individual node configuration comparisons that
 would otherwise be required in large heterogeneous clusters.
 
 The actual selection of nodes for allocation to a job is currently tuned
@@ -856,10 +856,10 @@ configuration file is shown in Table~\ref{sample_config}.
 
 \begin{tabular}[c]{c}
 \\
-\fbox{ 
+\fbox{
    \begin{minipage}[c]{0.8\linewidth}
-     {\tiny \verbatiminput{sample.config} } 
-   \end{minipage} 
+     {\tiny \verbatiminput{sample.config} }
+   \end{minipage}
 }
 \\
 \end{tabular}
@@ -882,8 +882,8 @@ There are a multitude of parameters associated with each job, including:
 \item Node constraints (processors, memory, features, etc.)
 \end{itemize}
 
-Job records have an associated hash table for rapidly locating 
-specific records. They also have bit maps of requested and/or 
+Job records have an associated hash table for rapidly locating
+specific records. They also have bit maps of requested and/or
 allocated nodes (as described above).
 
 The core functions supported by the Job Manager include:
@@ -891,10 +891,10 @@ The core functions supported by the Job Manager include:
 \item Request resource (job may be queued)
 \item Reset priority of a job
 \item Status job (including node list, memory and CPU use data)
-\item Signal job (send arbitrary signal to all processes associated 
+\item Signal job (send arbitrary signal to all processes associated
       with a job)
 \item Terminate job (remove all processes)
-\item Change node count of running job (could fail if insufficient 
+\item Change node count of running job (could fail if insufficient
 resources are available)
 %\item Preempt/resume job  (future)
 %\item Checkpoint/restart job (future)
@@ -910,25 +910,25 @@ or node state might permit the scheduling of a job.
 We are aware that this scheduling algorithm does not satisfy the needs
 of many customers, and we provide the means for establishing other
 scheduling algorithms. Before a newly arrived job is placed into the
-queue, an external scheduler plugin assigns its initial priority.  
+queue, an external scheduler plugin assigns its initial priority.
 A plugin function is also called at the start of each scheduling
 cycle to modify job or system state as desired.  SLURM APIs permit an
 external entity to alter the priorities of jobs at any time and re-order
 the queue as desired.  The Maui Scheduler \cite{Jackson2001,Maui2002}
 is one example of an external scheduler suitable for use with SLURM.
 
-LLNL uses DPCS \cite{DPCS2002} as SLURM's external scheduler. 
-DPCS is a meta-scheduler with flexible scheduling algorithms that 
-suit our needs well. 
+LLNL uses DPCS \cite{DPCS2002} as SLURM's external scheduler.
+DPCS is a meta-scheduler with flexible scheduling algorithms that
+suit our needs well.
 It also provides the scalability required for this application.
-DPCS maintains pending job state internally and only transfers the 
-jobs to SLURM (or another underlying resources manager) only when 
-they are to begin execution. 
-By not transferring jobs to a particular resources manager earlier, 
-jobs are assured of being initiated on the first resource satisfying 
-their requirements, whether a Linux cluster with SLURM or an IBM SP 
+DPCS maintains pending job state internally and only transfers the
+jobs to SLURM (or another underlying resources manager) only when
+they are to begin execution.
+By not transferring jobs to a particular resources manager earlier,
+jobs are assured of being initiated on the first resource satisfying
+their requirements, whether a Linux cluster with SLURM or an IBM SP
 with LoadLeveler (assuming a highly flexible application).
-This mode of operation may also be suitable for computational grid 
+This mode of operation may also be suitable for computational grid
 schedulers.
 
 In a future release, the Job Manager will collect resource consumption
@@ -995,7 +995,7 @@ running crashes, the job continues execution and no output is lost.
 
 The \slurmd\ daemon is a multi-threaded daemon for managing user jobs
 and monitoring system state.  Upon initiation it reads the configuration
-file, recovers any saved state, captures system state, 
+file, recovers any saved state, captures system state,
 attempts an initial connection to the SLURM
 controller, and awaits requests.  It services requests for system state,
 accounting information, job initiation, job state, job termination,
@@ -1013,18 +1013,18 @@ to {\tt slurmctld}.
 %FUTURE:  Another thread is
 %created to capture CPU, real-memory and virtual-memory consumption from
 %the process table entries.  Differences in resource utilization values
-%from one process table snapshot to the next are accumulated. \slurmd\ 
+%from one process table snapshot to the next are accumulated. \slurmd\
 %insures these accumulated values are not decremented if resource
 %consumption for a user happens to decrease from snapshot to snapshot,
 %which would simply reflect the termination of one or more processes.
 %Both the real and virtual memory high-water marks are recorded and
 %the integral of memory consumption (e.g. megabyte-hours).  Resource
 %consumption is grouped by uid and SLURM job id (if any). Data
-%is collected for system users ({\em root}, {\em ftp}, {\em ntp}, 
-%etc.) as well as customer accounts. 
+%is collected for system users ({\em root}, {\em ftp}, {\em ntp},
+%etc.) as well as customer accounts.
 %The intent is to capture all resource use including
 %kernel, idle and down time.  Upon request, the accumulated values are
-%uploaded to \slurmctld\ and cleared.  
+%uploaded to \slurmctld\ and cleared.
 
 \slurmd\ accepts requests from \srun\ and \slurmctld\ to initiate
 and terminate user jobs. The initiate job request contains such
@@ -1067,38 +1067,38 @@ or a privileged user.
 
 \subsection{scontrol}
 
-\scontrol\ is a tool meant for SLURM administration by user {\em root}. 
+\scontrol\ is a tool meant for SLURM administration by user {\em root}.
 It provides the following capabilities:
 \begin{itemize}
-\item {\tt Shutdown}: Cause \slurmctld\ and \slurmd\ to save state 
+\item {\tt Shutdown}: Cause \slurmctld\ and \slurmd\ to save state
 and terminate.
-\item {\tt Reconfigure}: Cause \slurmctld\ and \slurmd\ to reread the 
+\item {\tt Reconfigure}: Cause \slurmctld\ and \slurmd\ to reread the
 configuration file.
 \item {\tt Ping}: Display the status of primary and backup \slurmctld\ daemons.
-\item {\tt Show Configuration Parameters}: Display the values of general SLURM 
-configuration parameters such as locations of files and values of timers.  
-\item {\tt Show Job State}: Display the state information of a particular job 
+\item {\tt Show Configuration Parameters}: Display the values of general SLURM
+configuration parameters such as locations of files and values of timers.
+\item {\tt Show Job State}: Display the state information of a particular job
 or all jobs in the system.
-\item {\tt Show Job Step State}: Display the state information of a particular 
+\item {\tt Show Job Step State}: Display the state information of a particular
 job step or all job steps in the system.
-\item {\tt Show Node State}: Display the state and configuration information 
-of a particular node, a set of nodes (using numeric ranges syntax to 
+\item {\tt Show Node State}: Display the state and configuration information
+of a particular node, a set of nodes (using numeric ranges syntax to
 identify their names), or all nodes.
-\item {\tt Show Partition State}: Display the state and configuration 
+\item {\tt Show Partition State}: Display the state and configuration
 information of a particular partition or all partitions.
-\item {\tt Update Job State}: Update the state information of a particular job 
-in the system. Note that not all state information can be changed in this 
+\item {\tt Update Job State}: Update the state information of a particular job
+in the system. Note that not all state information can be changed in this
 fashion (e.g., the nodes allocated to a job).
 \item {\tt Update Node State}: Update the state of a particular node. Note
 that not all state information can be changed in this fashion (e.g., the
 amount of memory configured on a node). In some cases, you may need
 to modify the SLURM configuration file and cause it to be reread
-using the ``Reconfigure'' command described above.  
+using the ``Reconfigure'' command described above.
 \item {\tt Update Partition State}: Update the state of a partition
 node. Note that not all state information can be changed in this fashion
 (e.g., the default partition). In some cases, you may need to modify
 the SLURM configuration file and cause it to be reread using the
-``Reconfigure'' command described above.  
+``Reconfigure'' command described above.
 \end{itemize}
 
 \subsection{squeue}
@@ -1106,14 +1106,14 @@ the SLURM configuration file and cause it to be reread using the
 \squeue\ reports the state of SLURM jobs.  It can filter these
 jobs input specification of job state (RUN, PENDING, etc.), job id,
 user name, job name, etc.  If no specification is supplied, the state of
-all pending and running jobs is reported. 
+all pending and running jobs is reported.
 \squeue\ also has a variety of sorting and output options.
 
 \subsection{sinfo}
 
 \sinfo\ reports the state of SLURM partitions and nodes.  By default,
 it reports a summary of partition state with node counts and a summary
-of the configuration of those nodes.  A variety of sorting and 
+of the configuration of those nodes.  A variety of sorting and
 output formatting options exist.
 
 \subsection{srun}
@@ -1182,7 +1182,7 @@ later execution; {\em allocate}, in which \srun\ requests resources from
 the SLURM controller and spawns a shell with access to those resources;
 {\em attach}, in which \srun\ attaches to a currently
 running job and displays stdout/stderr in real time from the remote
-tasks.  
+tasks.
 
 % FUTURE:
 % An interactive job may also be forced into the background with a special
@@ -1204,13 +1204,13 @@ stderr are displayed on the user's terminal in real time, and stdin and
 signals may be forwarded from the  terminal transparently to the remote
 tasks. The second mode is {\em batch} or {\em queued} mode, in which the job is
 queued until the request for resources can be satisfied, at which time the
-job is run by SLURM as the submitting user. In the third mode, {\em allocate} 
+job is run by SLURM as the submitting user. In the third mode, {\em allocate}
 mode, a job is allocated to the requesting user, under which the user may
 manually run job steps via a script or in a sub-shell spawned by \srun .
 
 \begin{figure}[tb]
 \centerline{\epsfig{file=../figures/connections.eps,scale=0.35}}
-\caption{\small Job initiation connections overview. 1. \srun\ connects to 
+\caption{\small Job initiation connections overview. 1. \srun\ connects to
          \slurmctld\ requesting resources. 2. \slurmctld\ issues a response,
 	 with list of nodes and job step credential. 3. \srun\ opens a listen
 	 port for job IO connections, then sends a run job step
@@ -1227,8 +1227,8 @@ list of allocated nodes, job step credential, etc.  if the request is granted,
 \srun\ then initializes a listen port for stdio connections and connects
 to the {\tt slurmd}s on the allocated nodes requesting that the remote
 processes be initiated. The {\tt slurmd}s begin execution of the tasks and
-connect back to \srun\ for stdout and stderr. This process is described 
-in more detail below. Details of the batch and allocate modes of operation 
+connect back to \srun\ for stdout and stderr. This process is described
+in more detail below. Details of the batch and allocate modes of operation
 are not presented due to space constraints.
 
 \subsection{Interactive Job Initiation}
@@ -1256,8 +1256,8 @@ instantly if the user has requested that \srun\ block until resources are
 available.  When resources are available for the user's job, \slurmctld\
 replies with a job step credential, list of nodes that were allocated,
 cpus per node, and so on. \srun\ then sends a message each \slurmd\ on
-the allocated nodes requesting that a job step be initiated. 
-The \slurmd\ daemons verify that the job is valid using the forwarded job 
+the allocated nodes requesting that a job step be initiated.
+The \slurmd\ daemons verify that the job is valid using the forwarded job
 step credential and then respond to \srun .
 
 Each \slurmd\ invokes a job manager process to handle the request, which
@@ -1299,11 +1299,11 @@ epilog ran successfully, the nodes are returned to the partition.
 \label{timing}
 \end{figure}
 
-We were able to perform some SLURM tests on a 1000-node cluster 
-in November 2002. Some development was still underway at that time 
-and tuning had not been performed. The results for executing the 
-program {\em /bin/hostname} on two tasks per node and various node 
-counts are shown in Figure~\ref{timing}. We found SLURM performance 
+We were able to perform some SLURM tests on a 1000-node cluster
+in November 2002. Some development was still underway at that time
+and tuning had not been performed. The results for executing the
+program {\em /bin/hostname} on two tasks per node and various node
+counts are shown in Figure~\ref{timing}. We found SLURM performance
 to be comparable to the
 Quadrics Resource Management System (RMS) \cite{Quadrics2002} for all
 job sizes and about 80 times faster than IBM LoadLeveler\cite{LL2002}
@@ -1311,8 +1311,8 @@ at tested job sizes.
 
 \section{Future Plans}
 
-SLURM begin production use on LLNL Linux clusters in March 2003 
-and is available from our web site\cite{SLURM2003}. 
+SLURM begin production use on LLNL Linux clusters in March 2003
+and is available from our web site\cite{SLURM2003}.
 
 While SLURM is able to manage 1000 nodes without difficulty using
 sockets and Ethernet, we are reviewing other communication mechanisms
@@ -1323,7 +1323,7 @@ including a broadcast capability.  STORM only supports the Quadrics
 Elan interconnnect at present, but it does offer the promise of improved
 performance and scalability.
 
-Looking ahead, we anticipate adding support for additional 
+Looking ahead, we anticipate adding support for additional
 interconnects (InfiniBand and the IBM
 Blue Gene \cite{BlueGene2002} system\footnote{Blue Gene has a different
 interconnect than any supported by SLURM and a 3-D topography with
@@ -1340,19 +1340,19 @@ use by each parallel job is planned for a future release.
 SLURM is jointly developed by LLNL and Linux NetworX.
 Contributors to SLURM development include:
 \begin{itemize}
-\item Jay Windley of Linux NetworX for his development of the plugin 
+\item Jay Windley of Linux NetworX for his development of the plugin
 mechanism and work on the security components
 \item Joey Ekstrom for his work developing the user tools
 \item Kevin Tew for his work developing the communications infrastructure
-\item Jim Garlick for his development of the Quadrics Elan interface and 
+\item Jim Garlick for his development of the Quadrics Elan interface and
 technical guidance
-\item Gregg Hommes, Bob Wood, and Phil Eckert for their help designing the 
+\item Gregg Hommes, Bob Wood, and Phil Eckert for their help designing the
 SLURM APIs
 \item Mark Seager and Greg Tomaschke for their support of this project
 \item Chris Dunlap for technical guidance
 \item David Jackson of Linux NetworX for technical guidance
-\item Fabrizio Petrini of Los Alamos National Laboratory for his work to 
-integrate SLURM with STORM communications 
+\item Fabrizio Petrini of Los Alamos National Laboratory for his work to
+integrate SLURM with STORM communications
 \end{itemize}
 
 %\appendix
@@ -1363,7 +1363,7 @@ integrate SLURM with STORM communications
 %\item[Authd]    User authentication mechanism
 %\item[DCE]	Distributed Computing Environment
 %\item[DFS]	Distributed File System (part of DCE)
-%\item[DPCS]	Distributed Production Control System, a meta-batch system 
+%\item[DPCS]	Distributed Production Control System, a meta-batch system
 %		and resource manager developed by LLNL
 %\item[Globus]	Grid scheduling infrastructure
 %\item[Kerberos]	Authentication mechanism
diff --git a/doc/clusterworld/sample.config b/doc/clusterworld/sample.config
index e0d5f2f38b79bf76c358ab5f27e0041542ea2cf0..7cb57f8300fcbdab996790816f3bac2c7944f467 100644
--- a/doc/clusterworld/sample.config
+++ b/doc/clusterworld/sample.config
@@ -1,9 +1,9 @@
-# 
+#
 # Sample /etc/slurm.conf
 # Author: John Doe
 # Date: 11/06/2001
 
-ControlMachine=lx0000   ControlAddr=elx0000 
+ControlMachine=lx0000   ControlAddr=elx0000
 BackupController=lx0001 BackupAddr=elx0001
 
 AuthType="auth/authd"
diff --git a/doc/common/acm.cls b/doc/common/acm.cls
index 440a28bfeca5581c1335a9c8de21b1e7d7da1c50..94f96a2f137b88add9fa09c296a31140843a5b1c 100644
--- a/doc/common/acm.cls
+++ b/doc/common/acm.cls
@@ -1,1357 +1,1357 @@
-% "WWW2005-submission.CLS" - VERSION 1.4
-% "COMPATIBLE" WITH THE "ACM_PROC_ARTICLE-SP.CLS" V2.6SP
-% PRODUCES A 'TIGHTER' PAPER AND DOES INCLUDE A RELEASE STATEMENT
-% SPECIFICALLY TAILORED FOR WWW'YY. (As originally requested by Sheridan Printing 1/24/2002)
-% Original by Gerald Murray January 24th. 2002
-% Improved on 11/11/2002 - GM
-% Modified for "www2004" 1/26/2004 - GM
-% ---- Start of 'updates'  ----
-%
-% Allowance made to switch default fonts between those systems using
-% METAFONT and those using 'Type 1' or 'Truetype' fonts.
-% See LINE NUMBER 236 for details.
-% Also provided for enumerated/annotated Corollaries 'surrounded' by
-% enumerated Theorems (line 790).
-% Gerry November 11th. 1999
-%
-% Georgia fixed bug in sub-sub-section numbering in paragraphs (July 29th. 2002)
-% JS/GM fix to vertical spacing before Proofs (July 30th. 2002)
-% Superscript fix Oct. 2002
-%
-% ---- End of 'updates' ----
-%
-\def\fileversion{v1.4}          % for ACM's tracking purposes
-\def\filedate{January 26, 2004}    % Gerry Murray's tracking data
-\def\docdate {Monday 26th. January 2004} % Gerry Murray (with deltas to doc}
-\usepackage{epsfig}
-\usepackage{amssymb}
-\usepackage{amsmath}
-\usepackage{amsfonts}
-%
-% WWW-RELEASE DOCUMENT STYLE
-% G.K.M. Tobin August-October 1999
-%    adapted from ARTICLE document style by Ken Traub, Olin Shivers
-%    also using elements of esub2acm.cls
-% HEAVILY MODIFIED, SUBSEQUENTLY, BY GERRY MURRAY 2000
-% ARTICLE DOCUMENT STYLE -- Released 16 March 1988
-%    for LaTeX version 2.09
-% Copyright (C) 1988 by Leslie Lamport
-%
-%
-%%% www-release.cls is an 'ALTERNATE-RELEASE' document style for producing
-%%% two-column camera-ready pages specifically for the WWW'02 conference.
-%%% THIS FILE DOES NOT STRICTLY ADHERE TO THE SIGS (BOARD-ENDORSED)
-%%% PROCEEDINGS STYLE. It has been designed to produce a 'tighter'
-%%% paper in response to concerns over page budgets.
-%%% The main features of this style are:
-%%%
-%%% 1)  Two columns.
-%%% 2)  Side and top margins of 4.5pc, bottom margin of 6pc, column gutter of
-%%%     2pc, hence columns are 20pc wide and 55.5pc tall.  (6pc =3D 1in, approx)
-%%% 3)  First page has title information, and an extra 6pc of space at the
-%%%     bottom of the first column for the ACM copyright notice.
-%%% 4)  Text is 9pt on 10pt baselines; titles (except main) are 9pt bold.
-%%%
-%%%
-%%% There are a few restrictions you must observe:
-%%%
-%%% 1)  You cannot change the font size; ACM wants you to use 9pt.
-%%% 3)  You must start your paper with the \maketitle command.  Prior to the
-%%%     \maketitle you must have \title and \author commands.  If you have a
-%%%     \date command it will be ignored; no date appears on the paper, since
-%%%     the proceedings will have a date on the front cover.
-%%% 4)  Marginal paragraphs, tables of contents, lists of figures and tables,
-%%%     and page headings are all forbidden.
-%%% 5)  The `figure' environment will produce a figure one column wide; if you
-%%%     want one that is two columns wide, use `figure*'.
-%%%
-%
-%%%
-%%% WARNING:
-%%% Some dvi-ps converters heuristically allow chars to drift from their
-%%% true positions a few pixels. This may be noticeable with the 9pt sans-serif
-%%% bold font used for section headers.
-%%% You may turn this hackery off via the -e option:
-%%%     dvips -e 0 foo.dvi >foo.ps
-%%%
-\typeout{Document Class 'www2005-submission' From www-release by Gerry Murray}
-\typeout{Based in part upon document Style `acmconf' <22 May 89>. Hacked 4/91 by}
-\typeout{shivers@cs.cmu.edu, 4/93 by theobald@cs.mcgill.ca}
-\typeout{Excerpts were taken from (Journal Style) 'esub2acm.cls'.}
-\typeout{****** Bugs/comments/suggestions/technicalities to Gerry Murray -- murray@hq.acm.org ******}
-\typeout{Questions on the style, SIGS policies, etc. to Julie Goetz goetz@acm.org or Adrienne Griscti griscti@acm.org}
-\oddsidemargin 4.5pc
-\evensidemargin 4.5pc
-\advance\oddsidemargin by -1in  % Correct for LaTeX gratuitousness
-\advance\evensidemargin by -1in % Correct for LaTeX gratuitousness
-\marginparwidth 0pt             % Margin pars are not allowed.
-\marginparsep 11pt              % Horizontal space between outer margin and
-                                % marginal note
-
-                                % Top of page:
-\topmargin 4.5pc                % Nominal distance from top of page to top of
-                                % box containing running head.
-\advance\topmargin by -1in      % Correct for LaTeX gratuitousness
-\headheight 0pt                 % Height of box containing running head.
-\headsep 0pt                    % Space between running head and text.
-                                % Bottom of page:
-\footskip 30pt                  % Distance from baseline of box containing foot
-                                % to baseline of last line of text.
-\@ifundefined{footheight}{\newdimen\footheight}{}% this is for LaTeX2e
-\footheight 12pt                % Height of box containing running foot.
-
-%% Must redefine the top margin so there's room for headers and
-%% page numbers if you are using the preprint option. Footers
-%% are OK as is. Olin.
-\advance\topmargin by -37pt     % Leave 37pt above text for headers
-\headheight 12pt                % Height of box containing running head.
-\headsep 25pt                   % Space between running head and text.
-
-\textheight 666pt       % 9 1/4 column height
-\textwidth 42pc         % Width of text line.
-                        % For two-column mode:
-\columnsep 2pc          %    Space between columns
-\columnseprule 0pt      %    Width of rule between columns.
-\hfuzz 1pt              % Allow some variation in column width, otherwise it's
-                        % too hard to typeset in narrow columns.
-
-\footnotesep 5.6pt      % Height of strut placed at the beginning of every
-                        % footnote =3D height of normal \footnotesize strut,
-                        % so no extra space between footnotes.
-
-\skip\footins 8.1pt plus 4pt minus 2pt  % Space between last line of text and
-                                        % top of first footnote.
-\floatsep 11pt plus 2pt minus 2pt       % Space between adjacent floats moved
-                                        % to top or bottom of text page.
-\textfloatsep 18pt plus 2pt minus 4pt   % Space between main text and floats
-                                        % at top or bottom of page.
-\intextsep 11pt plus 2pt minus 2pt      % Space between in-text figures and
-                                        % text.
-\@ifundefined{@maxsep}{\newdimen\@maxsep}{}% this is for LaTeX2e
-\@maxsep 18pt                           % The maximum of \floatsep,
-                                        % \textfloatsep and \intextsep (minus
-                                        % the stretch and shrink).
-\dblfloatsep 11pt plus 2pt minus 2pt    % Same as \floatsep for double-column
-                                        % figures in two-column mode.
-\dbltextfloatsep 18pt plus 2pt minus 4pt% \textfloatsep for double-column
-                                        % floats.
-\@ifundefined{@dblmaxsep}{\newdimen\@dblmaxsep}{}% this is for LaTeX2e
-\@dblmaxsep 18pt                        % The maximum of \dblfloatsep and
-                                        % \dbltexfloatsep.
-\@fptop 0pt plus 1fil    % Stretch at top of float page/column. (Must be
-                         % 0pt plus ...)
-\@fpsep 8pt plus 2fil    % Space between floats on float page/column.
-\@fpbot 0pt plus 1fil    % Stretch at bottom of float page/column. (Must be
-                         % 0pt plus ... )
-\@dblfptop 0pt plus 1fil % Stretch at top of float page. (Must be 0pt plus ...)
-\@dblfpsep 8pt plus 2fil % Space between floats on float page.
-\@dblfpbot 0pt plus 1fil % Stretch at bottom of float page. (Must be
-                         % 0pt plus ... )
-\marginparpush 5pt       % Minimum vertical separation between two marginal
-                         % notes.
-
-\parskip 0pt plus 1pt            % Extra vertical space between paragraphs.
-\parindent 9pt  % GM July 2000 / was 0pt - width of paragraph indentation.
-\partopsep 2pt plus 1pt minus 1pt% Extra vertical space, in addition to
-                                 % \parskip and \topsep, added when user
-                                 % leaves blank line before environment.
-
-\@lowpenalty   51       % Produced by \nopagebreak[1] or \nolinebreak[1]
-\@medpenalty  151       % Produced by \nopagebreak[2] or \nolinebreak[2]
-\@highpenalty 301       % Produced by \nopagebreak[3] or \nolinebreak[3]
-
-\@beginparpenalty -\@lowpenalty % Before a list or paragraph environment.
-\@endparpenalty   -\@lowpenalty % After a list or paragraph environment.
-\@itempenalty     -\@lowpenalty % Between list items.
-
-\@namedef{ds@10pt}{\@latexerr{The `10pt' option is not allowed in the `acmconf'
-  document style.}\@eha}
-\@namedef{ds@11pt}{\@latexerr{The `11pt' option is not allowed in the `acmconf'
-  document style.}\@eha}
-\@namedef{ds@12pt}{\@latexerr{The `12pt' option is not allowed in the `acmconf'
-  document style.}\@eha}
-
-\@options
-
-\lineskip 2pt           % \lineskip is 1pt for all font sizes.
-\normallineskip 2pt
-\def\baselinestretch{1}
-
-\abovedisplayskip 9pt plus2pt minus4.5pt%
-\belowdisplayskip \abovedisplayskip
-\abovedisplayshortskip  \z@ plus3pt%
-\belowdisplayshortskip  5.4pt plus3pt minus3pt%
-\let\@listi\@listI     % Setting of \@listi added 9 Jun 87
-
-\def\small{\@setsize\small{9pt}\viiipt\@viiipt
-\abovedisplayskip 7.6pt plus 3pt minus 4pt%
-\belowdisplayskip \abovedisplayskip
-\abovedisplayshortskip \z@ plus2pt%
-\belowdisplayshortskip 3.6pt plus2pt minus 2pt
-\def\@listi{\leftmargin\leftmargini %% Added 22 Dec 87
-\topsep 4pt plus 2pt minus 2pt\parsep 2pt plus 1pt minus 1pt
-\itemsep \parsep}}
-
-\def\footnotesize{\@setsize\footnotesize{9pt}\ixpt\@ixpt
-\abovedisplayskip 6.4pt plus 2pt minus 4pt%
-\belowdisplayskip \abovedisplayskip
-\abovedisplayshortskip \z@ plus 1pt%
-\belowdisplayshortskip 2.7pt plus 1pt minus 2pt
-\def\@listi{\leftmargin\leftmargini %% Added 22 Dec 87
-\topsep 3pt plus 1pt minus 1pt\parsep 2pt plus 1pt minus 1pt
-\itemsep \parsep}}
-
-\newcount\aucount
-\newcount\originalaucount
-\newdimen\auwidth
-\auwidth=\textwidth
-\newdimen\auskip
-\newcount\auskipcount
-\newdimen\auskip
-\global\auskip=1pc
-\newdimen\allauboxes
-\allauboxes=\auwidth
-\newtoks\addauthors
-\newcount\addauflag
-\global\addauflag=0 %Haven't shown additional authors yet
-
-\newtoks\subtitletext
-\gdef\subtitle#1{\subtitletext={#1}}
-
-\gdef\additionalauthors#1{\addauthors={#1}}
-
-\gdef\numberofauthors#1{\global\aucount=#1
-\ifnum\aucount>3\global\originalaucount=\aucount \global\aucount=3\fi %g}
-\global\auskipcount=\aucount\global\advance\auskipcount by 1
-\global\multiply\auskipcount by 2
-\global\multiply\auskip by \auskipcount
-\global\advance\auwidth by -\auskip
-\global\divide\auwidth by \aucount}
-
-% \and was modified to count the number of authors.  GKMT 12 Aug 1999
-\def\alignauthor{%                  % \begin{tabular}
-\end{tabular}%
-  \begin{tabular}[t]{p{\auwidth}}\centering}%
-
-%  *** NOTE *** NOTE *** NOTE *** NOTE ***
-%  If you have 'font problems' then you may need
-%  to change these, e.g. 'arialb' instead of "arialbd".
-%  Gerry Murray 11/11/1999
-%  *** OR ** comment out block A and activate block B or vice versa.
-% **********************************************
-%
-%  -- Start of block A -- (Type 1 or Truetype fonts)
-%\newfont{\secfnt}{timesbd at 12pt} % was timenrb originally - now is timesbd
-%\newfont{\secit}{timesbi at 12pt}   %13 Jan 00 gkmt
-%\newfont{\subsecfnt}{timesi at 11pt} % was timenrri originally - now is timesi
-%\newfont{\subsecit}{timesbi at 11pt} % 13 Jan 00 gkmt -- was times changed to timesbi gm 2/4/2000
-%                         % because "normal" is italic, "italic" is Roman
-%\newfont{\ttlfnt}{arialbd at 18pt} % was arialb originally - now is arialbd
-%\newfont{\ttlit}{arialbi at 18pt}    % 13 Jan 00 gkmt
-%\newfont{\subttlfnt}{arial at 14pt} % was arialr originally - now is arial
-%\newfont{\subttlit}{ariali at 14pt} % 13 Jan 00 gkmt
-%\newfont{\subttlbf}{arialbd at 14pt}  % 13 Jan 00 gkmt
-%\newfont{\aufnt}{arial at 12pt} % was arialr originally - now is arial
-%\newfont{\auit}{ariali at 12pt} % 13 Jan 00 gkmt
-%\newfont{\affaddr}{arial at 10pt} % was arialr originally - now is arial
-%\newfont{\affaddrit}{ariali at 10pt} %13 Jan 00 gkmt
-%\newfont{\eaddfnt}{arial at 12pt} % was arialr originally - now is arial
-%\newfont{\ixpt}{times at 9pt} % was timenrr originally - now is times
-%\newfont{\confname}{timesi at 8pt} % was timenrri - now is timesi
-%\newfont{\crnotice}{times at 8pt} % was timenrr originally - now is times
-%\newfont{\ninept}{times at 9pt} % was timenrr originally - now is times
-% *********************************************
-%  -- End of block A --
-%
-%
-% -- Start of block B -- METAFONT
-% +++++++++++++++++++++++++++++++++++++++++++++
-% Next (default) block for those using Metafont
-% Gerry Murray 11/11/1999
-% *** THIS BLOCK FOR THOSE USING METAFONT *****
-% *********************************************
-\newfont{\secfnt}{ptmb at 12pt}
-\newfont{\secit}{ptmbi at 12pt}    %13 Jan 00 gkmt
-\newfont{\subsecfnt}{ptmri at 11pt}
-\newfont{\subsecit}{ptmbi at 11pt}  % 13 Jan 00 gkmt -- was ptmr changed to ptmbi gm 2/4/2000
-                         % because "normal" is italic, "italic" is Roman
-\newfont{\ttlfnt}{phvb at 18pt}
-\newfont{\ttlit}{phvbo at 18pt}    % GM 2/4/2000
-\newfont{\subttlfnt}{phvr at 14pt}
-\newfont{\subttlit}{phvro at 14pt} % GM 2/4/2000
-\newfont{\subttlbf}{phvb at 14pt}  % 13 Jan 00 gkmt
-\newfont{\aufnt}{phvr at 12pt}
-\newfont{\auit}{phvro at 12pt}     % GM 2/4/2000
-\newfont{\affaddr}{phvr at 10pt}
-\newfont{\affaddrit}{phvro at 10pt} % GM 2/4/2000
-\newfont{\eaddfnt}{phvr at 12pt}
-\newfont{\ixpt}{ptmr at 9pt}
-\newfont{\confname}{ptmri at 8pt}
-\newfont{\crnotice}{ptmr at 8pt}
-\newfont{\ninept}{ptmr at 9pt}
-% +++++++++++++++++++++++++++++++++++++++++++++
-% -- End of block B --
-
-\def\email#1{{{\eaddfnt{\vskip 4pt#1}}}}
-
-\def\addauthorsection{\ifnum\originalaucount>3
-    \section{Additional Authors}\the\addauthors
-  \fi}
-
-\newcount\savesection
-\newcount\sectioncntr
-\global\sectioncntr=1
-
-\setcounter{secnumdepth}{3}
-
-\def\appendix{\par
-\section*{APPENDIX}
-\setcounter{section}{0}
- \setcounter{subsection}{0}
- \def\thesection{\Alph{section}} }
-
-\leftmargini 22.5pt
-\leftmarginii 19.8pt    % > \labelsep + width of '(m)'
-\leftmarginiii 16.8pt   % > \labelsep + width of 'vii.'
-\leftmarginiv 15.3pt    % > \labelsep + width of 'M.'
-\leftmarginv 9pt
-\leftmarginvi 9pt
-
-\leftmargin\leftmargini
-\labelsep 4.5pt
-\labelwidth\leftmargini\advance\labelwidth-\labelsep
-
-\def\@listI{\leftmargin\leftmargini \parsep 3.6pt plus 2pt minus 1pt%
-\topsep 7.2pt plus 2pt minus 4pt%
-\itemsep 3.6pt plus 2pt minus 1pt}
-
-\let\@listi\@listI
-\@listi
-
-\def\@listii{\leftmargin\leftmarginii
-   \labelwidth\leftmarginii\advance\labelwidth-\labelsep
-   \topsep 3.6pt plus 2pt minus 1pt
-   \parsep 1.8pt plus 0.9pt minus 0.9pt
-   \itemsep \parsep}
-
-\def\@listiii{\leftmargin\leftmarginiii
-    \labelwidth\leftmarginiii\advance\labelwidth-\labelsep
-    \topsep 1.8pt plus 0.9pt minus 0.9pt
-    \parsep \z@ \partopsep 1pt plus 0pt minus 1pt
-    \itemsep \topsep}
-
-\def\@listiv{\leftmargin\leftmarginiv
-     \labelwidth\leftmarginiv\advance\labelwidth-\labelsep}
-
-\def\@listv{\leftmargin\leftmarginv
-     \labelwidth\leftmarginv\advance\labelwidth-\labelsep}
-
-\def\@listvi{\leftmargin\leftmarginvi
-     \labelwidth\leftmarginvi\advance\labelwidth-\labelsep}
-
-\def\labelenumi{\theenumi.}
-\def\theenumi{\arabic{enumi}}
-
-\def\labelenumii{(\theenumii)}
-\def\theenumii{\alph{enumii}}
-\def\p@enumii{\theenumi}
-
-\def\labelenumiii{\theenumiii.}
-\def\theenumiii{\roman{enumiii}}
-\def\p@enumiii{\theenumi(\theenumii)}
-
-\def\labelenumiv{\theenumiv.}
-\def\theenumiv{\Alph{enumiv}}
-\def\p@enumiv{\p@enumiii\theenumiii}
-
-\def\labelitemi{$\bullet$}
-\def\labelitemii{\bf --}
-\def\labelitemiii{$\ast$}
-\def\labelitemiv{$\cdot$}
-
-\def\verse{\let\\=\@centercr
-  \list{}{\itemsep\z@ \itemindent -1.5em\listparindent \itemindent
-          \rightmargin\leftmargin\advance\leftmargin 1.5em}\item[]}
-\let\endverse\endlist
-
-\def\quotation{\list{}{\listparindent 1.5em
-    \itemindent\listparindent
-    \rightmargin\leftmargin \parsep 0pt plus 1pt}\item[]}
-\let\endquotation=\endlist
-
-\def\quote{\list{}{\rightmargin\leftmargin}\item[]}
-\let\endquote=\endlist
-
-\def\descriptionlabel#1{\hspace\labelsep \bf #1}
-\def\description{\list{}{\labelwidth\z@ \itemindent-\leftmargin
-       \let\makelabel\descriptionlabel}}
-
-\let\enddescription\endlist
-
-\def\theequation{\arabic{equation}}
-
-\arraycolsep 4.5pt   % Half the space between columns in an array environment.
-\tabcolsep 5.4pt    % Half the space between columns in a tabular environment.
-\arrayrulewidth .4pt % Width of rules in array and tabular environment.
-\doublerulesep 1.8pt % Space between adjacent rules in array or tabular env.
-
-\tabbingsep \labelsep   % Space used by the \' command.  (See LaTeX manual.)
-
-\skip\@mpfootins =\skip\footins
-
-\fboxsep =2.7pt      % Space left between box and text by \fbox and \framebox.
-\fboxrule =.4pt      % Width of rules in box made by \fbox and \framebox.
-
-\def\thepart{\Roman{part}} % Roman numeral part numbers.
-\def\thesection       {\arabic{section}}
-\def\thesubsection    {\thesection.\arabic{subsection}}
-%\def\thesubsubsection {\thesubsection.\arabic{subsubsection}} % GM 7/30/2002
-%\def\theparagraph     {\thesubsubsection.\arabic{paragraph}}  % GM 7/30/2002
-\def\thesubparagraph  {\theparagraph.\arabic{subparagraph}}
-
-\def\@pnumwidth{1.55em}
-\def\@tocrmarg {2.55em}
-\def\@dotsep{4.5}
-\setcounter{tocdepth}{3}
-
-\def\tableofcontents{\@latexerr{\tableofcontents: Tables of contents are not
-  allowed in the `acmconf' document style.}\@eha}
-
-\def\l@part#1#2{\addpenalty{\@secpenalty}
-   \addvspace{2.25em plus 1pt}  % space above part line
-   \begingroup
-   \@tempdima 3em       % width of box holding part number, used by
-     \parindent \z@ \rightskip \@pnumwidth      %% \numberline
-     \parfillskip -\@pnumwidth
-     {\large \bf        % set line in \large boldface
-     \leavevmode        % TeX command to enter horizontal mode.
-     #1\hfil \hbox to\@pnumwidth{\hss #2}}\par
-     \nobreak           % Never break after part entry
-   \endgroup}
-
-\def\l@section#1#2{\addpenalty{\@secpenalty} % good place for page break
-   \addvspace{1.0em plus 1pt}   % space above toc entry
-   \@tempdima 1.5em             % width of box holding section number
-   \begingroup
-    \parindent  \z@ \rightskip \@pnumwidth
-     \parfillskip -\@pnumwidth
-     \bf                        % Boldface.
-     \leavevmode                % TeX command to enter horizontal mode.
-      \advance\leftskip\@tempdima %% added 5 Feb 88 to conform to
-      \hskip -\leftskip           %% 25 Jan 88 change to \numberline
-     #1\nobreak\hfil \nobreak\hbox to\@pnumwidth{\hss #2}\par
-   \endgroup}
-
-
-\def\l@subsection{\@dottedtocline{2}{1.5em}{2.3em}}
-\def\l@subsubsection{\@dottedtocline{3}{3.8em}{3.2em}}
-\def\l@paragraph{\@dottedtocline{4}{7.0em}{4.1em}}
-\def\l@subparagraph{\@dottedtocline{5}{10em}{5em}}
-
-\def\listoffigures{\@latexerr{\listoffigures: Lists of figures are not
-  allowed in the `acmconf' document style.}\@eha}
-
-\def\l@figure{\@dottedtocline{1}{1.5em}{2.3em}}
-
-\def\listoftables{\@latexerr{\listoftables: Lists of tables are not
-  allowed in the `acmconf' document style.}\@eha}
-\let\l@table\l@figure
-
-\def\footnoterule{\kern-3\p@
-  \hrule width .4\columnwidth
-  \kern 2.6\p@}                 % The \hrule has default height of .4pt .
-\long\def\@makefntext#1{\noindent 
-\hbox to .5em{\hss\textsuperscript{\@thefnmark}}#1}  % C. Clifton / GM Oct. 2nd. 2002
-\long\def\@maketntext#1{\noindent
-#1}
-
-\long\def\@maketitlenotetext#1#2{\noindent
-            \hbox to 1.8em{\hss$^{#1}$}#2}
-
-\setcounter{topnumber}{2}
-\def\topfraction{.7}
-\setcounter{bottomnumber}{1}
-\def\bottomfraction{.3}
-\setcounter{totalnumber}{3}
-\def\textfraction{.2}
-\def\floatpagefraction{.5}
-\setcounter{dbltopnumber}{2}
-\def\dbltopfraction{.7}
-\def\dblfloatpagefraction{.5}
-
-%
-\long\def\@makecaption#1#2{
-   \vskip \baselineskip
-   \setbox\@tempboxa\hbox{\textbf{#1: #2}}
-   \ifdim \wd\@tempboxa >\hsize % IF longer than one line:
-       \textbf{#1: #2}\par               %   THEN set as ordinary paragraph.
-     \else                      %   ELSE  center.
-       \hbox to\hsize{\hfil\box\@tempboxa\hfil}\par
-   \fi}
-
-%
-
-\long\def\@makecaption#1#2{
-   \vskip 10pt
-   \setbox\@tempboxa\hbox{\textbf{#1: #2}}
-   \ifdim \wd\@tempboxa >\hsize % IF longer than one line:
-       \textbf{#1: #2}\par                %   THEN set as ordinary paragraph.
-     \else                      %   ELSE  center.
-       \hbox to\hsize{\hfil\box\@tempboxa\hfil}
-   \fi}
-
-\@ifundefined{figure}{\newcounter {figure}} % this is for LaTeX2e
-
-\def\fps@figure{tbp}
-\def\ftype@figure{1}
-\def\ext@figure{lof}
-\def\fnum@figure{Figure \thefigure}
-\def\figure{\@float{figure}}
-\let\endfigure\end@float
-\@namedef{figure*}{\@dblfloat{figure}}
-\@namedef{endfigure*}{\end@dblfloat}
-
-\@ifundefined{table}{\newcounter {table}} % this is for LaTeX2e
-
-\def\fps@table{tbp}
-\def\ftype@table{2}
-\def\ext@table{lot}
-\def\fnum@table{Table \thetable}
-\def\table{\@float{table}}
-\let\endtable\end@float
-\@namedef{table*}{\@dblfloat{table}}
-\@namedef{endtable*}{\end@dblfloat}
-
-\newtoks\titleboxnotes
-\newcount\titleboxnoteflag
-
-\def\maketitle{\par
- \begingroup
-   \def\thefootnote{\fnsymbol{footnote}}
-   \def\@makefnmark{\hbox
-       to 0pt{$^{\@thefnmark}$\hss}}
-     \twocolumn[\@maketitle]
-\@thanks
- \endgroup
- \setcounter{footnote}{0}
- \let\maketitle\relax
- \let\@maketitle\relax
- \gdef\@thanks{}\gdef\@author{}\gdef\@title{}\gdef\@subtitle{}\let\thanks\relax
- \@copyrightspace}
-
-%% CHANGES ON NEXT LINES
-\newif\if@ll % to record which version of LaTeX is in use
-
-\expandafter\ifx\csname LaTeXe\endcsname\relax % LaTeX2.09 is used
-\else% LaTeX2e is used, so set ll to true
-\global\@lltrue
-\fi
-
-\if@ll
-  \NeedsTeXFormat{LaTeX2e}
-  \ProvidesClass{www2005-submission} [2004/1/26 - V1.4 - based on sig-alt-release.cls]
-  \RequirePackage{latexsym}% QUERY: are these two really needed?
-  \let\dooptions\ProcessOptions
-\else
-  \let\dooptions\@options
-\fi
-%% END CHANGES
-
-\def\@height{height}
-\def\@width{width}
-\def\@minus{minus}
-\def\@plus{plus}
-\def\hb@xt@{\hbox to}
-\newif\if@faircopy
-\@faircopyfalse
-\def\ds@faircopy{\@faircopytrue}
-
-\def\ds@preprint{\@faircopyfalse}
-
-\@twosidetrue
-\@mparswitchtrue
-\def\ds@draft{\overfullrule 5\p@}
-%% CHANGE ON NEXT LINE
-\dooptions
-
-\lineskip \p@
-\normallineskip \p@
-\def\baselinestretch{1}
-\def\@ptsize{0} %needed for amssymbols.sty
-
-%% CHANGES ON NEXT LINES
-\if@ll% allow use of old-style font change commands in LaTeX2e
-\@maxdepth\maxdepth
-%
-\DeclareOldFontCommand{\rm}{\ninept\rmfamily}{\mathrm}
-\DeclareOldFontCommand{\sf}{\normalfont\sffamily}{\mathsf}
-\DeclareOldFontCommand{\tt}{\normalfont\ttfamily}{\mathtt}
-\DeclareOldFontCommand{\bf}{\normalfont\bfseries}{\mathbf}
-\DeclareOldFontCommand{\it}{\normalfont\itshape}{\mathit}
-\DeclareOldFontCommand{\sl}{\normalfont\slshape}{\@nomath\sl}
-\DeclareOldFontCommand{\sc}{\normalfont\scshape}{\@nomath\sc}
-\DeclareRobustCommand*{\cal}{\@fontswitch{\relax}{\mathcal}}
-\DeclareRobustCommand*{\mit}{\@fontswitch{\relax}{\mathnormal}}
-\fi
-%
-\if@ll
- \renewcommand{\rmdefault}{cmr}  % was 'ttm'
-% Note! I have also found 'mvr' to work ESPECIALLY well.
-% Gerry - October 1999
-% You may need to change your LV1times.fd file so that sc is
-% mapped to cmcsc - -for smallcaps -- that is if you decide
-% to change {cmr} to {times} above. (Not recommended)
-  \renewcommand{\@ptsize}{}
-  \renewcommand{\normalsize}{%
-    \@setfontsize\normalsize\@ixpt{10.5\p@}%\ninept%
-    \abovedisplayskip 6\p@ \@plus2\p@ \@minus\p@
-    \belowdisplayskip \abovedisplayskip
-    \abovedisplayshortskip 6\p@ \@minus 3\p@
-    \belowdisplayshortskip 6\p@ \@minus 3\p@
-    \let\@listi\@listI
-  }
-\else
-  \def\@normalsize{%changed next to 9 from 10
-    \@setsize\normalsize{9\p@}\ixpt\@ixpt
-   \abovedisplayskip 6\p@ \@plus2\p@ \@minus\p@
-    \belowdisplayskip \abovedisplayskip
-    \abovedisplayshortskip 6\p@ \@minus 3\p@
-    \belowdisplayshortskip 6\p@ \@minus 3\p@
-    \let\@listi\@listI
-  }%
-\fi
-\if@ll
-  \newcommand\scriptsize{\@setfontsize\scriptsize\@viipt{8\p@}}
-  \newcommand\tiny{\@setfontsize\tiny\@vpt{6\p@}}
-  \newcommand\large{\@setfontsize\large\@xiipt{14\p@}}
-  \newcommand\Large{\@setfontsize\Large\@xivpt{18\p@}}
-  \newcommand\LARGE{\@setfontsize\LARGE\@xviipt{20\p@}}
-  \newcommand\huge{\@setfontsize\huge\@xxpt{25\p@}}
-  \newcommand\Huge{\@setfontsize\Huge\@xxvpt{30\p@}}
-\else
-  \def\scriptsize{\@setsize\scriptsize{8\p@}\viipt\@viipt}
-  \def\tiny{\@setsize\tiny{6\p@}\vpt\@vpt}
-  \def\large{\@setsize\large{14\p@}\xiipt\@xiipt}
-  \def\Large{\@setsize\Large{18\p@}\xivpt\@xivpt}
-  \def\LARGE{\@setsize\LARGE{20\p@}\xviipt\@xviipt}
-  \def\huge{\@setsize\huge{25\p@}\xxpt\@xxpt}
-  \def\Huge{\@setsize\Huge{30\p@}\xxvpt\@xxvpt}
-\fi
-\normalsize
-
-% make aubox hsize/number of authors up to 3, less gutter
-% then showbox gutter showbox gutter showbox -- GKMT Aug 99
-\newbox\@acmtitlebox
-\def\@maketitle{\newpage
- \null
- \setbox\@acmtitlebox\vbox{%
-\baselineskip 20pt
-\vskip 2em                   % Vertical space above title.
-   \begin{center}
-    {\ttlfnt \@title\par}       % Title set in 18pt Helvetica (Arial) bold size.
-    \vskip 1.5em                % Vertical space after title.
-%This should be the subtitle.
-{\subttlfnt \the\subtitletext\par}\vskip 1.25em%\fi
-    {\baselineskip 16pt\aufnt   % each author set in \12 pt Arial, in a
-     \lineskip .5em             % tabular environment
-     \begin{tabular}[t]{c}\@author
-     \end{tabular}\par}
-    \vskip 1.5em               % Vertical space after author.
-   \end{center}}
- \dimen0=\ht\@acmtitlebox
- \advance\dimen0 by -12.75pc\relax % Increased space for title box -- KBT
- \unvbox\@acmtitlebox
- \ifdim\dimen0<0.0pt\relax\vskip-\dimen0\fi}
-
-
-\newcount\titlenotecount
-\global\titlenotecount=0
-\newtoks\tntoks
-\newtoks\tntokstwo
-\newtoks\tntoksthree
-\newtoks\tntoksfour
-\newtoks\tntoksfive
-
-\def\abstract{
-\ifnum\titlenotecount>0 % was =1
-    \insert\footins{%
-    \reset@font\footnotesize
-        \interlinepenalty\interfootnotelinepenalty
-        \splittopskip\footnotesep
-        \splitmaxdepth \dp\strutbox \floatingpenalty \@MM
-        \hsize\columnwidth \@parboxrestore
-        \protected@edef\@currentlabel{%
-        }%
-        \color@begingroup
-\ifnum\titlenotecount=1
-      \@maketntext{%
-         \raisebox{4pt}{$\ast$}\rule\z@\footnotesep\ignorespaces\the\tntoks\@finalstrut\strutbox}%
-\fi
-\ifnum\titlenotecount=2
-      \@maketntext{%
-      \raisebox{4pt}{$\ast$}\rule\z@\footnotesep\ignorespaces\the\tntoks\par\@finalstrut\strutbox}%
-\@maketntext{%
-         \raisebox{4pt}{$\dagger$}\rule\z@\footnotesep\ignorespaces\the\tntokstwo\@finalstrut\strutbox}%
-\fi
-\ifnum\titlenotecount=3
-      \@maketntext{%
-         \raisebox{4pt}{$\ast$}\rule\z@\footnotesep\ignorespaces\the\tntoks\par\@finalstrut\strutbox}%
-\@maketntext{%
-         \raisebox{4pt}{$\dagger$}\rule\z@\footnotesep\ignorespaces\the\tntokstwo\par\@finalstrut\strutbox}%
-\@maketntext{%
-         \raisebox{4pt}{$\ddagger$}\rule\z@\footnotesep\ignorespaces\the\tntoksthree\@finalstrut\strutbox}%
-\fi
-\ifnum\titlenotecount=4
-      \@maketntext{%
-         \raisebox{4pt}{$\ast$}\rule\z@\footnotesep\ignorespaces\the\tntoks\par\@finalstrut\strutbox}%
-\@maketntext{%
-         \raisebox{4pt}{$\dagger$}\rule\z@\footnotesep\ignorespaces\the\tntokstwo\par\@finalstrut\strutbox}%
-\@maketntext{%
-         \raisebox{4pt}{$\ddagger$}\rule\z@\footnotesep\ignorespaces\the\tntoksthree\par\@finalstrut\strutbox}%
-\@maketntext{%
-         \raisebox{4pt}{$\S$}\rule\z@\footnotesep\ignorespaces\the\tntoksfour\@finalstrut\strutbox}%
-\fi
-\ifnum\titlenotecount=5
-      \@maketntext{%
-         \raisebox{4pt}{$\ast$}\rule\z@\footnotesep\ignorespaces\the\tntoks\par\@finalstrut\strutbox}%
-\@maketntext{%
-         \raisebox{4pt}{$\dagger$}\rule\z@\footnotesep\ignorespaces\the\tntokstwo\par\@finalstrut\strutbox}%
-\@maketntext{%
-         \raisebox{4pt}{$\ddagger$}\rule\z@\footnotesep\ignorespaces\the\tntoksthree\par\@finalstrut\strutbox}%
-\@maketntext{%
-         \raisebox{4pt}{$\S$}\rule\z@\footnotesep\ignorespaces\the\tntoksfour\par\@finalstrut\strutbox}%
-\@maketntext{%
-         \raisebox{4pt}{$\P$}\rule\z@\footnotesep\ignorespaces\the\tntoksfive\@finalstrut\strutbox}%
-\fi
-   \color@endgroup} %g}
-\fi
-\setcounter{footnote}{0}
-\section*{ABSTRACT}\normalsize%\ninept
-}
-
-\def\endabstract{\if@twocolumn\else\endquotation\fi}
-
-\def\keywords{\if@twocolumn
-\section*{Keywords}
-\else \small
-\quotation
-\fi}
-
-\def\terms{\if@twocolumn
-\section*{General Terms}
-\else \small
-\quotation
-\fi}
-
-% -- Classification needs to be a bit smart due to optionals - Gerry/Georgia November 2nd. 1999
-\newcount\catcount
-\global\catcount=1
-
-\def\category#1#2#3{%
-\ifnum\catcount=1
-\section*{Categories and Subject Descriptors}
-\advance\catcount by 1\else{\unskip; }\fi
-    \@ifnextchar [{\@category{#1}{#2}{#3}}{\@category{#1}{#2}{#3}[]}%
-}
-
-\def\@category#1#2#3[#4]{%
-    \begingroup
-        \let\and\relax
-            #1 [\textbf{#2}]%
-            \if!#4!%
-                \if!#3!\else : #3\fi
-            \else
-                :\space
-                \if!#3!\else #3\kern\z@---\hskip\z@\fi
-                \textit{#4}%
-            \fi
-    \endgroup
-}
-%
-
-%%% This section (written by KBT) handles the 1" box in the lower left
-%%% corner of the left column of the first page by creating a picture,
-%%% and inserting the predefined string at the bottom (with a negative
-%%% displacement to offset the space allocated for a non-existent
-%%% caption).
-%%%
-\newtoks\copyrightnotice
-\def\ftype@copyrightbox{8}
-\def\@copyrightspace{
-\@float{copyrightbox}[b]
-\begin{center}
-\setlength{\unitlength}{1pc}
-\begin{picture}(20,0) %Space for (specific) copyright notice was 20,6
-\put(0,-0.95){\crnotice{\@toappear}}
-\end{picture}
-\end{center}
-\end@float}
-
-\def\@toappear{} % Default setting blank - commands below change this.
-\long\def\toappear#1{\def\@toappear{\parbox[b]{20pc}{\baselineskip 9pt#1}}}
-\def\toappearbox#1{\def\@toappear{\raisebox{5pt}{\framebox[20pc]{\parbox[b]{19pc}{#1}}}}}
-
-\newtoks\conf
-\newtoks\confinfo
-\def\conferenceinfo#1#2{\global\conf={#1}\global\confinfo{#2}}
-
-
-\def\marginpar{\@latexerr{The \marginpar command is not allowed in the
-  `acmconf' document style.}\@eha}
-
-\mark{{}{}}     % Initializes TeX's marks
-
-\def\today{\ifcase\month\or
-  January\or February\or March\or April\or May\or June\or
-  July\or August\or September\or October\or November\or December\fi
-  \space\number\day, \number\year}
-
-\def\@begintheorem#1#2{%
-    \parskip 0pt % GM July 2000 (for tighter spacing)
-    \trivlist
-    \item[%
-        \hskip 10\p@
-        \hskip \labelsep
-        {{\sc #1}\hskip 5\p@\relax#2.}%
-    ]
-    \it
-}
-\def\@opargbegintheorem#1#2#3{%
-    \parskip 0pt % GM July 2000 (for tighter spacing)
-    \trivlist
-    \item[%
-        \hskip 10\p@
-        \hskip \labelsep
-        {\sc #1\ #2\             % This mod by Gerry to enumerate corollaries
-   \setbox\@tempboxa\hbox{(#3)}  % and bracket the 'corollary title'
-        \ifdim \wd\@tempboxa>\z@ % and retain the correct numbering of e.g. theorems
-            \hskip 5\p@\relax    % if they occur 'around' said corollaries.
-            \box\@tempboxa       % Gerry - Nov. 1999.
-        \fi.}%
-    ]
-    \it
-}
-\newif\if@qeded
-
-% -- original
-%\def\proof{%
-%  \vspace{-\parskip} % GM July 2000 (for tighter spacing)
-%    \global\@qededfalse
-%    \@ifnextchar[{\@xproof}{\@proof}%
-%}
-% -- end of original
-
-% (JSS) Fix for vertical spacing bug - Gerry Murray July 30th. 2002
-\def\proof{%
-\vspace{-\lastskip}\vspace{-\parsep}\penalty-51%
-\global\@qededfalse
-\@ifnextchar[{\@xproof}{\@proof}%
-}
-
-\def\endproof{%
-    \if@qeded\else\qed\fi
-    \endtrivlist
-}
-\def\@proof{%
-    \trivlist
-    \item[%
-        \hskip 10\p@
-        \hskip \labelsep
-        {\sc Proof.}%
-    ]
-    \ignorespaces
-}
-\def\@xproof[#1]{%
-    \trivlist
-    \item[\hskip 10\p@\hskip \labelsep{\sc Proof #1.}]%
-    \ignorespaces
-}
-\def\qed{%
-    \unskip
-    \kern 10\p@
-    \begingroup
-        \unitlength\p@
-        \linethickness{.4\p@}%
-        \framebox(6,6){}%
-    \endgroup
-    \global\@qededtrue
-}
-
-\def\newdef#1#2{%
-    \expandafter\@ifdefinable\csname #1\endcsname
-        {\@definecounter{#1}%
-         \expandafter\xdef\csname the#1\endcsname{\@thmcounter{#1}}%
-         \global\@namedef{#1}{\@defthm{#1}{#2}}%
-         \global\@namedef{end#1}{\@endtheorem}%
-    }%
-}
-\def\@defthm#1#2{%
-    \refstepcounter{#1}%
-    \@ifnextchar[{\@ydefthm{#1}{#2}}{\@xdefthm{#1}{#2}}%
-}
-\def\@xdefthm#1#2{%
-    \@begindef{#2}{\csname the#1\endcsname}%
-    \ignorespaces
-}
-\def\@ydefthm#1#2[#3]{%
-    \trivlist
-    \item[%
-        \hskip 10\p@
-        \hskip \labelsep
-        {\it #2%
-         \savebox\@tempboxa{#3}%
-         \ifdim \wd\@tempboxa>\z@
-            \ \box\@tempboxa
-         \fi.%
-        }]%
-    \ignorespaces
-}
-\def\@begindef#1#2{%
-    \trivlist
-    \item[%
-        \hskip 10\p@
-        \hskip \labelsep
-        {\it #1\ \rm #2.}%
-    ]%
-}
-\def\theequation{\arabic{equation}}
-
-\newcounter{part}
-\newcounter{section}
-\newcounter{subsection}[section]
-\newcounter{subsubsection}[subsection]
-\newcounter{paragraph}[subsubsection]
-\def\thepart{\Roman{part}}
-\def\thesection{\arabic{section}}
-\def\thesubsection{\thesection.\arabic{subsection}}
-\def\thesubsubsection{\thesubsection.\arabic{subsubsection}} %removed \subsecfnt 29 July 2002 gkmt
-\def\theparagraph{\thesubsubsection.\arabic{paragraph}} %removed \subsecfnt 29 July 2002 gkmt
-\newif\if@uchead
-\@ucheadfalse
-
-%% CHANGES: NEW NOTE
-%% NOTE: OK to use old-style font commands below, since they were
-%% suitably redefined for LaTeX2e
-%% END CHANGES
-\setcounter{secnumdepth}{3}
-\def\part{%
-    \@startsection{part}{9}{\z@}{-10\p@ \@plus -4\p@ \@minus -2\p@}
-        {4\p@}{\normalsize\@ucheadtrue}%
-}
-\def\section{%
-    \@startsection{section}{1}{\z@}{-10\p@ \@plus -4\p@ \@minus -2\p@}% GM
-    {4\p@}{\baselineskip 14pt\secfnt\@ucheadtrue}%
-}
-
-\def\subsection{%
-    \@startsection{subsection}{2}{\z@}{-8\p@ \@plus -2\p@ \@minus -\p@}
-    {4\p@}{\secfnt}%
-}
-\def\subsubsection{%
-    \@startsection{subsubsection}{3}{\z@}{-8\p@ \@plus -2\p@ \@minus -\p@}%
-    {4\p@}{\subsecfnt}%
-}
-\def\paragraph{%
-    \vskip 12pt\@startsection{paragraph}{3}{\z@}{6\p@ \@plus \p@}%
-    {-5\p@}{\subsecfnt}%
-}
-\let\@period=.
-\def\@startsection#1#2#3#4#5#6{%
-        \if@noskipsec  %gkmt, 11 aug 99
-        \global\let\@period\@empty
-        \leavevmode
-        \global\let\@period.%
-    \fi
-      \par %
-    \@tempskipa #4\relax
-    \@afterindenttrue
-    \ifdim \@tempskipa <\z@
-        \@tempskipa -\@tempskipa
-        \@afterindentfalse
-    \fi
-    \if@nobreak
-    \everypar{}%
-    \else
-        \addpenalty\@secpenalty
-        \addvspace\@tempskipa
-    \fi
-\parskip=0pt % GM July 2000 (non numbered) section heads
-    \@ifstar
-        {\@ssect{#3}{#4}{#5}{#6}}
-        {\@dblarg{\@sect{#1}{#2}{#3}{#4}{#5}{#6}}}%
-}
-\def\@sect#1#2#3#4#5#6[#7]#8{%
-    \ifnum #2>\c@secnumdepth
-        \let\@svsec\@empty
-    \else
-        \refstepcounter{#1}%
-        \edef\@svsec{%
-            \begingroup
-                %\ifnum#2>2 \noexpand\rm \fi % changed to next 29 July 2002 gkmt
-			\ifnum#2>2 \noexpand#6 \fi
-                \csname the#1\endcsname
-            \endgroup
-            \ifnum #2=1\relax .\fi
-            \hskip 1em
-        }%
-    \fi
-    \@tempskipa #5\relax
-    \ifdim \@tempskipa>\z@
-        \begingroup
-            #6\relax
-            \@hangfrom{\hskip #3\relax\@svsec}%
-            \begingroup
-                \interlinepenalty \@M
-                \if@uchead
-                    \uppercase{#8}%
-                \else
-                    #8%
-                \fi
-                \par
-            \endgroup
-        \endgroup
-        \csname #1mark\endcsname{#7}%
-        \vskip -12pt  %gkmt, 11 aug 99 and GM July 2000 (was -14) - numbered section head spacing
-\addcontentsline{toc}{#1}{%
-            \ifnum #2>\c@secnumdepth \else
-                \protect\numberline{\csname the#1\endcsname}%
-            \fi
-            #7%
-        }%
-    \else
-        \def\@svsechd{%
-            #6%
-            \hskip #3\relax
-            \@svsec
-            \if@uchead
-                \uppercase{#8}%
-            \else
-                #8%
-            \fi
-            \csname #1mark\endcsname{#7}%
-            \addcontentsline{toc}{#1}{%
-                \ifnum #2>\c@secnumdepth \else
-                    \protect\numberline{\csname the#1\endcsname}%
-                \fi
-                #7%
-            }%
-        }%
-    \fi
-    \@xsect{#5}\hskip 1pt
-    \par
-}
-\def\@xsect#1{%
-    \@tempskipa #1\relax
-    \ifdim \@tempskipa>\z@
-        \par
-        \nobreak
-        \vskip \@tempskipa
-        \@afterheading
-    \else
-        \global\@nobreakfalse
-        \global\@noskipsectrue
-        \everypar{%
-            \if@noskipsec
-                \global\@noskipsecfalse
-                \clubpenalty\@M
-                \hskip -\parindent
-                \begingroup
-                    \@svsechd
-                    \@period
-                \endgroup
-                \unskip
-                \@tempskipa #1\relax
-                \hskip -\@tempskipa
-            \else
-                \clubpenalty \@clubpenalty
-                \everypar{}%
-            \fi
-        }%
-    \fi
-    \ignorespaces
-}
-\def\@trivlist{%
-    \@topsepadd\topsep
-    \if@noskipsec
-        \global\let\@period\@empty
-        \leavevmode
-        \global\let\@period.%
-    \fi
-    \ifvmode
-        \advance\@topsepadd\partopsep
-    \else
-        \unskip
-        \par
-    \fi
-    \if@inlabel
-        \@noparitemtrue
-        \@noparlisttrue
-    \else
-        \@noparlistfalse
-        \@topsep\@topsepadd
-    \fi
-    \advance\@topsep \parskip
-    \leftskip\z@skip
-    \rightskip\@rightskip
-    \parfillskip\@flushglue
-    \@setpar{\if@newlist\else{\@@par}\fi}
-    \global\@newlisttrue
-    \@outerparskip\parskip
-}
-
-%%% Actually, 'abbrev' works just fine as the default
-%%% Bibliography style.
-
-\typeout{Using 'Abbrev' bibliography style}
-\newcommand\bibyear[2]{%
-    \unskip\quad\ignorespaces#1\unskip
-    \if#2..\quad \else \quad#2 \fi
-}
-\newcommand{\bibemph}[1]{{\em#1}}
-\newcommand{\bibemphic}[1]{{\em#1\/}}
-\newcommand{\bibsc}[1]{{\sc#1}}
-\def\@normalcite{%
-    \def\@cite##1##2{[##1\if@tempswa , ##2\fi]}%
-}
-\def\@citeNB{%
-    \def\@cite##1##2{##1\if@tempswa , ##2\fi}%
-}
-\def\@citeRB{%
-    \def\@cite##1##2{##1\if@tempswa , ##2\fi]}%
-}
-\def\start@cite#1#2{%
-    \edef\citeauthoryear##1##2##3{%
-        ###1%
-        \ifnum#2=\z@ \else\ ###2\fi
-    }%
-    \ifnum#1=\thr@@
-        \let\@@cite\@citeyear
-    \else
-        \let\@@cite\@citenormal
-    \fi
-    \@ifstar{\@citeNB\@@cite}{\@normalcite\@@cite}%
-}
-\def\cite{\start@cite23}
-\def\citeNP{\cite*}
-\def\citeA{\start@cite10}
-\def\citeANP{\citeA*}
-\def\shortcite{\start@cite23}
-\def\shortciteNP{\shortcite*}
-\def\shortciteA{\start@cite20}
-\def\shortciteANP{\shortciteA*}
-\def\citeyear{\start@cite30}
-\def\citeyearNP{\citeyear*}
-\def\citeN{%
-    \@citeRB
-    \def\citeauthoryear##1##2##3{##1\ [##3%
-        \def\reserved@a{##1}%
-        \def\citeauthoryear####1####2####3{%
-            \def\reserved@b{####1}%
-            \ifx\reserved@a\reserved@b
-                ####3%
-            \else
-                \errmessage{Package acmart Error: author mismatch
-                         in \string\citeN^^J^^J%
-                    See the acmart package documentation for explanation}%
-            \fi
-        }%
-    }%
-    \@ifstar\@citeyear\@citeyear
-}
-\def\shortciteN{%
-    \@citeRB
-    \def\citeauthoryear##1##2##3{##2\ [##3%
-        \def\reserved@a{##2}%
-        \def\citeauthoryear####1####2####3{%
-            \def\reserved@b{####2}%
-            \ifx\reserved@a\reserved@b
-                ####3%
-            \else
-                \errmessage{Package acmart Error: author mismatch
-                         in \string\shortciteN^^J^^J%
-                    See the acmart package documentation for explanation}%
-            \fi
-        }%
-    }%
-    \@ifstar\@citeyear\@citeyear  % GM July 2000
-}
-\def\@citenormal{%
-    \@ifnextchar [{\@tempswatrue\@citex;}
-                  {\@tempswafalse\@citex,[]}% % GM July 2000
-}
-\def\@citeyear{%
-    \@ifnextchar [{\@tempswatrue\@citex,}%
-                  {\@tempswafalse\@citex,[]}%
-}
-\def\@citex#1[#2]#3{%
-    \let\@citea\@empty
-    \@cite{%
-        \@for\@citeb:=#3\do{%
-            \@citea
-            \def\@citea{#1 }%
-            \edef\@citeb{\expandafter\@iden\@citeb}%
-            \if@filesw
-                \immediate\write\@auxout{\string\citation{\@citeb}}%
-            \fi
-            \@ifundefined{b@\@citeb}{%
-                {\bf ?}%
-                \@warning{%
-                    Citation `\@citeb' on page \thepage\space undefined%
-                }%
-            }%
-            {\csname b@\@citeb\endcsname}%
-        }%
-    }{#2}%
-}
-\let\@biblabel\@gobble
-\newdimen\bibindent
-\setcounter{enumi}{1}
-\bibindent=0em
-\def\thebibliography#1{%
-\ifnum\addauflag=0\addauthorsection\global\addauflag=1\fi
-    \section{%
-       {REFERENCES}
-         \vskip -9pt  % GM July 2000 (for tighter spacing)
-        \@mkboth{{\refname}}{{\refname}}%
-    }%
-    \list{[\arabic{enumi}]}{%
-        \settowidth\labelwidth{[#1]}%
-        \leftmargin\labelwidth
-        \advance\leftmargin\labelsep
-        \advance\leftmargin\bibindent
-        \parsep=0pt\itemsep=1pt % GM July 2000
-        \itemindent -\bibindent
-        \listparindent \itemindent
-        \usecounter{enumi}
-    }%
-    \let\newblock\@empty
-    \raggedright % GM July 2000
-    \sloppy
-    \sfcode`\.=1000\relax
-}
-
-
-\gdef\balancecolumns
-{\vfill\eject
-\global\@colht=\textheight
-\global\ht\@cclv=\textheight
-}
-
-\newcount\colcntr
-\global\colcntr=0
-\newbox\savebox
-
-\gdef \@makecol {%
-\global\advance\colcntr by 1
-\ifnum\colcntr>2 \global\colcntr=1\fi
-   \ifvoid\footins
-     \setbox\@outputbox \box\@cclv
-   \else
-     \setbox\@outputbox \vbox{%
-\boxmaxdepth \@maxdepth
-       \@tempdima\dp\@cclv
-       \unvbox \@cclv
-       \vskip-\@tempdima
-       \vskip \skip\footins
-       \color@begingroup
-         \normalcolor
-         \footnoterule
-         \unvbox \footins
-       \color@endgroup
-       }%
-   \fi
-   \xdef\@freelist{\@freelist\@midlist}%
-   \global \let \@midlist \@empty
-   \@combinefloats
-   \ifvbox\@kludgeins
-     \@makespecialcolbox
-   \else
-     \setbox\@outputbox \vbox to\@colht {%
-\@texttop
-       \dimen@ \dp\@outputbox
-       \unvbox \@outputbox
-   \vskip -\dimen@
-       \@textbottom
-       }%
-   \fi
-   \global \maxdepth \@maxdepth
-}
-\def\titlenote{\@ifnextchar[\@xtitlenote{\stepcounter\@mpfn
-\global\advance\titlenotecount by 1
-\ifnum\titlenotecount=1
-    \raisebox{9pt}{$\ast$}
-\fi
-\ifnum\titlenotecount=2
-    \raisebox{9pt}{$\dagger$}
-\fi
-\ifnum\titlenotecount=3
-    \raisebox{9pt}{$\ddagger$}
-\fi
-\ifnum\titlenotecount=4
-\raisebox{9pt}{$\S$}
-\fi
-\ifnum\titlenotecount=5
-\raisebox{9pt}{$\P$}
-\fi
-         \@titlenotetext
-}}
-
-\long\def\@titlenotetext#1{\insert\footins{%
-\ifnum\titlenotecount=1\global\tntoks={#1}\fi
-\ifnum\titlenotecount=2\global\tntokstwo={#1}\fi
-\ifnum\titlenotecount=3\global\tntoksthree={#1}\fi
-\ifnum\titlenotecount=4\global\tntoksfour={#1}\fi
-\ifnum\titlenotecount=5\global\tntoksfive={#1}\fi
-    \reset@font\footnotesize
-    \interlinepenalty\interfootnotelinepenalty
-    \splittopskip\footnotesep
-    \splitmaxdepth \dp\strutbox \floatingpenalty \@MM
-    \hsize\columnwidth \@parboxrestore
-    \protected@edef\@currentlabel{%
-    }%
-    \color@begingroup
-   \color@endgroup}}
-
-%%%%%%%%%%%%%%%%%%%%%%%%%
-\ps@plain
-\baselineskip=11pt
-\let\thepage\relax % For NO page numbers - GM Nov. 30th. 1999 and July 2000
-\def\setpagenumber#1{\global\setcounter{page}{#1}}
-%\pagenumbering{arabic}  % Arabic page numbers GM July 2000
-\twocolumn             % Double column.
-\flushbottom           % Even bottom -- alas, does not balance columns at end of document
-\pagestyle{plain}
-
-% Need Copyright Year and Copyright Data to be user definable (in .tex file).
-% Gerry Nov. 30th. 1999
-\newtoks\copyrtyr
-\newtoks\acmcopyr
-\newtoks\boilerplate
-%...
-%... Lisa needed the following to be an integral part of this particular class file
-%...
-%Copyright is held by the author/owner(s).
-%WWW2005, May 10--14, 2005, Chiba, Japan.
-%ACM 1-58113-680-3/04/0005.
-%
-%...
-%...
-%...
-%\global\acmcopyr={1-58113-449-5/02/0005}  % Default
-\global\acmcopyr={xxx}  % Default
-\global\copyrtyr={2003}                % Default - 12/4/2000 *** Gerry
-\def\CopyrightYear#1{\global\copyrtyr{#1}}
-\def\crdata#1{\global\acmcopyr{#1}}
-\def\permission#1{\global\boilerplate{#1}}
-%
-\global\boilerplate={Copyright is held by the author/owner(s).\\ {\it Operating System Reviews}}
-\newtoks\copyrightetc
-% original \global\copyrightetc{ACM 1-58113-844-X/04/0005.\the\acmcopyr}
-% commented out following line---cew
-%\global\copyrightetc{ACM 1-58113-844-X/04/0005}  % "xxx" not needed after the period.
-
-\toappear{\the\boilerplate\par
-{\confname{\the\conf}} \the\confinfo\par \the\copyrightetc.}
-%
-%% End of www2005-submission.cls -- V1.4 - 1/26/2004 --
-%% Gerry Murray -- Monday January 26th. 2004
+% "WWW2005-submission.CLS" - VERSION 1.4
+% "COMPATIBLE" WITH THE "ACM_PROC_ARTICLE-SP.CLS" V2.6SP
+% PRODUCES A 'TIGHTER' PAPER AND DOES INCLUDE A RELEASE STATEMENT
+% SPECIFICALLY TAILORED FOR WWW'YY. (As originally requested by Sheridan Printing 1/24/2002)
+% Original by Gerald Murray January 24th. 2002
+% Improved on 11/11/2002 - GM
+% Modified for "www2004" 1/26/2004 - GM
+% ---- Start of 'updates'  ----
+%
+% Allowance made to switch default fonts between those systems using
+% METAFONT and those using 'Type 1' or 'Truetype' fonts.
+% See LINE NUMBER 236 for details.
+% Also provided for enumerated/annotated Corollaries 'surrounded' by
+% enumerated Theorems (line 790).
+% Gerry November 11th. 1999
+%
+% Georgia fixed bug in sub-sub-section numbering in paragraphs (July 29th. 2002)
+% JS/GM fix to vertical spacing before Proofs (July 30th. 2002)
+% Superscript fix Oct. 2002
+%
+% ---- End of 'updates' ----
+%
+\def\fileversion{v1.4}          % for ACM's tracking purposes
+\def\filedate{January 26, 2004}    % Gerry Murray's tracking data
+\def\docdate {Monday 26th. January 2004} % Gerry Murray (with deltas to doc)
+\usepackage{epsfig}
+\usepackage{amssymb}
+\usepackage{amsmath}
+\usepackage{amsfonts}
+%
+% WWW-RELEASE DOCUMENT STYLE
+% G.K.M. Tobin August-October 1999
+%    adapted from ARTICLE document style by Ken Traub, Olin Shivers
+%    also using elements of esub2acm.cls
+% HEAVILY MODIFIED, SUBSEQUENTLY, BY GERRY MURRAY 2000
+% ARTICLE DOCUMENT STYLE -- Released 16 March 1988
+%    for LaTeX version 2.09
+% Copyright (C) 1988 by Leslie Lamport
+%
+%
+%%% www-release.cls is an 'ALTERNATE-RELEASE' document style for producing
+%%% two-column camera-ready pages specifically for the WWW'02 conference.
+%%% THIS FILE DOES NOT STRICTLY ADHERE TO THE SIGS (BOARD-ENDORSED)
+%%% PROCEEDINGS STYLE. It has been designed to produce a 'tighter'
+%%% paper in response to concerns over page budgets.
+%%% The main features of this style are:
+%%%
+%%% 1)  Two columns.
+%%% 2)  Side and top margins of 4.5pc, bottom margin of 6pc, column gutter of
+%%%     2pc, hence columns are 20pc wide and 55.5pc tall.  (6pc = 1in, approx)
+%%% 3)  First page has title information, and an extra 6pc of space at the
+%%%     bottom of the first column for the ACM copyright notice.
+%%% 4)  Text is 9pt on 10pt baselines; titles (except main) are 9pt bold.
+%%%
+%%%
+%%% There are a few restrictions you must observe:
+%%%
+%%% 1)  You cannot change the font size; ACM wants you to use 9pt.
+%%% 3)  You must start your paper with the \maketitle command.  Prior to the
+%%%     \maketitle you must have \title and \author commands.  If you have a
+%%%     \date command it will be ignored; no date appears on the paper, since
+%%%     the proceedings will have a date on the front cover.
+%%% 4)  Marginal paragraphs, tables of contents, lists of figures and tables,
+%%%     and page headings are all forbidden.
+%%% 5)  The `figure' environment will produce a figure one column wide; if you
+%%%     want one that is two columns wide, use `figure*'.
+%%%
+%
+%%%
+%%% WARNING:
+%%% Some dvi-ps converters heuristically allow chars to drift from their
+%%% true positions a few pixels. This may be noticeable with the 9pt sans-serif
+%%% bold font used for section headers.
+%%% You may turn this hackery off via the -e option:
+%%%     dvips -e 0 foo.dvi >foo.ps
+%%%
+\typeout{Document Class 'www2005-submission' From www-release by Gerry Murray}
+\typeout{Based in part upon document Style `acmconf' <22 May 89>. Hacked 4/91 by}
+\typeout{shivers@cs.cmu.edu, 4/93 by theobald@cs.mcgill.ca}
+\typeout{Excerpts were taken from (Journal Style) 'esub2acm.cls'.}
+\typeout{****** Bugs/comments/suggestions/technicalities to Gerry Murray -- murray@hq.acm.org ******}
+\typeout{Questions on the style, SIGS policies, etc. to Julie Goetz goetz@acm.org or Adrienne Griscti griscti@acm.org}
+\oddsidemargin 4.5pc
+\evensidemargin 4.5pc
+\advance\oddsidemargin by -1in  % Correct for LaTeX gratuitousness
+\advance\evensidemargin by -1in % Correct for LaTeX gratuitousness
+\marginparwidth 0pt             % Margin pars are not allowed.
+\marginparsep 11pt              % Horizontal space between outer margin and
+                                % marginal note
+
+                                % Top of page:
+\topmargin 4.5pc                % Nominal distance from top of page to top of
+                                % box containing running head.
+\advance\topmargin by -1in      % Correct for LaTeX gratuitousness
+\headheight 0pt                 % Height of box containing running head.
+\headsep 0pt                    % Space between running head and text.
+                                % Bottom of page:
+\footskip 30pt                  % Distance from baseline of box containing foot
+                                % to baseline of last line of text.
+\@ifundefined{footheight}{\newdimen\footheight}{}% this is for LaTeX2e
+\footheight 12pt                % Height of box containing running foot.
+
+%% Must redefine the top margin so there's room for headers and
+%% page numbers if you are using the preprint option. Footers
+%% are OK as is. Olin.
+\advance\topmargin by -37pt     % Leave 37pt above text for headers
+\headheight 12pt                % Height of box containing running head.
+\headsep 25pt                   % Space between running head and text.
+
+\textheight 666pt       % 9 1/4 column height
+\textwidth 42pc         % Width of text line.
+                        % For two-column mode:
+\columnsep 2pc          %    Space between columns
+\columnseprule 0pt      %    Width of rule between columns.
+\hfuzz 1pt              % Allow some variation in column width, otherwise it's
+                        % too hard to typeset in narrow columns.
+
+\footnotesep 5.6pt      % Height of strut placed at the beginning of every
+                        % footnote = height of normal \footnotesize strut,
+                        % so no extra space between footnotes.
+
+\skip\footins 8.1pt plus 4pt minus 2pt  % Space between last line of text and
+                                        % top of first footnote.
+\floatsep 11pt plus 2pt minus 2pt       % Space between adjacent floats moved
+                                        % to top or bottom of text page.
+\textfloatsep 18pt plus 2pt minus 4pt   % Space between main text and floats
+                                        % at top or bottom of page.
+\intextsep 11pt plus 2pt minus 2pt      % Space between in-text figures and
+                                        % text.
+\@ifundefined{@maxsep}{\newdimen\@maxsep}{}% this is for LaTeX2e
+\@maxsep 18pt                           % The maximum of \floatsep,
+                                        % \textfloatsep and \intextsep (minus
+                                        % the stretch and shrink).
+\dblfloatsep 11pt plus 2pt minus 2pt    % Same as \floatsep for double-column
+                                        % figures in two-column mode.
+\dbltextfloatsep 18pt plus 2pt minus 4pt% \textfloatsep for double-column
+                                        % floats.
+\@ifundefined{@dblmaxsep}{\newdimen\@dblmaxsep}{}% this is for LaTeX2e
+\@dblmaxsep 18pt                        % The maximum of \dblfloatsep and
+                                        % \dbltextfloatsep.
+\@fptop 0pt plus 1fil    % Stretch at top of float page/column. (Must be
+                         % 0pt plus ...)
+\@fpsep 8pt plus 2fil    % Space between floats on float page/column.
+\@fpbot 0pt plus 1fil    % Stretch at bottom of float page/column. (Must be
+                         % 0pt plus ... )
+\@dblfptop 0pt plus 1fil % Stretch at top of float page. (Must be 0pt plus ...)
+\@dblfpsep 8pt plus 2fil % Space between floats on float page.
+\@dblfpbot 0pt plus 1fil % Stretch at bottom of float page. (Must be
+                         % 0pt plus ... )
+\marginparpush 5pt       % Minimum vertical separation between two marginal
+                         % notes.
+
+\parskip 0pt plus 1pt            % Extra vertical space between paragraphs.
+\parindent 9pt  % GM July 2000 / was 0pt - width of paragraph indentation.
+\partopsep 2pt plus 1pt minus 1pt% Extra vertical space, in addition to
+                                 % \parskip and \topsep, added when user
+                                 % leaves blank line before environment.
+
+\@lowpenalty   51       % Produced by \nopagebreak[1] or \nolinebreak[1]
+\@medpenalty  151       % Produced by \nopagebreak[2] or \nolinebreak[2]
+\@highpenalty 301       % Produced by \nopagebreak[3] or \nolinebreak[3]
+
+\@beginparpenalty -\@lowpenalty % Before a list or paragraph environment.
+\@endparpenalty   -\@lowpenalty % After a list or paragraph environment.
+\@itempenalty     -\@lowpenalty % Between list items.
+
+\@namedef{ds@10pt}{\@latexerr{The `10pt' option is not allowed in the `acmconf'
+  document style.}\@eha}
+\@namedef{ds@11pt}{\@latexerr{The `11pt' option is not allowed in the `acmconf'
+  document style.}\@eha}
+\@namedef{ds@12pt}{\@latexerr{The `12pt' option is not allowed in the `acmconf'
+  document style.}\@eha}
+
+\@options
+
+\lineskip 2pt           % \lineskip is 1pt for all font sizes.
+\normallineskip 2pt
+\def\baselinestretch{1}
+
+\abovedisplayskip 9pt plus2pt minus4.5pt%
+\belowdisplayskip \abovedisplayskip
+\abovedisplayshortskip  \z@ plus3pt%
+\belowdisplayshortskip  5.4pt plus3pt minus3pt%
+\let\@listi\@listI     % Setting of \@listi added 9 Jun 87
+
+\def\small{\@setsize\small{9pt}\viiipt\@viiipt
+\abovedisplayskip 7.6pt plus 3pt minus 4pt%
+\belowdisplayskip \abovedisplayskip
+\abovedisplayshortskip \z@ plus2pt%
+\belowdisplayshortskip 3.6pt plus2pt minus 2pt
+\def\@listi{\leftmargin\leftmargini %% Added 22 Dec 87
+\topsep 4pt plus 2pt minus 2pt\parsep 2pt plus 1pt minus 1pt
+\itemsep \parsep}}
+
+\def\footnotesize{\@setsize\footnotesize{9pt}\ixpt\@ixpt
+\abovedisplayskip 6.4pt plus 2pt minus 4pt%
+\belowdisplayskip \abovedisplayskip
+\abovedisplayshortskip \z@ plus 1pt%
+\belowdisplayshortskip 2.7pt plus 1pt minus 2pt
+\def\@listi{\leftmargin\leftmargini %% Added 22 Dec 87
+\topsep 3pt plus 1pt minus 1pt\parsep 2pt plus 1pt minus 1pt
+\itemsep \parsep}}
+
+\newcount\aucount
+\newcount\originalaucount
+\newdimen\auwidth
+\auwidth=\textwidth
+\newdimen\auskip
+\newcount\auskipcount
+\newdimen\auskip
+\global\auskip=1pc
+\newdimen\allauboxes
+\allauboxes=\auwidth
+\newtoks\addauthors
+\newcount\addauflag
+\global\addauflag=0 %Haven't shown additional authors yet
+
+\newtoks\subtitletext
+\gdef\subtitle#1{\subtitletext={#1}}
+
+\gdef\additionalauthors#1{\addauthors={#1}}
+
+\gdef\numberofauthors#1{\global\aucount=#1
+\ifnum\aucount>3\global\originalaucount=\aucount \global\aucount=3\fi %g}
+\global\auskipcount=\aucount\global\advance\auskipcount by 1
+\global\multiply\auskipcount by 2
+\global\multiply\auskip by \auskipcount
+\global\advance\auwidth by -\auskip
+\global\divide\auwidth by \aucount}
+
+% \and was modified to count the number of authors.  GKMT 12 Aug 1999
+\def\alignauthor{%                  % \begin{tabular}
+\end{tabular}%
+  \begin{tabular}[t]{p{\auwidth}}\centering}%
+
+%  *** NOTE *** NOTE *** NOTE *** NOTE ***
+%  If you have 'font problems' then you may need
+%  to change these, e.g. 'arialb' instead of "arialbd".
+%  Gerry Murray 11/11/1999
+%  *** OR ** comment out block A and activate block B or vice versa.
+% **********************************************
+%
+%  -- Start of block A -- (Type 1 or Truetype fonts)
+%\newfont{\secfnt}{timesbd at 12pt} % was timenrb originally - now is timesbd
+%\newfont{\secit}{timesbi at 12pt}   %13 Jan 00 gkmt
+%\newfont{\subsecfnt}{timesi at 11pt} % was timenrri originally - now is timesi
+%\newfont{\subsecit}{timesbi at 11pt} % 13 Jan 00 gkmt -- was times changed to timesbi gm 2/4/2000
+%                         % because "normal" is italic, "italic" is Roman
+%\newfont{\ttlfnt}{arialbd at 18pt} % was arialb originally - now is arialbd
+%\newfont{\ttlit}{arialbi at 18pt}    % 13 Jan 00 gkmt
+%\newfont{\subttlfnt}{arial at 14pt} % was arialr originally - now is arial
+%\newfont{\subttlit}{ariali at 14pt} % 13 Jan 00 gkmt
+%\newfont{\subttlbf}{arialbd at 14pt}  % 13 Jan 00 gkmt
+%\newfont{\aufnt}{arial at 12pt} % was arialr originally - now is arial
+%\newfont{\auit}{ariali at 12pt} % 13 Jan 00 gkmt
+%\newfont{\affaddr}{arial at 10pt} % was arialr originally - now is arial
+%\newfont{\affaddrit}{ariali at 10pt} %13 Jan 00 gkmt
+%\newfont{\eaddfnt}{arial at 12pt} % was arialr originally - now is arial
+%\newfont{\ixpt}{times at 9pt} % was timenrr originally - now is times
+%\newfont{\confname}{timesi at 8pt} % was timenrri - now is timesi
+%\newfont{\crnotice}{times at 8pt} % was timenrr originally - now is times
+%\newfont{\ninept}{times at 9pt} % was timenrr originally - now is times
+% *********************************************
+%  -- End of block A --
+%
+%
+% -- Start of block B -- METAFONT
+% +++++++++++++++++++++++++++++++++++++++++++++
+% Next (default) block for those using Metafont
+% Gerry Murray 11/11/1999
+% *** THIS BLOCK FOR THOSE USING METAFONT *****
+% *********************************************
+\newfont{\secfnt}{ptmb at 12pt}
+\newfont{\secit}{ptmbi at 12pt}    %13 Jan 00 gkmt
+\newfont{\subsecfnt}{ptmri at 11pt}
+\newfont{\subsecit}{ptmbi at 11pt}  % 13 Jan 00 gkmt -- was ptmr changed to ptmbi gm 2/4/2000
+                         % because "normal" is italic, "italic" is Roman
+\newfont{\ttlfnt}{phvb at 18pt}
+\newfont{\ttlit}{phvbo at 18pt}    % GM 2/4/2000
+\newfont{\subttlfnt}{phvr at 14pt}
+\newfont{\subttlit}{phvro at 14pt} % GM 2/4/2000
+\newfont{\subttlbf}{phvb at 14pt}  % 13 Jan 00 gkmt
+\newfont{\aufnt}{phvr at 12pt}
+\newfont{\auit}{phvro at 12pt}     % GM 2/4/2000
+\newfont{\affaddr}{phvr at 10pt}
+\newfont{\affaddrit}{phvro at 10pt} % GM 2/4/2000
+\newfont{\eaddfnt}{phvr at 12pt}
+\newfont{\ixpt}{ptmr at 9pt}
+\newfont{\confname}{ptmri at 8pt}
+\newfont{\crnotice}{ptmr at 8pt}
+\newfont{\ninept}{ptmr at 9pt}
+% +++++++++++++++++++++++++++++++++++++++++++++
+% -- End of block B --
+
+\def\email#1{{{\eaddfnt{\vskip 4pt#1}}}}
+
+\def\addauthorsection{\ifnum\originalaucount>3
+    \section{Additional Authors}\the\addauthors
+  \fi}
+
+\newcount\savesection
+\newcount\sectioncntr
+\global\sectioncntr=1
+
+\setcounter{secnumdepth}{3}
+
+\def\appendix{\par
+\section*{APPENDIX}
+\setcounter{section}{0}
+ \setcounter{subsection}{0}
+ \def\thesection{\Alph{section}} }
+
+\leftmargini 22.5pt
+\leftmarginii 19.8pt    % > \labelsep + width of '(m)'
+\leftmarginiii 16.8pt   % > \labelsep + width of 'vii.'
+\leftmarginiv 15.3pt    % > \labelsep + width of 'M.'
+\leftmarginv 9pt
+\leftmarginvi 9pt
+
+\leftmargin\leftmargini
+\labelsep 4.5pt
+\labelwidth\leftmargini\advance\labelwidth-\labelsep
+
+\def\@listI{\leftmargin\leftmargini \parsep 3.6pt plus 2pt minus 1pt%
+\topsep 7.2pt plus 2pt minus 4pt%
+\itemsep 3.6pt plus 2pt minus 1pt}
+
+\let\@listi\@listI
+\@listi
+
+\def\@listii{\leftmargin\leftmarginii
+   \labelwidth\leftmarginii\advance\labelwidth-\labelsep
+   \topsep 3.6pt plus 2pt minus 1pt
+   \parsep 1.8pt plus 0.9pt minus 0.9pt
+   \itemsep \parsep}
+
+\def\@listiii{\leftmargin\leftmarginiii
+    \labelwidth\leftmarginiii\advance\labelwidth-\labelsep
+    \topsep 1.8pt plus 0.9pt minus 0.9pt
+    \parsep \z@ \partopsep 1pt plus 0pt minus 1pt
+    \itemsep \topsep}
+
+\def\@listiv{\leftmargin\leftmarginiv
+     \labelwidth\leftmarginiv\advance\labelwidth-\labelsep}
+
+\def\@listv{\leftmargin\leftmarginv
+     \labelwidth\leftmarginv\advance\labelwidth-\labelsep}
+
+\def\@listvi{\leftmargin\leftmarginvi
+     \labelwidth\leftmarginvi\advance\labelwidth-\labelsep}
+
+\def\labelenumi{\theenumi.}
+\def\theenumi{\arabic{enumi}}
+
+\def\labelenumii{(\theenumii)}
+\def\theenumii{\alph{enumii}}
+\def\p@enumii{\theenumi}
+
+\def\labelenumiii{\theenumiii.}
+\def\theenumiii{\roman{enumiii}}
+\def\p@enumiii{\theenumi(\theenumii)}
+
+\def\labelenumiv{\theenumiv.}
+\def\theenumiv{\Alph{enumiv}}
+\def\p@enumiv{\p@enumiii\theenumiii}
+
+\def\labelitemi{$\bullet$}
+\def\labelitemii{\bf --}
+\def\labelitemiii{$\ast$}
+\def\labelitemiv{$\cdot$}
+
+\def\verse{\let\\=\@centercr
+  \list{}{\itemsep\z@ \itemindent -1.5em\listparindent \itemindent
+          \rightmargin\leftmargin\advance\leftmargin 1.5em}\item[]}
+\let\endverse\endlist
+
+\def\quotation{\list{}{\listparindent 1.5em
+    \itemindent\listparindent
+    \rightmargin\leftmargin \parsep 0pt plus 1pt}\item[]}
+\let\endquotation=\endlist
+
+\def\quote{\list{}{\rightmargin\leftmargin}\item[]}
+\let\endquote=\endlist
+
+\def\descriptionlabel#1{\hspace\labelsep \bf #1}
+\def\description{\list{}{\labelwidth\z@ \itemindent-\leftmargin
+       \let\makelabel\descriptionlabel}}
+
+\let\enddescription\endlist
+
+\def\theequation{\arabic{equation}}
+
+\arraycolsep 4.5pt   % Half the space between columns in an array environment.
+\tabcolsep 5.4pt    % Half the space between columns in a tabular environment.
+\arrayrulewidth .4pt % Width of rules in array and tabular environment.
+\doublerulesep 1.8pt % Space between adjacent rules in array or tabular env.
+
+\tabbingsep \labelsep   % Space used by the \' command.  (See LaTeX manual.)
+
+\skip\@mpfootins =\skip\footins
+
+\fboxsep =2.7pt      % Space left between box and text by \fbox and \framebox.
+\fboxrule =.4pt      % Width of rules in box made by \fbox and \framebox.
+
+\def\thepart{\Roman{part}} % Roman numeral part numbers.
+\def\thesection       {\arabic{section}}
+\def\thesubsection    {\thesection.\arabic{subsection}}
+%\def\thesubsubsection {\thesubsection.\arabic{subsubsection}} % GM 7/30/2002
+%\def\theparagraph     {\thesubsubsection.\arabic{paragraph}}  % GM 7/30/2002
+\def\thesubparagraph  {\theparagraph.\arabic{subparagraph}}
+
+\def\@pnumwidth{1.55em}
+\def\@tocrmarg {2.55em}
+\def\@dotsep{4.5}
+\setcounter{tocdepth}{3}
+
+\def\tableofcontents{\@latexerr{\tableofcontents: Tables of contents are not
+  allowed in the `acmconf' document style.}\@eha}
+
+\def\l@part#1#2{\addpenalty{\@secpenalty}
+   \addvspace{2.25em plus 1pt}  % space above part line
+   \begingroup
+   \@tempdima 3em       % width of box holding part number, used by
+     \parindent \z@ \rightskip \@pnumwidth      %% \numberline
+     \parfillskip -\@pnumwidth
+     {\large \bf        % set line in \large boldface
+     \leavevmode        % TeX command to enter horizontal mode.
+     #1\hfil \hbox to\@pnumwidth{\hss #2}}\par
+     \nobreak           % Never break after part entry
+   \endgroup}
+
+\def\l@section#1#2{\addpenalty{\@secpenalty} % good place for page break
+   \addvspace{1.0em plus 1pt}   % space above toc entry
+   \@tempdima 1.5em             % width of box holding section number
+   \begingroup
+    \parindent  \z@ \rightskip \@pnumwidth
+     \parfillskip -\@pnumwidth
+     \bf                        % Boldface.
+     \leavevmode                % TeX command to enter horizontal mode.
+      \advance\leftskip\@tempdima %% added 5 Feb 88 to conform to
+      \hskip -\leftskip           %% 25 Jan 88 change to \numberline
+     #1\nobreak\hfil \nobreak\hbox to\@pnumwidth{\hss #2}\par
+   \endgroup}
+
+
+\def\l@subsection{\@dottedtocline{2}{1.5em}{2.3em}}
+\def\l@subsubsection{\@dottedtocline{3}{3.8em}{3.2em}}
+\def\l@paragraph{\@dottedtocline{4}{7.0em}{4.1em}}
+\def\l@subparagraph{\@dottedtocline{5}{10em}{5em}}
+
+\def\listoffigures{\@latexerr{\listoffigures: Lists of figures are not
+  allowed in the `acmconf' document style.}\@eha}
+
+\def\l@figure{\@dottedtocline{1}{1.5em}{2.3em}}
+
+\def\listoftables{\@latexerr{\listoftables: Lists of tables are not
+  allowed in the `acmconf' document style.}\@eha}
+\let\l@table\l@figure
+
+\def\footnoterule{\kern-3\p@
+  \hrule width .4\columnwidth
+  \kern 2.6\p@}                 % The \hrule has default height of .4pt .
+\long\def\@makefntext#1{\noindent
+\hbox to .5em{\hss\textsuperscript{\@thefnmark}}#1}  % C. Clifton / GM Oct. 2nd. 2002
+\long\def\@maketntext#1{\noindent
+#1}
+
+\long\def\@maketitlenotetext#1#2{\noindent
+            \hbox to 1.8em{\hss$^{#1}$}#2}
+
+\setcounter{topnumber}{2}
+\def\topfraction{.7}
+\setcounter{bottomnumber}{1}
+\def\bottomfraction{.3}
+\setcounter{totalnumber}{3}
+\def\textfraction{.2}
+\def\floatpagefraction{.5}
+\setcounter{dbltopnumber}{2}
+\def\dbltopfraction{.7}
+\def\dblfloatpagefraction{.5}
+
+%
+\long\def\@makecaption#1#2{
+   \vskip \baselineskip
+   \setbox\@tempboxa\hbox{\textbf{#1: #2}}
+   \ifdim \wd\@tempboxa >\hsize % IF longer than one line:
+       \textbf{#1: #2}\par               %   THEN set as ordinary paragraph.
+     \else                      %   ELSE  center.
+       \hbox to\hsize{\hfil\box\@tempboxa\hfil}\par
+   \fi}
+
+%
+
+\long\def\@makecaption#1#2{
+   \vskip 10pt
+   \setbox\@tempboxa\hbox{\textbf{#1: #2}}
+   \ifdim \wd\@tempboxa >\hsize % IF longer than one line:
+       \textbf{#1: #2}\par                %   THEN set as ordinary paragraph.
+     \else                      %   ELSE  center.
+       \hbox to\hsize{\hfil\box\@tempboxa\hfil}
+   \fi}
+
+\@ifundefined{figure}{\newcounter {figure}} % this is for LaTeX2e
+
+\def\fps@figure{tbp}
+\def\ftype@figure{1}
+\def\ext@figure{lof}
+\def\fnum@figure{Figure \thefigure}
+\def\figure{\@float{figure}}
+\let\endfigure\end@float
+\@namedef{figure*}{\@dblfloat{figure}}
+\@namedef{endfigure*}{\end@dblfloat}
+
+\@ifundefined{table}{\newcounter {table}} % this is for LaTeX2e
+
+\def\fps@table{tbp}
+\def\ftype@table{2}
+\def\ext@table{lot}
+\def\fnum@table{Table \thetable}
+\def\table{\@float{table}}
+\let\endtable\end@float
+\@namedef{table*}{\@dblfloat{table}}
+\@namedef{endtable*}{\end@dblfloat}
+
+\newtoks\titleboxnotes
+\newcount\titleboxnoteflag
+
+\def\maketitle{\par
+ \begingroup
+   \def\thefootnote{\fnsymbol{footnote}}
+   \def\@makefnmark{\hbox
+       to 0pt{$^{\@thefnmark}$\hss}}
+     \twocolumn[\@maketitle]
+\@thanks
+ \endgroup
+ \setcounter{footnote}{0}
+ \let\maketitle\relax
+ \let\@maketitle\relax
+ \gdef\@thanks{}\gdef\@author{}\gdef\@title{}\gdef\@subtitle{}\let\thanks\relax
+ \@copyrightspace}
+
+%% CHANGES ON NEXT LINES
+\newif\if@ll % to record which version of LaTeX is in use
+
+\expandafter\ifx\csname LaTeXe\endcsname\relax % LaTeX2.09 is used
+\else% LaTeX2e is used, so set ll to true
+\global\@lltrue
+\fi
+
+\if@ll
+  \NeedsTeXFormat{LaTeX2e}
+  \ProvidesClass{www2005-submission} [2004/1/26 - V1.4 - based on sig-alt-release.cls]
+  \RequirePackage{latexsym}% QUERY: are these two really needed?
+  \let\dooptions\ProcessOptions
+\else
+  \let\dooptions\@options
+\fi
+%% END CHANGES
+
+\def\@height{height}
+\def\@width{width}
+\def\@minus{minus}
+\def\@plus{plus}
+\def\hb@xt@{\hbox to}
+\newif\if@faircopy
+\@faircopyfalse
+\def\ds@faircopy{\@faircopytrue}
+
+\def\ds@preprint{\@faircopyfalse}
+
+\@twosidetrue
+\@mparswitchtrue
+\def\ds@draft{\overfullrule 5\p@}
+%% CHANGE ON NEXT LINE
+\dooptions
+
+\lineskip \p@
+\normallineskip \p@
+\def\baselinestretch{1}
+\def\@ptsize{0} %needed for amssymbols.sty
+
+%% CHANGES ON NEXT LINES
+\if@ll% allow use of old-style font change commands in LaTeX2e
+\@maxdepth\maxdepth
+%
+\DeclareOldFontCommand{\rm}{\ninept\rmfamily}{\mathrm}
+\DeclareOldFontCommand{\sf}{\normalfont\sffamily}{\mathsf}
+\DeclareOldFontCommand{\tt}{\normalfont\ttfamily}{\mathtt}
+\DeclareOldFontCommand{\bf}{\normalfont\bfseries}{\mathbf}
+\DeclareOldFontCommand{\it}{\normalfont\itshape}{\mathit}
+\DeclareOldFontCommand{\sl}{\normalfont\slshape}{\@nomath\sl}
+\DeclareOldFontCommand{\sc}{\normalfont\scshape}{\@nomath\sc}
+\DeclareRobustCommand*{\cal}{\@fontswitch{\relax}{\mathcal}}
+\DeclareRobustCommand*{\mit}{\@fontswitch{\relax}{\mathnormal}}
+\fi
+%
+\if@ll
+ \renewcommand{\rmdefault}{cmr}  % was 'ttm'
+% Note! I have also found 'mvr' to work ESPECIALLY well.
+% Gerry - October 1999
+% You may need to change your LV1times.fd file so that sc is
+% mapped to cmcsc - -for smallcaps -- that is if you decide
+% to change {cmr} to {times} above. (Not recommended)
+  \renewcommand{\@ptsize}{}
+  \renewcommand{\normalsize}{%
+    \@setfontsize\normalsize\@ixpt{10.5\p@}%\ninept%
+    \abovedisplayskip 6\p@ \@plus2\p@ \@minus\p@
+    \belowdisplayskip \abovedisplayskip
+    \abovedisplayshortskip 6\p@ \@minus 3\p@
+    \belowdisplayshortskip 6\p@ \@minus 3\p@
+    \let\@listi\@listI
+  }
+\else
+  \def\@normalsize{%changed next to 9 from 10
+    \@setsize\normalsize{9\p@}\ixpt\@ixpt
+   \abovedisplayskip 6\p@ \@plus2\p@ \@minus\p@
+    \belowdisplayskip \abovedisplayskip
+    \abovedisplayshortskip 6\p@ \@minus 3\p@
+    \belowdisplayshortskip 6\p@ \@minus 3\p@
+    \let\@listi\@listI
+  }%
+\fi
+\if@ll
+  \newcommand\scriptsize{\@setfontsize\scriptsize\@viipt{8\p@}}
+  \newcommand\tiny{\@setfontsize\tiny\@vpt{6\p@}}
+  \newcommand\large{\@setfontsize\large\@xiipt{14\p@}}
+  \newcommand\Large{\@setfontsize\Large\@xivpt{18\p@}}
+  \newcommand\LARGE{\@setfontsize\LARGE\@xviipt{20\p@}}
+  \newcommand\huge{\@setfontsize\huge\@xxpt{25\p@}}
+  \newcommand\Huge{\@setfontsize\Huge\@xxvpt{30\p@}}
+\else
+  \def\scriptsize{\@setsize\scriptsize{8\p@}\viipt\@viipt}
+  \def\tiny{\@setsize\tiny{6\p@}\vpt\@vpt}
+  \def\large{\@setsize\large{14\p@}\xiipt\@xiipt}
+  \def\Large{\@setsize\Large{18\p@}\xivpt\@xivpt}
+  \def\LARGE{\@setsize\LARGE{20\p@}\xviipt\@xviipt}
+  \def\huge{\@setsize\huge{25\p@}\xxpt\@xxpt}
+  \def\Huge{\@setsize\Huge{30\p@}\xxvpt\@xxvpt}
+\fi
+\normalsize
+
+% make aubox hsize/number of authors up to 3, less gutter
+% then showbox gutter showbox gutter showbox -- GKMT Aug 99
+\newbox\@acmtitlebox
+\def\@maketitle{\newpage
+ \null
+ \setbox\@acmtitlebox\vbox{%
+\baselineskip 20pt
+\vskip 2em                   % Vertical space above title.
+   \begin{center}
+    {\ttlfnt \@title\par}       % Title set in 18pt Helvetica (Arial) bold size.
+    \vskip 1.5em                % Vertical space after title.
+%This should be the subtitle.
+{\subttlfnt \the\subtitletext\par}\vskip 1.25em%\fi
+    {\baselineskip 16pt\aufnt   % each author set in \12 pt Arial, in a
+     \lineskip .5em             % tabular environment
+     \begin{tabular}[t]{c}\@author
+     \end{tabular}\par}
+    \vskip 1.5em               % Vertical space after author.
+   \end{center}}
+ \dimen0=\ht\@acmtitlebox
+ \advance\dimen0 by -12.75pc\relax % Increased space for title box -- KBT
+ \unvbox\@acmtitlebox
+ \ifdim\dimen0<0.0pt\relax\vskip-\dimen0\fi}
+
+
+\newcount\titlenotecount
+\global\titlenotecount=0
+\newtoks\tntoks
+\newtoks\tntokstwo
+\newtoks\tntoksthree
+\newtoks\tntoksfour
+\newtoks\tntoksfive
+
+\def\abstract{
+\ifnum\titlenotecount>0 % was =1
+    \insert\footins{%
+    \reset@font\footnotesize
+        \interlinepenalty\interfootnotelinepenalty
+        \splittopskip\footnotesep
+        \splitmaxdepth \dp\strutbox \floatingpenalty \@MM
+        \hsize\columnwidth \@parboxrestore
+        \protected@edef\@currentlabel{%
+        }%
+        \color@begingroup
+\ifnum\titlenotecount=1
+      \@maketntext{%
+         \raisebox{4pt}{$\ast$}\rule\z@\footnotesep\ignorespaces\the\tntoks\@finalstrut\strutbox}%
+\fi
+\ifnum\titlenotecount=2
+      \@maketntext{%
+      \raisebox{4pt}{$\ast$}\rule\z@\footnotesep\ignorespaces\the\tntoks\par\@finalstrut\strutbox}%
+\@maketntext{%
+         \raisebox{4pt}{$\dagger$}\rule\z@\footnotesep\ignorespaces\the\tntokstwo\@finalstrut\strutbox}%
+\fi
+\ifnum\titlenotecount=3
+      \@maketntext{%
+         \raisebox{4pt}{$\ast$}\rule\z@\footnotesep\ignorespaces\the\tntoks\par\@finalstrut\strutbox}%
+\@maketntext{%
+         \raisebox{4pt}{$\dagger$}\rule\z@\footnotesep\ignorespaces\the\tntokstwo\par\@finalstrut\strutbox}%
+\@maketntext{%
+         \raisebox{4pt}{$\ddagger$}\rule\z@\footnotesep\ignorespaces\the\tntoksthree\@finalstrut\strutbox}%
+\fi
+\ifnum\titlenotecount=4
+      \@maketntext{%
+         \raisebox{4pt}{$\ast$}\rule\z@\footnotesep\ignorespaces\the\tntoks\par\@finalstrut\strutbox}%
+\@maketntext{%
+         \raisebox{4pt}{$\dagger$}\rule\z@\footnotesep\ignorespaces\the\tntokstwo\par\@finalstrut\strutbox}%
+\@maketntext{%
+         \raisebox{4pt}{$\ddagger$}\rule\z@\footnotesep\ignorespaces\the\tntoksthree\par\@finalstrut\strutbox}%
+\@maketntext{%
+         \raisebox{4pt}{$\S$}\rule\z@\footnotesep\ignorespaces\the\tntoksfour\@finalstrut\strutbox}%
+\fi
+\ifnum\titlenotecount=5
+      \@maketntext{%
+         \raisebox{4pt}{$\ast$}\rule\z@\footnotesep\ignorespaces\the\tntoks\par\@finalstrut\strutbox}%
+\@maketntext{%
+         \raisebox{4pt}{$\dagger$}\rule\z@\footnotesep\ignorespaces\the\tntokstwo\par\@finalstrut\strutbox}%
+\@maketntext{%
+         \raisebox{4pt}{$\ddagger$}\rule\z@\footnotesep\ignorespaces\the\tntoksthree\par\@finalstrut\strutbox}%
+\@maketntext{%
+         \raisebox{4pt}{$\S$}\rule\z@\footnotesep\ignorespaces\the\tntoksfour\par\@finalstrut\strutbox}%
+\@maketntext{%
+         \raisebox{4pt}{$\P$}\rule\z@\footnotesep\ignorespaces\the\tntoksfive\@finalstrut\strutbox}%
+\fi
+   \color@endgroup} %g}
+\fi
+\setcounter{footnote}{0}
+\section*{ABSTRACT}\normalsize%\ninept
+}
+
+\def\endabstract{\if@twocolumn\else\endquotation\fi}
+
+\def\keywords{\if@twocolumn
+\section*{Keywords}
+\else \small
+\quotation
+\fi}
+
+\def\terms{\if@twocolumn
+\section*{General Terms}
+\else \small
+\quotation
+\fi}
+
+% -- Classification needs to be a bit smart due to optionals - Gerry/Georgia November 2nd. 1999
+\newcount\catcount
+\global\catcount=1
+
+\def\category#1#2#3{%
+\ifnum\catcount=1
+\section*{Categories and Subject Descriptors}
+\advance\catcount by 1\else{\unskip; }\fi
+    \@ifnextchar [{\@category{#1}{#2}{#3}}{\@category{#1}{#2}{#3}[]}%
+}
+
+\def\@category#1#2#3[#4]{%
+    \begingroup
+        \let\and\relax
+            #1 [\textbf{#2}]%
+            \if!#4!%
+                \if!#3!\else : #3\fi
+            \else
+                :\space
+                \if!#3!\else #3\kern\z@---\hskip\z@\fi
+                \textit{#4}%
+            \fi
+    \endgroup
+}
+%
+
+%%% This section (written by KBT) handles the 1" box in the lower left
+%%% corner of the left column of the first page by creating a picture,
+%%% and inserting the predefined string at the bottom (with a negative
+%%% displacement to offset the space allocated for a non-existent
+%%% caption).
+%%%
+\newtoks\copyrightnotice
+\def\ftype@copyrightbox{8}
+\def\@copyrightspace{
+\@float{copyrightbox}[b]
+\begin{center}
+\setlength{\unitlength}{1pc}
+\begin{picture}(20,0) %Space for (specific) copyright notice was 20,6
+\put(0,-0.95){\crnotice{\@toappear}}
+\end{picture}
+\end{center}
+\end@float}
+
+\def\@toappear{} % Default setting blank - commands below change this.
+\long\def\toappear#1{\def\@toappear{\parbox[b]{20pc}{\baselineskip 9pt#1}}}
+\def\toappearbox#1{\def\@toappear{\raisebox{5pt}{\framebox[20pc]{\parbox[b]{19pc}{#1}}}}}
+
+\newtoks\conf
+\newtoks\confinfo
+\def\conferenceinfo#1#2{\global\conf={#1}\global\confinfo{#2}}
+
+
+\def\marginpar{\@latexerr{The \marginpar command is not allowed in the
+  `acmconf' document style.}\@eha}
+
+\mark{{}{}}     % Initializes TeX's marks
+
+\def\today{\ifcase\month\or
+  January\or February\or March\or April\or May\or June\or
+  July\or August\or September\or October\or November\or December\fi
+  \space\number\day, \number\year}
+
+\def\@begintheorem#1#2{%
+    \parskip 0pt % GM July 2000 (for tighter spacing)
+    \trivlist
+    \item[%
+        \hskip 10\p@
+        \hskip \labelsep
+        {{\sc #1}\hskip 5\p@\relax#2.}%
+    ]
+    \it
+}
+\def\@opargbegintheorem#1#2#3{%
+    \parskip 0pt % GM July 2000 (for tighter spacing)
+    \trivlist
+    \item[%
+        \hskip 10\p@
+        \hskip \labelsep
+        {\sc #1\ #2\             % This mod by Gerry to enumerate corollaries
+   \setbox\@tempboxa\hbox{(#3)}  % and bracket the 'corollary title'
+        \ifdim \wd\@tempboxa>\z@ % and retain the correct numbering of e.g. theorems
+            \hskip 5\p@\relax    % if they occur 'around' said corollaries.
+            \box\@tempboxa       % Gerry - Nov. 1999.
+        \fi.}%
+    ]
+    \it
+}
+\newif\if@qeded
+
+% -- original
+%\def\proof{%
+%  \vspace{-\parskip} % GM July 2000 (for tighter spacing)
+%    \global\@qededfalse
+%    \@ifnextchar[{\@xproof}{\@proof}%
+%}
+% -- end of original
+
+% (JSS) Fix for vertical spacing bug - Gerry Murray July 30th. 2002
+\def\proof{%
+\vspace{-\lastskip}\vspace{-\parsep}\penalty-51%
+\global\@qededfalse
+\@ifnextchar[{\@xproof}{\@proof}%
+}
+
+\def\endproof{%
+    \if@qeded\else\qed\fi
+    \endtrivlist
+}
+\def\@proof{%
+    \trivlist
+    \item[%
+        \hskip 10\p@
+        \hskip \labelsep
+        {\sc Proof.}%
+    ]
+    \ignorespaces
+}
+\def\@xproof[#1]{%
+    \trivlist
+    \item[\hskip 10\p@\hskip \labelsep{\sc Proof #1.}]%
+    \ignorespaces
+}
+\def\qed{%
+    \unskip
+    \kern 10\p@
+    \begingroup
+        \unitlength\p@
+        \linethickness{.4\p@}%
+        \framebox(6,6){}%
+    \endgroup
+    \global\@qededtrue
+}
+
+\def\newdef#1#2{%
+    \expandafter\@ifdefinable\csname #1\endcsname
+        {\@definecounter{#1}%
+         \expandafter\xdef\csname the#1\endcsname{\@thmcounter{#1}}%
+         \global\@namedef{#1}{\@defthm{#1}{#2}}%
+         \global\@namedef{end#1}{\@endtheorem}%
+    }%
+}
+\def\@defthm#1#2{%
+    \refstepcounter{#1}%
+    \@ifnextchar[{\@ydefthm{#1}{#2}}{\@xdefthm{#1}{#2}}%
+}
+\def\@xdefthm#1#2{%
+    \@begindef{#2}{\csname the#1\endcsname}%
+    \ignorespaces
+}
+\def\@ydefthm#1#2[#3]{%
+    \trivlist
+    \item[%
+        \hskip 10\p@
+        \hskip \labelsep
+        {\it #2%
+         \savebox\@tempboxa{#3}%
+         \ifdim \wd\@tempboxa>\z@
+            \ \box\@tempboxa
+         \fi.%
+        }]%
+    \ignorespaces
+}
+\def\@begindef#1#2{%
+    \trivlist
+    \item[%
+        \hskip 10\p@
+        \hskip \labelsep
+        {\it #1\ \rm #2.}%
+    ]%
+}
+\def\theequation{\arabic{equation}}
+
+\newcounter{part}
+\newcounter{section}
+\newcounter{subsection}[section]
+\newcounter{subsubsection}[subsection]
+\newcounter{paragraph}[subsubsection]
+\def\thepart{\Roman{part}}
+\def\thesection{\arabic{section}}
+\def\thesubsection{\thesection.\arabic{subsection}}
+\def\thesubsubsection{\thesubsection.\arabic{subsubsection}} %removed \subsecfnt 29 July 2002 gkmt
+\def\theparagraph{\thesubsubsection.\arabic{paragraph}} %removed \subsecfnt 29 July 2002 gkmt
+\newif\if@uchead
+\@ucheadfalse
+
+%% CHANGES: NEW NOTE
+%% NOTE: OK to use old-style font commands below, since they were
+%% suitably redefined for LaTeX2e
+%% END CHANGES
+\setcounter{secnumdepth}{3}
+\def\part{%
+    \@startsection{part}{9}{\z@}{-10\p@ \@plus -4\p@ \@minus -2\p@}
+        {4\p@}{\normalsize\@ucheadtrue}%
+}
+\def\section{%
+    \@startsection{section}{1}{\z@}{-10\p@ \@plus -4\p@ \@minus -2\p@}% GM
+    {4\p@}{\baselineskip 14pt\secfnt\@ucheadtrue}%
+}
+
+\def\subsection{%
+    \@startsection{subsection}{2}{\z@}{-8\p@ \@plus -2\p@ \@minus -\p@}
+    {4\p@}{\secfnt}%
+}
+\def\subsubsection{%
+    \@startsection{subsubsection}{3}{\z@}{-8\p@ \@plus -2\p@ \@minus -\p@}%
+    {4\p@}{\subsecfnt}%
+}
+\def\paragraph{%
+    \vskip 12pt\@startsection{paragraph}{3}{\z@}{6\p@ \@plus \p@}%
+    {-5\p@}{\subsecfnt}%
+}
+\let\@period=.
+\def\@startsection#1#2#3#4#5#6{%
+        \if@noskipsec  %gkmt, 11 aug 99
+        \global\let\@period\@empty
+        \leavevmode
+        \global\let\@period.%
+    \fi
+      \par %
+    \@tempskipa #4\relax
+    \@afterindenttrue
+    \ifdim \@tempskipa <\z@
+        \@tempskipa -\@tempskipa
+        \@afterindentfalse
+    \fi
+    \if@nobreak
+    \everypar{}%
+    \else
+        \addpenalty\@secpenalty
+        \addvspace\@tempskipa
+    \fi
+\parskip=0pt % GM July 2000 (non numbered) section heads
+    \@ifstar
+        {\@ssect{#3}{#4}{#5}{#6}}
+        {\@dblarg{\@sect{#1}{#2}{#3}{#4}{#5}{#6}}}%
+}
+\def\@sect#1#2#3#4#5#6[#7]#8{%
+    \ifnum #2>\c@secnumdepth
+        \let\@svsec\@empty
+    \else
+        \refstepcounter{#1}%
+        \edef\@svsec{%
+            \begingroup
+                %\ifnum#2>2 \noexpand\rm \fi % changed to next 29 July 2002 gkmt
+			\ifnum#2>2 \noexpand#6 \fi
+                \csname the#1\endcsname
+            \endgroup
+            \ifnum #2=1\relax .\fi
+            \hskip 1em
+        }%
+    \fi
+    \@tempskipa #5\relax
+    \ifdim \@tempskipa>\z@
+        \begingroup
+            #6\relax
+            \@hangfrom{\hskip #3\relax\@svsec}%
+            \begingroup
+                \interlinepenalty \@M
+                \if@uchead
+                    \uppercase{#8}%
+                \else
+                    #8%
+                \fi
+                \par
+            \endgroup
+        \endgroup
+        \csname #1mark\endcsname{#7}%
+        \vskip -12pt  %gkmt, 11 aug 99 and GM July 2000 (was -14) - numbered section head spacing
+\addcontentsline{toc}{#1}{%
+            \ifnum #2>\c@secnumdepth \else
+                \protect\numberline{\csname the#1\endcsname}%
+            \fi
+            #7%
+        }%
+    \else
+        \def\@svsechd{%
+            #6%
+            \hskip #3\relax
+            \@svsec
+            \if@uchead
+                \uppercase{#8}%
+            \else
+                #8%
+            \fi
+            \csname #1mark\endcsname{#7}%
+            \addcontentsline{toc}{#1}{%
+                \ifnum #2>\c@secnumdepth \else
+                    \protect\numberline{\csname the#1\endcsname}%
+                \fi
+                #7%
+            }%
+        }%
+    \fi
+    \@xsect{#5}\hskip 1pt
+    \par
+}
+\def\@xsect#1{%
+    \@tempskipa #1\relax
+    \ifdim \@tempskipa>\z@
+        \par
+        \nobreak
+        \vskip \@tempskipa
+        \@afterheading
+    \else
+        \global\@nobreakfalse
+        \global\@noskipsectrue
+        \everypar{%
+            \if@noskipsec
+                \global\@noskipsecfalse
+                \clubpenalty\@M
+                \hskip -\parindent
+                \begingroup
+                    \@svsechd
+                    \@period
+                \endgroup
+                \unskip
+                \@tempskipa #1\relax
+                \hskip -\@tempskipa
+            \else
+                \clubpenalty \@clubpenalty
+                \everypar{}%
+            \fi
+        }%
+    \fi
+    \ignorespaces
+}
+\def\@trivlist{%
+    \@topsepadd\topsep
+    \if@noskipsec
+        \global\let\@period\@empty
+        \leavevmode
+        \global\let\@period.%
+    \fi
+    \ifvmode
+        \advance\@topsepadd\partopsep
+    \else
+        \unskip
+        \par
+    \fi
+    \if@inlabel
+        \@noparitemtrue
+        \@noparlisttrue
+    \else
+        \@noparlistfalse
+        \@topsep\@topsepadd
+    \fi
+    \advance\@topsep \parskip
+    \leftskip\z@skip
+    \rightskip\@rightskip
+    \parfillskip\@flushglue
+    \@setpar{\if@newlist\else{\@@par}\fi}
+    \global\@newlisttrue
+    \@outerparskip\parskip
+}
+
+%%% Actually, 'abbrev' works just fine as the default
+%%% Bibliography style.
+
+\typeout{Using 'Abbrev' bibliography style}
+\newcommand\bibyear[2]{%
+    \unskip\quad\ignorespaces#1\unskip
+    \if#2..\quad \else \quad#2 \fi
+}
+\newcommand{\bibemph}[1]{{\em#1}}
+\newcommand{\bibemphic}[1]{{\em#1\/}}
+\newcommand{\bibsc}[1]{{\sc#1}}
+\def\@normalcite{%
+    \def\@cite##1##2{[##1\if@tempswa , ##2\fi]}%
+}
+\def\@citeNB{%
+    \def\@cite##1##2{##1\if@tempswa , ##2\fi}%
+}
+\def\@citeRB{%
+    \def\@cite##1##2{##1\if@tempswa , ##2\fi]}%
+}
+\def\start@cite#1#2{%
+    \edef\citeauthoryear##1##2##3{%
+        ###1%
+        \ifnum#2=\z@ \else\ ###2\fi
+    }%
+    \ifnum#1=\thr@@
+        \let\@@cite\@citeyear
+    \else
+        \let\@@cite\@citenormal
+    \fi
+    \@ifstar{\@citeNB\@@cite}{\@normalcite\@@cite}%
+}
+\def\cite{\start@cite23}
+\def\citeNP{\cite*}
+\def\citeA{\start@cite10}
+\def\citeANP{\citeA*}
+\def\shortcite{\start@cite23}
+\def\shortciteNP{\shortcite*}
+\def\shortciteA{\start@cite20}
+\def\shortciteANP{\shortciteA*}
+\def\citeyear{\start@cite30}
+\def\citeyearNP{\citeyear*}
+\def\citeN{%
+    \@citeRB
+    \def\citeauthoryear##1##2##3{##1\ [##3%
+        \def\reserved@a{##1}%
+        \def\citeauthoryear####1####2####3{%
+            \def\reserved@b{####1}%
+            \ifx\reserved@a\reserved@b
+                ####3%
+            \else
+                \errmessage{Package acmart Error: author mismatch
+                         in \string\citeN^^J^^J%
+                    See the acmart package documentation for explanation}%
+            \fi
+        }%
+    }%
+    \@ifstar\@citeyear\@citeyear
+}
+\def\shortciteN{%
+    \@citeRB
+    \def\citeauthoryear##1##2##3{##2\ [##3%
+        \def\reserved@a{##2}%
+        \def\citeauthoryear####1####2####3{%
+            \def\reserved@b{####2}%
+            \ifx\reserved@a\reserved@b
+                ####3%
+            \else
+                \errmessage{Package acmart Error: author mismatch
+                         in \string\shortciteN^^J^^J%
+                    See the acmart package documentation for explanation}%
+            \fi
+        }%
+    }%
+    \@ifstar\@citeyear\@citeyear  % GM July 2000
+}
+\def\@citenormal{%
+    \@ifnextchar [{\@tempswatrue\@citex;}
+                  {\@tempswafalse\@citex,[]}% % GM July 2000
+}
+\def\@citeyear{%
+    \@ifnextchar [{\@tempswatrue\@citex,}%
+                  {\@tempswafalse\@citex,[]}%
+}
+\def\@citex#1[#2]#3{%
+    \let\@citea\@empty
+    \@cite{%
+        \@for\@citeb:=#3\do{%
+            \@citea
+            \def\@citea{#1 }%
+            \edef\@citeb{\expandafter\@iden\@citeb}%
+            \if@filesw
+                \immediate\write\@auxout{\string\citation{\@citeb}}%
+            \fi
+            \@ifundefined{b@\@citeb}{%
+                {\bf ?}%
+                \@warning{%
+                    Citation `\@citeb' on page \thepage\space undefined%
+                }%
+            }%
+            {\csname b@\@citeb\endcsname}%
+        }%
+    }{#2}%
+}
+\let\@biblabel\@gobble
+\newdimen\bibindent
+\setcounter{enumi}{1}
+\bibindent=0em
+\def\thebibliography#1{%
+\ifnum\addauflag=0\addauthorsection\global\addauflag=1\fi
+    \section{%
+       {REFERENCES}
+         \vskip -9pt  % GM July 2000 (for tighter spacing)
+        \@mkboth{{\refname}}{{\refname}}%
+    }%
+    \list{[\arabic{enumi}]}{%
+        \settowidth\labelwidth{[#1]}%
+        \leftmargin\labelwidth
+        \advance\leftmargin\labelsep
+        \advance\leftmargin\bibindent
+        \parsep=0pt\itemsep=1pt % GM July 2000
+        \itemindent -\bibindent
+        \listparindent \itemindent
+        \usecounter{enumi}
+    }%
+    \let\newblock\@empty
+    \raggedright % GM July 2000
+    \sloppy
+    \sfcode`\.=1000\relax
+}
+
+
+\gdef\balancecolumns
+{\vfill\eject
+\global\@colht=\textheight
+\global\ht\@cclv=\textheight
+}
+
+\newcount\colcntr
+\global\colcntr=0
+\newbox\savebox
+
+\gdef \@makecol {%
+\global\advance\colcntr by 1
+\ifnum\colcntr>2 \global\colcntr=1\fi
+   \ifvoid\footins
+     \setbox\@outputbox \box\@cclv
+   \else
+     \setbox\@outputbox \vbox{%
+\boxmaxdepth \@maxdepth
+       \@tempdima\dp\@cclv
+       \unvbox \@cclv
+       \vskip-\@tempdima
+       \vskip \skip\footins
+       \color@begingroup
+         \normalcolor
+         \footnoterule
+         \unvbox \footins
+       \color@endgroup
+       }%
+   \fi
+   \xdef\@freelist{\@freelist\@midlist}%
+   \global \let \@midlist \@empty
+   \@combinefloats
+   \ifvbox\@kludgeins
+     \@makespecialcolbox
+   \else
+     \setbox\@outputbox \vbox to\@colht {%
+\@texttop
+       \dimen@ \dp\@outputbox
+       \unvbox \@outputbox
+   \vskip -\dimen@
+       \@textbottom
+       }%
+   \fi
+   \global \maxdepth \@maxdepth
+}
+\def\titlenote{\@ifnextchar[\@xtitlenote{\stepcounter\@mpfn
+\global\advance\titlenotecount by 1
+\ifnum\titlenotecount=1
+    \raisebox{9pt}{$\ast$}
+\fi
+\ifnum\titlenotecount=2
+    \raisebox{9pt}{$\dagger$}
+\fi
+\ifnum\titlenotecount=3
+    \raisebox{9pt}{$\ddagger$}
+\fi
+\ifnum\titlenotecount=4
+\raisebox{9pt}{$\S$}
+\fi
+\ifnum\titlenotecount=5
+\raisebox{9pt}{$\P$}
+\fi
+         \@titlenotetext
+}}
+
+\long\def\@titlenotetext#1{\insert\footins{%
+\ifnum\titlenotecount=1\global\tntoks={#1}\fi
+\ifnum\titlenotecount=2\global\tntokstwo={#1}\fi
+\ifnum\titlenotecount=3\global\tntoksthree={#1}\fi
+\ifnum\titlenotecount=4\global\tntoksfour={#1}\fi
+\ifnum\titlenotecount=5\global\tntoksfive={#1}\fi
+    \reset@font\footnotesize
+    \interlinepenalty\interfootnotelinepenalty
+    \splittopskip\footnotesep
+    \splitmaxdepth \dp\strutbox \floatingpenalty \@MM
+    \hsize\columnwidth \@parboxrestore
+    \protected@edef\@currentlabel{%
+    }%
+    \color@begingroup
+   \color@endgroup}}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\ps@plain
+\baselineskip=11pt
+\let\thepage\relax % For NO page numbers - GM Nov. 30th. 1999 and July 2000
+\def\setpagenumber#1{\global\setcounter{page}{#1}}
+%\pagenumbering{arabic}  % Arabic page numbers GM July 2000
+\twocolumn             % Double column.
+\flushbottom           % Even bottom -- alas, does not balance columns at end of document
+\pagestyle{plain}
+
+% Need Copyright Year and Copyright Data to be user definable (in .tex file).
+% Gerry Nov. 30th. 1999
+\newtoks\copyrtyr
+\newtoks\acmcopyr
+\newtoks\boilerplate
+%...
+%... Lisa needed the following to be an integral part of this particular class file
+%...
+%Copyright is held by the author/owner(s).
+%WWW2005, May 10--14, 2005, Chiba, Japan.
+%ACM 1-58113-680-3/04/0005.
+%
+%...
+%...
+%...
+%\global\acmcopyr={1-58113-449-5/02/0005}  % Default
+\global\acmcopyr={xxx}  % Default
+\global\copyrtyr={2003}                % Default - 12/4/2000 *** Gerry
+\def\CopyrightYear#1{\global\copyrtyr{#1}}
+\def\crdata#1{\global\acmcopyr{#1}}
+\def\permission#1{\global\boilerplate{#1}}
+%
+\global\boilerplate={Copyright is held by the author/owner(s).\\ {\it Operating System Reviews}}
+\newtoks\copyrightetc
+% original \global\copyrightetc{ACM 1-58113-844-X/04/0005.\the\acmcopyr}
+% commented out following line---cew
+%\global\copyrightetc{ACM 1-58113-844-X/04/0005}  % "xxx" not needed after the period.
+
+\toappear{\the\boilerplate\par
+{\confname{\the\conf}} \the\confinfo\par \the\copyrightetc.}
+%
+%% End of www2005-submission.cls -- V1.4 - 1/26/2004 --
+%% Gerry Murray -- Monday January 26th. 2004
diff --git a/doc/common/chngpage.sty b/doc/common/chngpage.sty
index 6381292d24ffd0effad9ced42c2ef8366fe67d9a..ff255f6537baf8e8315e072e5eb5adf35dc28fa3 100644
--- a/doc/common/chngpage.sty
+++ b/doc/common/chngpage.sty
@@ -52,7 +52,7 @@
         \expandafter\expandafter\expandafter\@gobble
         \csname r@#1\endcsname\@nil
       \fi}}{}}
-      
+
 %% User level command to check for odd/even page
 \DeclareRobustCommand{\checkoddpage}{%
   \stepcounter{cp@cnt}\label{\cplabelprefix\thecp@cnt}%
@@ -138,21 +138,21 @@
 % \changetext
 %%%%%%%%%%%%%
 %     The \changetext command is for changing the size and horizontal position
-% of the text block on a page. The command takes 5 arguments, each of which 
+% of the text block on a page. The command takes 5 arguments, each of which
 % is a length or is empty. i.e.,
 %
 % \changetext{textheight}{textwidth}{evensidemargin}{oddsidemargin}{columnsep}
 %
 % The given lengths are added to the corresponding current lengths and
-% the remainder of the current page is typeset using the changed text block 
-% layout. The new layout remains in effect until another \change... command 
+% the remainder of the current page is typeset using the changed text block
+% layout. The new layout remains in effect until another \change... command
 % is issued.
 %
 %%%%%%%%%%%%%
 % \changepage
 %%%%%%%%%%%%%
 %     The \changepage command is for changing the general layout of
-% a page. The command takes 9 arguments, each of which is a length or is empty. 
+% a page. The command takes 9 arguments, each of which is a length or is empty.
 % The first 5 arguments are the same as for \changetext and have the same effect.
 % The last four arguments are:
 %
@@ -160,8 +160,8 @@
 %
 % These lengths are added to the corresponding current lengths and
 % thus modify the vertical positions of the elements of the page. The
-% remainder of the current page is typeset using the changed text block 
-% and page layout. The new layout remains in effect until another 
+% remainder of the current page is typeset using the changed text block
+% and page layout. The new layout remains in effect until another
 % \change... command is issued.
 %
 % NOTE 1: Not supplying a value for a length argument is equivalent
@@ -183,7 +183,7 @@
 % \changetext{-5\baselineskip}{10em}{-5em}{-5em}{}
 % \twocolumn
 % ... two column pages
-% \clearpage 
+% \clearpage
 % \changetext{5\baselineskip}{-10em}{5em}{5em}{}
 % \onecolumn
 % ... normal pages
@@ -210,7 +210,7 @@
 %%%%%%%%%%%%%
 %    Within an adjustwidth environment the left and right margins can be
 % adjusted. The environment takes one optional argument and two required
-% length arguments: 
+% length arguments:
 %
 % \begin{adjustwidth}[]{leftmargin}{rightmargin}
 %
@@ -262,7 +262,7 @@
 % can be turned on by putting the command \cpstricttrue before
 % the environment, and turned of by using \cpstrictfalse.
 %
-%     NOTE: In a twocolumn document, the adjustwidth environment 
+%     NOTE: In a twocolumn document, the adjustwidth environment
 % treats both columns equally. For example, if the width is meant
 % to be wider at the outer margin, then on odd pages the extra width
 % will be at the right of any column, and on even pages the extra
@@ -280,8 +280,8 @@
 %
 %    This works by the \checkoddpage command generating a label and
 % then checking the \pageref for the label (actually, a special version
-% of \pageref is required and is used internally by \checkoddpage). 
-% This mechanism requires at least two LaTeX passes to ensure that 
+% of \pageref is required and is used internally by \checkoddpage).
+% This mechanism requires at least two LaTeX passes to ensure that
 % the labels have settled (on the initial pass there will be no labels
 % in the *.aux file to be checked).
 %
@@ -290,7 +290,7 @@
 % defined as `^_', can be changed in the preamble if it will cause
 % a clash with any author-defined labels. The default labels will
 % be of the form `^_N' where N is a positive integer.
-% 
+%
 % Changes in version 1.1c (2001/02/24)
 % -----------------------
 % o Fixed problem when used with the calc package
@@ -302,17 +302,17 @@
 % -----------------------
 % o Added strict option for robust adjustwidth; checks odd/even
 %   pages via labels instead of by the page counter.
-% 
+%
 % Changes in version 1.1a (2001/01/18)
 % -----------------------
 % o Added missing {} in last 4 arguments of \changepage
-% 
+%
 % Changes in version 1.1 (2000/07/22)
 % ----------------------
 % o Empty arguments made available
 % o Added adjustwidth environment
-% 
-% 
+%
+%
 % Peter W.
 %
 %
diff --git a/doc/common/llnlCoverPage.tex b/doc/common/llnlCoverPage.tex
index 8e0a24cfbca4052a79b1bdc08a709cdb0cdf12d6..e99e5f224e542939d7006fe6e796b797e66bfe3f 100644
--- a/doc/common/llnlCoverPage.tex
+++ b/doc/common/llnlCoverPage.tex
@@ -5,10 +5,10 @@
 %     \makeLLNLCover{UCRL}{Title}{Authors}{Journal}{Date}{hShift}{vShift}
 %  and
 %     \makeLLNLBackCover
-%      
+%
 % where
 %
-%  UCRL: The UCRL (6 digit) number (which you probably won't know before the document 
+%  UCRL: The UCRL (6 digit) number (which you probably won't know before the document
 %        is released so just make up a number)
 %  Title: title of the article
 %  Authors: Authors separated by \\
@@ -20,9 +20,9 @@
 % Here is an example:
 %  \makeLLNLCover{123456}{An adaptive numerical method for high-speed reactive flows}{William D. Henshaw\\%
 %   Donald W. Schwendeman}{Journal of Computational Physics}{January 1, 2003}{0in}{0in}
-% 
+%
 % *****************************************************************************************************************
-% 
+%
 \newcommand{\setPageForLLNLCover}[2]{%
 \newlength{\textwidthOld}%
 \setlength{\textwidthOld}{\textwidth}%
@@ -55,7 +55,7 @@
 \newcommand{\makeLLNLCover}[7]{%
 \setPageForLLNLCover{#6}{#7}%
 \thispagestyle{empty}% no number of this page
-\newcommand{\logoWidth}{1.65in}% 
+\newcommand{\logoWidth}{1.65in}%
 \psset{xunit=1.cm,yunit=1.cm,runit=1.cm}%
 \begin{pspicture}(0,0)(17,24.)
 % turn on the grid for placement
@@ -104,7 +104,7 @@ Approved for public release; further dissemination unlimited}}
 \end{pspicture}
 % }
 %
-\clearpage 
+\clearpage
 % -------------- back of front cover -------------------------
 \changetext{.625in}{}{}{}{}
 \thispagestyle{empty}% no number of this page
@@ -149,15 +149,15 @@ Laboratory under Contract No. W-7405-Eng-48.
 \newcommand{\makeLLNLBackCover}{%
 \clearpage
 \setPageForLLNLBackCover
-% jg - suppress printing of essentially blank page here 
+% jg - suppress printing of essentially blank page here
 %\changetext{.625in}{}{}{}{}
 %\thispagestyle{empty}% no number of this page
-\ \ 
+\ \
 %\vfill
 %\begin{center}
 %Approved for public release; further dissemination unlimited
 %\end{center}
-%\clearpage 
+%\clearpage
 %\clearpage
 %\changetext{-.625in}{}{}{}{}
 % ---------------------------------------------------------------------------
diff --git a/doc/common/project.bib b/doc/common/project.bib
index f64a99aa9c551c60ed8b2af3cf8cbd29241dc23c..0cd2d4ab04e2bcaa5ab8c3eff6c5ff1d9fc63c15 100644
--- a/doc/common/project.bib
+++ b/doc/common/project.bib
@@ -1,8 +1,8 @@
 @MISC
 {
-    Authd2002, 
+    Authd2002,
     AUTHOR      = {Brad Chun},
-    TITLE       = {{Authd}}, 
+    TITLE       = {{Authd}},
     HOWPUBLISHED= {http://www.theether.org/authd/},
     YEAR        = {2002},
 }
@@ -30,7 +30,7 @@
 
 @MISC
 {
-    BlueGeneWeb, 
+    BlueGeneWeb,
     AUTHOR      = {{{IBM}}},
     TITLE       = {{Blue Gene Home Page}},
     HOWPUBLISHED= {http://www.research.ibm.com/bluegene},
@@ -39,7 +39,7 @@
 
 @MISC
 {
-    DPCS2002, 
+    DPCS2002,
     AUTHOR      = {{Lawrence Livermore National Laboratory}},
     TITLE       = {{Distributed Production Control System (DPCS).}},
     HOWPUBLISHED= {http://www.llnl.gov/icc/lc/dpcs/dpcs\_overview.html},
@@ -48,7 +48,7 @@
 
 @MISC
 {
-    Etnus2002, 
+    Etnus2002,
     AUTHOR      = {{Etnus, LLC.}},
     TITLE       = {{Etnus Home Page}},
     HOWPUBLISHED= {http://www.etnus.com},
@@ -57,16 +57,16 @@
 
 @MISC
 {
-    Globus2002, 
+    Globus2002,
     AUTHOR      = {{The Globus Project}},
     TITLE       = {{The Globus Project}},
     HOWPUBLISHED= {http://www.globus.org},
-    YEAR        = {2003}, 
+    YEAR        = {2003},
 }
 
 @MISC
 {
-    GPL2002, 
+    GPL2002,
     AUTHOR      = {{The GNU Project}},
     TITLE       = {{The GNU Public License}},
     HOWPUBLISHED= {http://www.gnu.org/licenses/licenses.html},
@@ -74,21 +74,21 @@
 
 @CONFERENCE
 {
-    Jackson2001, 
+    Jackson2001,
     AUTHOR    = {D. Jackson and Q. Snell and M. Clement},
     TITLE     = {{Core Algorithms of the Maui Scheduler}},
     BOOKTITLE = {Job Scheduling Stategies for Parallel Processing},
     PUBLISHER = {Springer-Verlag},
     VOLUME    = {2221},
     PAGES     = {87-102},
-    ADDRESS   = {{7th International Workshop, JSSP 2001, Cambridge, MA}}, 
+    ADDRESS   = {{7th International Workshop, JSSP 2001, Cambridge, MA}},
     YEAR      = {2001},
 }
 
 
 @MISC
 {
-    Jette2002, 
+    Jette2002,
     AUTHOR      = {M. Jette and others},
     TITLE       = {{Survey of Batch/Resource Management Related System Software}},
     HOWPUBLISHED= {{Lawrence Livermore National Laboratory}},
@@ -98,7 +98,7 @@
 
 @CONFERENCE
 {
-    Jones2003, 
+    Jones2003,
     AUTHOR      = {T. Jones and S. Dawson and R. Neely and W. Tuel and L. Brenner and J. Fier and R. Blackmore and P. Caffrey and B. Maskell and P. Tomlinson and M. Roberts},
     TITLE       = {{Improving scalability of parallel jobs by adding parallel awareness to the operating system}},
     BOOKTITLE   = {Proceedings of Supercomputing 2003},
@@ -108,7 +108,7 @@
 
 @CONFERENCE
 {
-    Kerbyson2001, 
+    Kerbyson2001,
     AUTHOR      = {D. J. Kerbyson and J. J. Alme and A. Hoisie and F. Petrini and H. J. Wasserman and M. Gittings},
     TITLE       = {{Predictive performance and scalability modeling of a large-scale application}},
     BOOKTITLE   = {Proceedings of Supercomputing 2001},
@@ -119,8 +119,8 @@
 
 @MISC
 {
-    LL2002, 
-    AUTHOR      = {{IBM}}, 
+    LL2002,
+    AUTHOR      = {{IBM}},
     TITLE       = {{LoadLeveler -- Efficient job scheduling and management}},
     HOWPUBLISHED= {\linebreak http://www-1.ibm.com/servers/eserver/pseries/library/sp\_books/loadleveler.html},
     YEAR        = {2003},
@@ -128,7 +128,7 @@
 
 @MISC
 {
-    Maui2002, 
+    Maui2002,
     AUTHOR      = {{Maui Scheduler}},
     TITLE       = {{Maui Scheduler}},
     HOWPUBLISHED= {http://supercluster.org/maui},
@@ -137,7 +137,7 @@
 
 @CONFERENCE
 {
-    Petrini2003, 
+    Petrini2003,
     AUTHOR      = {F. Petrini and D. J. Kerbyson and S. Pakin},
     TITLE       = {{The case of missing supercomputer performance: Achieving Optimal performance on the 8,192 processor ASCI Q}},
     BOOKTITLE   = {Proceedings of Supercomputing 2003},
@@ -157,7 +157,7 @@
 
 @MISC
 {
-    Quadrics2002, 
+    Quadrics2002,
     AUTHOR      = {{Quadrics Ltd.}},
     TITLE       = {{Resource Management (RMS)}},
     HOWPUBLISHED= {http://www.quadrics.com/},
@@ -166,7 +166,7 @@
 
 @CONFERENCE
 {
-    SLURM2003, 
+    SLURM2003,
     AUTHOR      = {M. Jette and M. Grondona},
     TITLE       = {{SLURM: Simple Linux Utility for Resource Management}},
     BOOKTITLE   = {Proceedings of ClusterWorld 2003},
@@ -185,18 +185,18 @@
 
 @CONFERENCE
 {
-    STORM2001, 
+    STORM2001,
     AUTHOR    = {E. Frachtenberg and others},
     TITLE     = {{STORM: Lightning-Fast Resource Management}},
     BOOKTITLE = {Proceedings of SuperComputing 2002},
-    ADDRESS   = {Baltimore, MD}, 
+    ADDRESS   = {Baltimore, MD},
     YEAR      = {2002},
     NOTE      = {Available from http://www.cs.huji.ac.il/$\sim$etcs/papers/sc02.pdf},
 }
 
 @CONFERENCE
 {
-    Tsafrir2005, 
+    Tsafrir2005,
     AUTHOR    = {D. Tsafrir and Y. Etsion and D. G. Feitelson and S Kirkpatric},
     TITLE     = {{System Noise, OS Clock Ticks, and Fine-Grained Parallel Applications}},
     BOOKTITLE = {19th ACM International Conference on Supercomputing (ICS)},
@@ -206,14 +206,14 @@
 
 @CONFERENCE
 {
-    Yoo2003, 
+    Yoo2003,
     AUTHOR    = {A. Yoo and M. A. Jette and M. Grondona},
     TITLE     = {{SLURM: Simple Linux Utility for Resource Management}},
     BOOKTITLE = {Lecture Notes in Computer Science},
     PUBLISHER = {Springer-Verlag},
     VOLUME    = {2862},
     PAGES     = {44-60},
-    ADDRESS   = {{9th International Workshop, JSSP 2003, Seattle, WA}}, 
+    ADDRESS   = {{9th International Workshop, JSSP 2003, Seattle, WA}},
     YEAR      = {2003},
 }
 
diff --git a/doc/figures/README b/doc/figures/README
index a4305b9175ed56ab7925a97afcf75a0242e78cd5..d0b16b2f6108ff34940f30c406daaa62d097bbb7 100644
--- a/doc/figures/README
+++ b/doc/figures/README
@@ -1,8 +1,8 @@
-Most of these diagrams (with the ".dia" suffix) were built using the 
+Most of these diagrams (with the ".dia" suffix) were built using the
 tool "dia" (http://www.gnome.org/projects/dia/).
 
-gnuplot is used with "times.gpl" and ".dat" suffix files to build a 
+gnuplot is used with "times.gpl" and ".dat" suffix files to build a
 timing plot.
 
-The PostScript images of these can be built as needed by the Makefile 
-in "../jsspp" and "../pubdesign". 
+The PostScript images of these can be built as needed by the Makefile
+in "../jsspp" and "../pubdesign".
diff --git a/doc/figures/allocate-init.dia b/doc/figures/allocate-init.dia
index 06aba5569368b40611f42e52567e714d0523103e..e9680b10bbd246ad0343750a3f13299ffc0351ac 100644
--- a/doc/figures/allocate-init.dia
+++ b/doc/figures/allocate-init.dia
@@ -1,3 +1,3 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<dia:diagram xmlns:dia="http://www.lysator.liu.se/~alla/dia/"><dia:diagramdata><dia:attribute name="background"><dia:color val="#ffffff"/></dia:attribute><dia:attribute name="paper"><dia:composite type="paper"><dia:attribute name="name"><dia:string>#A4#</dia:string></dia:attribute><dia:attribute name="tmargin"><dia:real val="2.8222"/></dia:attribute><dia:attribute name="bmargin"><dia:real val="2.8222"/></dia:attribute><dia:attribute name="lmargin"><dia:real val="2.8222"/></dia:attribute><dia:attribute name="rmargin"><dia:real val="2.8222"/></dia:attribute><dia:attribute name="is_portrait"><dia:boolean val="true"/></dia:attribute><dia:attribute name="scaling"><dia:real val="1"/></dia:attribute><dia:attribute name="fitto"><dia:boolean val="false"/></dia:attribute></dia:composite></dia:attribute><dia:attribute name="grid"><dia:composite type="grid"><dia:attribute name="width_x"><dia:real val="0.25"/></dia:attribute><dia:attribute name="width_y"><dia:real val="0.125"/></dia:attribute><dia:attribute name="visible_x"><dia:int val="1"/></dia:attribute><dia:attribute name="visible_y"><dia:int val="1"/></dia:attribute></dia:composite></dia:attribute><dia:attribute name="guides"><dia:composite type="guides"><dia:attribute name="hguides"/><dia:attribute name="vguides"/></dia:composite></dia:attribute></dia:diagramdata><dia:layer name="Background" visible="true"><dia:object type="Standard - Line" version="0" id="O0"><dia:attribute name="obj_pos"><dia:point val="21.75,9.125"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="21.7,9.075;21.8,13.05"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="21.75,9.125"/><dia:point val="21.75,13"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:connections><dia:connection handle="1" to="O45" connection="1"/></dia:connections></dia:object><dia:group><dia:object type="Standard - PolyLine" version="0" 
id="O1"><dia:attribute name="obj_pos"><dia:point val="1.5,2.25"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="0.555625,1.46145;2.90369,4.225"/></dia:attribute><dia:attribute name="poly_points"><dia:point val="1.5,2.25"/><dia:point val="0.9125,1.5"/><dia:point val="0.625,1.7375"/><dia:point val="1.3875,2.7125"/><dia:point val="0.975,4.175"/><dia:point val="1.4375,4.175"/><dia:point val="1.6875,3.225"/><dia:point val="1.925,4.175"/><dia:point val="2.35,4.175"/><dia:point val="2.025,2.925"/><dia:point val="2.6625,3.3"/><dia:point val="2.8375,2.975"/><dia:point val="1.9625,2.425"/><dia:point val="1.925,2.2375"/></dia:attribute><dia:attribute name="line_color"><dia:color val="#010101"/></dia:attribute></dia:object><dia:object type="Standard - Arc" version="0" id="O2"><dia:attribute name="obj_pos"><dia:point val="1.9,2.275"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="1.33602,1.59571;2.11398,2.325"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="1.9,2.275"/><dia:point val="1.55,2.275"/></dia:attribute><dia:attribute name="arc_color"><dia:color val="#010101"/></dia:attribute><dia:attribute name="curve_distance"><dia:real val="0.629294"/></dia:attribute></dia:object></dia:group><dia:object type="Standard - Text" version="0" id="O3"><dia:attribute name="obj_pos"><dia:point val="1.5,1.25"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="1,0.75;2,1.6"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#User#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.65"/></dia:attribute><dia:attribute name="pos"><dia:point val="1.5,1.25"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum 
val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O4"><dia:attribute name="obj_pos"><dia:point val="14.5,2.25"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="14.45,2.2;14.55,18.55"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="14.5,2.25"/><dia:point val="14.5,18.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_color"><dia:color val="#b3b3b3"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O5"><dia:attribute name="obj_pos"><dia:point val="19.75,2.25"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="19.7,2.2;19.8,18.55"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="19.75,2.25"/><dia:point val="19.75,18.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_color"><dia:color val="#b1b1b1"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O6"><dia:attribute name="obj_pos"><dia:point val="14.5,1.75"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="13.375,1.3;15.625,2.1"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#slurmctld#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="14.5,1.75"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O7"><dia:attribute name="obj_pos"><dia:point val="19.75,1.75"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle 
val="19,1.3;20.5,2.1"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#slurmd#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="19.75,1.75"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O8"><dia:attribute name="obj_pos"><dia:point val="9,2.75"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="8.96,2.35001;14.29,3.15001"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="9,2.75"/><dia:point val="14.25,2.75001"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O31" connection="2"/><dia:connection handle="1" to="O9" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Box" version="0" id="O9"><dia:attribute name="obj_pos"><dia:point val="14.25,2.75001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="14.2,2.70001;14.8,3.80001"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="14.25,2.75001"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bb"/></dia:attribute><dia:attribute 
name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O10"><dia:attribute name="obj_pos"><dia:point val="14.25,3.75001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="8.96,3.35;14.29,4.15"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="14.25,3.75001"/><dia:point val="9,3.75"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O9" connection="5"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O11"><dia:attribute name="obj_pos"><dia:point val="11.75,2.625"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="10.55,2.125;12.95,2.925"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#allocate req#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="11.75,2.625"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O12"><dia:attribute name="obj_pos"><dia:point val="12,3.625"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="10.625,3.125;13.375,3.925"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#allocate 
reply#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="12,3.625"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O13"><dia:attribute name="obj_pos"><dia:point val="7.75,7.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.71,7.1;19.54,7.9"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="7.75,7.5"/><dia:point val="19.5,7.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="1" to="O17" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O14"><dia:attribute name="obj_pos"><dia:point val="13.5,7.375"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="11.875,6.875;15.125,7.675"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#run job step req#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="13.5,7.375"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum 
val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O15"><dia:attribute name="obj_pos"><dia:point val="14.25,6.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.21,6.1;14.29,6.9"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="14.25,6.5"/><dia:point val="7.25,6.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O55" connection="5"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O16"><dia:attribute name="obj_pos"><dia:point val="11.25,6.375"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="9.85,5.875;12.65,6.675"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#job step reply#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="11.25,6.375"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O17"><dia:attribute name="obj_pos"><dia:point val="19.5,7.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="19.45,7.45;20.05,9.05"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="19.5,7.5"/></dia:attribute><dia:attribute name="elem_width"><dia:real 
val="0.500003"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1.5"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O18"><dia:attribute name="obj_pos"><dia:point val="20.75,8.375"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="20.75,7.96125;22.1638,8.6525"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#job_mgr#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="20.75,8.375"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="0"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O19"><dia:attribute name="obj_pos"><dia:point val="19.75,9"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="19.725,8.7;20.525,9.3"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="19.75,9"/><dia:point val="20.5,9"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.3"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.3"/></dia:attribute><dia:connections><dia:connection handle="0" to="O17" connection="6"/><dia:connection handle="1" to="O26" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O20"><dia:attribute name="obj_pos"><dia:point 
val="21.5,9"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="21.5,8.58625;23.7138,9.2775"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#session_mgr#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="21.5,9"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="0"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O21"><dia:attribute name="obj_pos"><dia:point val="21.5,9.75"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.71,9.24999;21.54,10.25"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="21.5,9.75"/><dia:point val="7.75,9.74999"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="start_arrow"><dia:enum val="13"/></dia:attribute><dia:attribute name="start_arrow_length"><dia:real val="0.5"/></dia:attribute><dia:attribute name="start_arrow_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="3"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.5"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.5"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O22"><dia:attribute name="obj_pos"><dia:point val="14.5,9.625"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="12.525,9.125;16.475,9.925"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#connect(stdout/err)#</dia:string></dia:attribute><dia:attribute 
name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="14.5,9.625"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O23"><dia:attribute name="obj_pos"><dia:point val="21.5,13.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.71,13.1;21.54,13.9"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="21.5,13.5"/><dia:point val="7.75,13.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O45" connection="3"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O24"><dia:attribute name="obj_pos"><dia:point val="17,13.375"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="15.575,12.875;18.425,13.675"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#task exit msg#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="17,13.375"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" 
version="0" id="O25"><dia:attribute name="obj_pos"><dia:point val="20.75,8.625"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="20.7,8.575;20.8,14.05"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="20.75,8.625"/><dia:point val="20.75,14"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:connections><dia:connection handle="1" to="O28" connection="1"/></dia:connections></dia:object><dia:object type="Standard - Box" version="0" id="O26"><dia:attribute name="obj_pos"><dia:point val="20.5,9"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="20.45,8.95;21.05,9.55"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="20.5,9"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O27"><dia:attribute name="obj_pos"><dia:point val="21.5,9.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="21.45,9.45;22.05,10.55"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="21.5,9.5"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O28"><dia:attribute name="obj_pos"><dia:point val="20.5,14"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="20.45,13.95;21.05,15.05"/></dia:attribute><dia:attribute name="elem_corner"><dia:point 
val="20.5,14"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O29"><dia:attribute name="obj_pos"><dia:point val="19.5,15"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="19.45,14.95;20.05,15.55"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="19.5,15"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O30"><dia:attribute name="obj_pos"><dia:point val="9.25,1.75"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="8.75,1.3;9.75,2.1"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#srun#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="9.25,1.75"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O31"><dia:attribute name="obj_pos"><dia:point val="8.5,2.75"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="8.45,2.7;9.05,4.3"/></dia:attribute><dia:attribute name="elem_corner"><dia:point 
val="8.5,2.75"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1.5"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O32"><dia:attribute name="obj_pos"><dia:point val="3,2.75"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="2.95,2.35;8.55,3.15"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="3,2.75"/><dia:point val="8.5,2.75"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.6"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="1" to="O31" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O33"><dia:attribute name="obj_pos"><dia:point val="8.75,16.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="2.95,16.1;8.8,16.9"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="8.75,16.5"/><dia:point val="3,16.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.6"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O62" connection="6"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O34"><dia:attribute name="obj_pos"><dia:point val="14.5,11"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="8.71,10.6;14.54,11.4"/></dia:attribute><dia:attribute 
name="conn_endpoints"><dia:point val="14.5,11"/><dia:point val="8.75,11"/></dia:attribute><dia:attribute name="numcp"><dia:int val="2"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O35"><dia:attribute name="obj_pos"><dia:point val="8.75,11.125"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="8.71,10.725;14.54,11.525"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="8.75,11.125"/><dia:point val="14.5,11.125"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O36"><dia:attribute name="obj_pos"><dia:point val="2.25,16.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="1.6,16;2.9,17.4"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#exit 
+<dia:diagram xmlns:dia="http://www.lysator.liu.se/~alla/dia/"><dia:diagramdata><dia:attribute name="background"><dia:color val="#ffffff"/></dia:attribute><dia:attribute name="paper"><dia:composite type="paper"><dia:attribute name="name"><dia:string>#A4#</dia:string></dia:attribute><dia:attribute name="tmargin"><dia:real val="2.8222"/></dia:attribute><dia:attribute name="bmargin"><dia:real val="2.8222"/></dia:attribute><dia:attribute name="lmargin"><dia:real val="2.8222"/></dia:attribute><dia:attribute name="rmargin"><dia:real val="2.8222"/></dia:attribute><dia:attribute name="is_portrait"><dia:boolean val="true"/></dia:attribute><dia:attribute name="scaling"><dia:real val="1"/></dia:attribute><dia:attribute name="fitto"><dia:boolean val="false"/></dia:attribute></dia:composite></dia:attribute><dia:attribute name="grid"><dia:composite type="grid"><dia:attribute name="width_x"><dia:real val="0.25"/></dia:attribute><dia:attribute name="width_y"><dia:real val="0.125"/></dia:attribute><dia:attribute name="visible_x"><dia:int val="1"/></dia:attribute><dia:attribute name="visible_y"><dia:int val="1"/></dia:attribute></dia:composite></dia:attribute><dia:attribute name="guides"><dia:composite type="guides"><dia:attribute name="hguides"/><dia:attribute name="vguides"/></dia:composite></dia:attribute></dia:diagramdata><dia:layer name="Background" visible="true"><dia:object type="Standard - Line" version="0" id="O0"><dia:attribute name="obj_pos"><dia:point val="21.75,9.125"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="21.7,9.075;21.8,13.05"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="21.75,9.125"/><dia:point val="21.75,13"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:connections><dia:connection handle="1" to="O45" connection="1"/></dia:connections></dia:object><dia:group><dia:object type="Standard - PolyLine" version="0" 
id="O1"><dia:attribute name="obj_pos"><dia:point val="1.5,2.25"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="0.555625,1.46145;2.90369,4.225"/></dia:attribute><dia:attribute name="poly_points"><dia:point val="1.5,2.25"/><dia:point val="0.9125,1.5"/><dia:point val="0.625,1.7375"/><dia:point val="1.3875,2.7125"/><dia:point val="0.975,4.175"/><dia:point val="1.4375,4.175"/><dia:point val="1.6875,3.225"/><dia:point val="1.925,4.175"/><dia:point val="2.35,4.175"/><dia:point val="2.025,2.925"/><dia:point val="2.6625,3.3"/><dia:point val="2.8375,2.975"/><dia:point val="1.9625,2.425"/><dia:point val="1.925,2.2375"/></dia:attribute><dia:attribute name="line_color"><dia:color val="#010101"/></dia:attribute></dia:object><dia:object type="Standard - Arc" version="0" id="O2"><dia:attribute name="obj_pos"><dia:point val="1.9,2.275"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="1.33602,1.59571;2.11398,2.325"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="1.9,2.275"/><dia:point val="1.55,2.275"/></dia:attribute><dia:attribute name="arc_color"><dia:color val="#010101"/></dia:attribute><dia:attribute name="curve_distance"><dia:real val="0.629294"/></dia:attribute></dia:object></dia:group><dia:object type="Standard - Text" version="0" id="O3"><dia:attribute name="obj_pos"><dia:point val="1.5,1.25"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="1,0.75;2,1.6"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#User#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.65"/></dia:attribute><dia:attribute name="pos"><dia:point val="1.5,1.25"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum 
val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O4"><dia:attribute name="obj_pos"><dia:point val="14.5,2.25"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="14.45,2.2;14.55,18.55"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="14.5,2.25"/><dia:point val="14.5,18.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_color"><dia:color val="#b3b3b3"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O5"><dia:attribute name="obj_pos"><dia:point val="19.75,2.25"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="19.7,2.2;19.8,18.55"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="19.75,2.25"/><dia:point val="19.75,18.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_color"><dia:color val="#b1b1b1"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O6"><dia:attribute name="obj_pos"><dia:point val="14.5,1.75"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="13.375,1.3;15.625,2.1"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#slurmctld#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="14.5,1.75"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O7"><dia:attribute name="obj_pos"><dia:point val="19.75,1.75"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle 
val="19,1.3;20.5,2.1"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#slurmd#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="19.75,1.75"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O8"><dia:attribute name="obj_pos"><dia:point val="9,2.75"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="8.96,2.35001;14.29,3.15001"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="9,2.75"/><dia:point val="14.25,2.75001"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O31" connection="2"/><dia:connection handle="1" to="O9" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Box" version="0" id="O9"><dia:attribute name="obj_pos"><dia:point val="14.25,2.75001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="14.2,2.70001;14.8,3.80001"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="14.25,2.75001"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bb"/></dia:attribute><dia:attribute 
name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O10"><dia:attribute name="obj_pos"><dia:point val="14.25,3.75001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="8.96,3.35;14.29,4.15"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="14.25,3.75001"/><dia:point val="9,3.75"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O9" connection="5"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O11"><dia:attribute name="obj_pos"><dia:point val="11.75,2.625"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="10.55,2.125;12.95,2.925"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#allocate req#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="11.75,2.625"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O12"><dia:attribute name="obj_pos"><dia:point val="12,3.625"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="10.625,3.125;13.375,3.925"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#allocate 
reply#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="12,3.625"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O13"><dia:attribute name="obj_pos"><dia:point val="7.75,7.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.71,7.1;19.54,7.9"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="7.75,7.5"/><dia:point val="19.5,7.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="1" to="O17" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O14"><dia:attribute name="obj_pos"><dia:point val="13.5,7.375"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="11.875,6.875;15.125,7.675"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#run job step req#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="13.5,7.375"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum 
val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O15"><dia:attribute name="obj_pos"><dia:point val="14.25,6.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.21,6.1;14.29,6.9"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="14.25,6.5"/><dia:point val="7.25,6.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O55" connection="5"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O16"><dia:attribute name="obj_pos"><dia:point val="11.25,6.375"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="9.85,5.875;12.65,6.675"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#job step reply#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="11.25,6.375"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O17"><dia:attribute name="obj_pos"><dia:point val="19.5,7.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="19.45,7.45;20.05,9.05"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="19.5,7.5"/></dia:attribute><dia:attribute name="elem_width"><dia:real 
val="0.500003"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1.5"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O18"><dia:attribute name="obj_pos"><dia:point val="20.75,8.375"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="20.75,7.96125;22.1638,8.6525"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#job_mgr#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="20.75,8.375"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="0"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O19"><dia:attribute name="obj_pos"><dia:point val="19.75,9"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="19.725,8.7;20.525,9.3"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="19.75,9"/><dia:point val="20.5,9"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.3"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.3"/></dia:attribute><dia:connections><dia:connection handle="0" to="O17" connection="6"/><dia:connection handle="1" to="O26" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O20"><dia:attribute name="obj_pos"><dia:point 
val="21.5,9"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="21.5,8.58625;23.7138,9.2775"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#session_mgr#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="21.5,9"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="0"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O21"><dia:attribute name="obj_pos"><dia:point val="21.5,9.75"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.71,9.24999;21.54,10.25"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="21.5,9.75"/><dia:point val="7.75,9.74999"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="start_arrow"><dia:enum val="13"/></dia:attribute><dia:attribute name="start_arrow_length"><dia:real val="0.5"/></dia:attribute><dia:attribute name="start_arrow_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="3"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.5"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.5"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O22"><dia:attribute name="obj_pos"><dia:point val="14.5,9.625"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="12.525,9.125;16.475,9.925"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#connect(stdout/err)#</dia:string></dia:attribute><dia:attribute 
name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="14.5,9.625"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O23"><dia:attribute name="obj_pos"><dia:point val="21.5,13.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.71,13.1;21.54,13.9"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="21.5,13.5"/><dia:point val="7.75,13.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O45" connection="3"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O24"><dia:attribute name="obj_pos"><dia:point val="17,13.375"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="15.575,12.875;18.425,13.675"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#task exit msg#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="17,13.375"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" 
version="0" id="O25"><dia:attribute name="obj_pos"><dia:point val="20.75,8.625"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="20.7,8.575;20.8,14.05"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="20.75,8.625"/><dia:point val="20.75,14"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:connections><dia:connection handle="1" to="O28" connection="1"/></dia:connections></dia:object><dia:object type="Standard - Box" version="0" id="O26"><dia:attribute name="obj_pos"><dia:point val="20.5,9"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="20.45,8.95;21.05,9.55"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="20.5,9"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O27"><dia:attribute name="obj_pos"><dia:point val="21.5,9.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="21.45,9.45;22.05,10.55"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="21.5,9.5"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O28"><dia:attribute name="obj_pos"><dia:point val="20.5,14"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="20.45,13.95;21.05,15.05"/></dia:attribute><dia:attribute name="elem_corner"><dia:point 
val="20.5,14"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O29"><dia:attribute name="obj_pos"><dia:point val="19.5,15"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="19.45,14.95;20.05,15.55"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="19.5,15"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O30"><dia:attribute name="obj_pos"><dia:point val="9.25,1.75"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="8.75,1.3;9.75,2.1"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#srun#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="9.25,1.75"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O31"><dia:attribute name="obj_pos"><dia:point val="8.5,2.75"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="8.45,2.7;9.05,4.3"/></dia:attribute><dia:attribute name="elem_corner"><dia:point 
val="8.5,2.75"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1.5"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O32"><dia:attribute name="obj_pos"><dia:point val="3,2.75"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="2.95,2.35;8.55,3.15"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="3,2.75"/><dia:point val="8.5,2.75"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.6"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="1" to="O31" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O33"><dia:attribute name="obj_pos"><dia:point val="8.75,16.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="2.95,16.1;8.8,16.9"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="8.75,16.5"/><dia:point val="3,16.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.6"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O62" connection="6"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O34"><dia:attribute name="obj_pos"><dia:point val="14.5,11"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="8.71,10.6;14.54,11.4"/></dia:attribute><dia:attribute 
name="conn_endpoints"><dia:point val="14.5,11"/><dia:point val="8.75,11"/></dia:attribute><dia:attribute name="numcp"><dia:int val="2"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O35"><dia:attribute name="obj_pos"><dia:point val="8.75,11.125"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="8.71,10.725;14.54,11.525"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="8.75,11.125"/><dia:point val="14.5,11.125"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O36"><dia:attribute name="obj_pos"><dia:point val="2.25,16.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="1.6,16;2.9,17.4"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#exit
 status#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="2.25,16.5"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O37"><dia:attribute name="obj_pos"><dia:point val="22.5,10.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="22.45,10.45;23.05,13.05"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="22.5,10.5"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="2.5"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O38"><dia:attribute name="obj_pos"><dia:point val="23,9.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="23,9.1;23,9.75"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>##</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="23,9.5"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="0"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O39"><dia:attribute name="obj_pos"><dia:point val="22.5,10.25"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle 
val="22.5,9.83625;23.1137,10.5275"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#cmd#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="22.5,10.25"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="0"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O40"><dia:attribute name="obj_pos"><dia:point val="20.75,9.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="20.725,9.2;21.525,9.8"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="20.75,9.5"/><dia:point val="21.5,9.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.3"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.3"/></dia:attribute><dia:connections><dia:connection handle="0" to="O26" connection="6"/><dia:connection handle="1" to="O27" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O41"><dia:attribute name="obj_pos"><dia:point val="20.75,15"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="19.975,14.7;20.775,15.3"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="20.75,15"/><dia:point val="20,15"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute 
name="end_arrow_length"><dia:real val="0.3"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.3"/></dia:attribute><dia:connections><dia:connection handle="0" to="O28" connection="6"/><dia:connection handle="1" to="O29" connection="2"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O42"><dia:attribute name="obj_pos"><dia:point val="22,10.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="21.975,10.2;22.525,10.8"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="22,10.5"/><dia:point val="22.5,10.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.3"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.3"/></dia:attribute><dia:connections><dia:connection handle="0" to="O27" connection="7"/><dia:connection handle="1" to="O37" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O43"><dia:attribute name="obj_pos"><dia:point val="22.5,13"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="21.975,12.7;22.525,13.3"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="22.5,13"/><dia:point val="22,13"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.3"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.3"/></dia:attribute><dia:connections><dia:connection handle="0" to="O37" connection="5"/><dia:connection handle="1" to="O45" connection="2"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" 
id="O44"><dia:attribute name="obj_pos"><dia:point val="4.5,2.625"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="3.425,2.225;5.575,2.925"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#srun allocate#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="80" name="Helvetica-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="4.5,2.625"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O45"><dia:attribute name="obj_pos"><dia:point val="21.5,13"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="21.45,12.95;22.05,14.05"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="21.5,13"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O46"><dia:attribute name="obj_pos"><dia:point val="8.75,4.25"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="8.7,4.2;8.8,15.55"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="8.75,4.25"/><dia:point val="8.75,15.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O31" connection="6"/><dia:connection handle="1" to="O62" connection="1"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" 
id="O47"><dia:attribute name="obj_pos"><dia:point val="8.75,4.25"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.25,3.85;8.75,4.65"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="8.75,4.25"/><dia:point val="4.25,4.25"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O31" connection="6"/><dia:connection handle="1" to="O48" connection="2"/></dia:connections></dia:object><dia:object type="Standard - Box" version="0" id="O48"><dia:attribute name="obj_pos"><dia:point val="3.75,4.25"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="3.7,4.2;4.3,5.3"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="3.75,4.25"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O49"><dia:attribute name="obj_pos"><dia:point val="7.5,5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7,4.55;8,5.35"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#srun#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="7.5,5"/></dia:attribute><dia:attribute name="color"><dia:color 
val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O50"><dia:attribute name="obj_pos"><dia:point val="4.25,5.25"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.25,4.85;7.25,5.65"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="4.25,5.25"/><dia:point val="7.25,5.25"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O48" connection="7"/><dia:connection handle="1" to="O52" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O51"><dia:attribute name="obj_pos"><dia:point val="4,5.25"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="3.95,5.2;4.05,14.55"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="4,5.25"/><dia:point val="4,14.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O48" connection="6"/></dia:connections></dia:object><dia:object type="Standard - Box" version="0" id="O52"><dia:attribute name="obj_pos"><dia:point val="7.25,5.25"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.2,5.2;7.8,14.55"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="7.25,5.25"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="9.25"/></dia:attribute><dia:attribute name="inner_color"><dia:color 
val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O53"><dia:attribute name="obj_pos"><dia:point val="7.75,5.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.71,5.1;14.29,5.9"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="7.75,5.5"/><dia:point val="14.25,5.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="1" to="O55" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O54"><dia:attribute name="obj_pos"><dia:point val="11.25,5.375"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="10.025,4.875;12.475,5.675"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#job step req#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="11.25,5.375"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O55"><dia:attribute name="obj_pos"><dia:point val="14.25,5.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="14.2,5.45;14.8,6.55"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="14.25,5.5"/></dia:attribute><dia:attribute 
name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bb"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O56"><dia:attribute name="obj_pos"><dia:point val="4,3.75"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="3.75,3.3;4.25,4.1"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#sh#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="4,3.75"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O57"><dia:attribute name="obj_pos"><dia:point val="7.25,14.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.315,14.1;7.25,14.9"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="7.25,14.5"/><dia:point val="4.315,14.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O52" connection="5"/><dia:connection handle="1" to="O58" connection="2"/></dia:connections></dia:object><dia:object type="Standard - Box" version="0" id="O58"><dia:attribute name="obj_pos"><dia:point 
val="3.75,14.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="3.7,14.45;4.365,15.55"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="3.75,14.5"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.565"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O59"><dia:attribute name="obj_pos"><dia:point val="19.5,8.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.71,8.09999;19.54,8.89999"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="19.5,8.5"/><dia:point val="7.75,8.49999"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O60"><dia:attribute name="obj_pos"><dia:point val="13.5,8.375"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="11.7,7.875;15.3,8.675"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#run job step reply#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="13.5,8.375"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object 
type="Standard - Line" version="0" id="O61"><dia:attribute name="obj_pos"><dia:point val="4.315,15.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.315,15.1;8.5,15.9"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="4.315,15.5"/><dia:point val="8.5,15.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O58" connection="7"/><dia:connection handle="1" to="O62" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Box" version="0" id="O62"><dia:attribute name="obj_pos"><dia:point val="8.5,15.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="8.45,15.45;9.05,16.55"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="8.5,15.5"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O63"><dia:attribute name="obj_pos"><dia:point val="11.5,15.875"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="9.7,15.375;13.3,16.175"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#release allocation#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point 
val="11.5,15.875"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O64"><dia:attribute name="obj_pos"><dia:point val="9,16"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="8.96,15.6;14.29,16.4"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="9,16"/><dia:point val="14.25,16"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O62" connection="4"/><dia:connection handle="1" to="O65" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Box" version="0" id="O65"><dia:attribute name="obj_pos"><dia:point val="14.25,16"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="14.2,15.95;14.8,18.3"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="14.25,16"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="2.25"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O66"><dia:attribute name="obj_pos"><dia:point val="14.75,16.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="14.71,16.1;19.54,16.9"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="14.75,16.5"/><dia:point val="19.5,16.5"/></dia:attribute><dia:attribute 
name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="1" to="O70" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O67"><dia:attribute name="obj_pos"><dia:point val="17,16.375"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="15.6,15.875;18.4,16.675"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#run epilog req#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="17,16.375"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O68"><dia:attribute name="obj_pos"><dia:point val="19.5,18"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="14.71,17.6;19.54,18.4"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="19.5,18"/><dia:point val="14.75,18"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O70" connection="5"/></dia:connections></dia:object><dia:object 
type="Standard - Text" version="0" id="O69"><dia:attribute name="obj_pos"><dia:point val="17.5,17.875"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="15.925,17.375;19.075,18.175"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#run epilog reply#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="17.5,17.875"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O70"><dia:attribute name="obj_pos"><dia:point val="19.5,16.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="19.45,16.45;20.05,18.05"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="19.5,16.5"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1.5"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O71"><dia:attribute name="obj_pos"><dia:point val="7.75,14.25"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.71,13.85;14.54,14.65"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="7.75,14.25"/><dia:point val="14.5,14.25"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real 
val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O72"><dia:attribute name="obj_pos"><dia:point val="11.25,14.125"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="9.6,13.625;12.9,14.425"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#release job step#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="11.25,14.125"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O73"><dia:attribute name="obj_pos"><dia:point val="14.5,11.75"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.71,11.35;14.54,12.15"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="14.5,11.75"/><dia:point val="7.75,11.75"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O74"><dia:attribute name="obj_pos"><dia:point val="7.75,11.875"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.71,11.475;14.54,12.275"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="7.75,11.875"/><dia:point 
val="14.5,11.875"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute></dia:object><dia:group><dia:object type="Standard - Line" version="0" id="O75"><dia:attribute name="obj_pos"><dia:point val="6.25,1.25"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="6.225,1.225;6.275,18.525"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,1.25"/><dia:point val="6.25,18.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O76"><dia:attribute name="obj_pos"><dia:point val="6.25,1.25"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.71464,1.21464;6.28536,1.78536"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,1.25"/><dia:point val="5.75,1.75"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O77"><dia:attribute name="obj_pos"><dia:point val="6.25,1.92"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.71464,1.88464;6.28536,2.45536"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,1.92"/><dia:point val="5.75,2.42"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O78"><dia:attribute 
name="obj_pos"><dia:point val="6.25,2.59"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.71464,2.55464;6.28536,3.12536"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,2.59"/><dia:point val="5.75,3.09"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O79"><dia:attribute name="obj_pos"><dia:point val="6.25,3.26"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.71464,3.22464;6.28536,3.79536"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,3.26"/><dia:point val="5.75,3.76"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O80"><dia:attribute name="obj_pos"><dia:point val="6.25,3.93"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.71464,3.89464;6.28536,4.46536"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,3.93"/><dia:point val="5.75,4.43"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O81"><dia:attribute name="obj_pos"><dia:point val="6.25,4.6"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.71464,4.56464;6.28536,5.13536"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,4.6"/><dia:point val="5.75,5.1"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O82"><dia:attribute name="obj_pos"><dia:point 
val="6.25,5.27"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.71464,5.23464;6.28536,5.80536"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,5.27"/><dia:point val="5.75,5.77"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O83"><dia:attribute name="obj_pos"><dia:point val="6.25,5.94"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.71464,5.90464;6.28536,6.47536"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,5.94"/><dia:point val="5.75,6.44"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O84"><dia:attribute name="obj_pos"><dia:point val="6.25,6.61"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.71464,6.57464;6.28536,7.14536"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,6.61"/><dia:point val="5.75,7.11"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O85"><dia:attribute name="obj_pos"><dia:point val="6.25,7.28"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.71464,7.24464;6.28536,7.81536"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,7.28"/><dia:point val="5.75,7.78"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O86"><dia:attribute name="obj_pos"><dia:point val="6.25,7.95"/></dia:attribute><dia:attribute 
name="obj_bb"><dia:rectangle val="5.71464,7.91464;6.28536,8.48536"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,7.95"/><dia:point val="5.75,8.45"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O87"><dia:attribute name="obj_pos"><dia:point val="6.25,8.62"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.71464,8.58464;6.28536,9.15536"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,8.62"/><dia:point val="5.75,9.12"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O88"><dia:attribute name="obj_pos"><dia:point val="6.25,9.29"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.71464,9.25464;6.28536,9.82536"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,9.29"/><dia:point val="5.75,9.79"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O89"><dia:attribute name="obj_pos"><dia:point val="6.25,9.96"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.71464,9.92464;6.28536,10.4954"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,9.96"/><dia:point val="5.75,10.46"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O90"><dia:attribute name="obj_pos"><dia:point val="6.25,10.63"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle 
val="5.71464,10.5946;6.28536,11.1654"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,10.63"/><dia:point val="5.75,11.13"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O91"><dia:attribute name="obj_pos"><dia:point val="6.25,11.3"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.71464,11.2646;6.28536,11.8354"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,11.3"/><dia:point val="5.75,11.8"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O92"><dia:attribute name="obj_pos"><dia:point val="6.25,11.97"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.71464,11.9346;6.28536,12.5054"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,11.97"/><dia:point val="5.75,12.47"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O93"><dia:attribute name="obj_pos"><dia:point val="6.25,12.64"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.71464,12.6046;6.28536,13.1754"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,12.64"/><dia:point val="5.75,13.14"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O94"><dia:attribute name="obj_pos"><dia:point val="6.25,13.31"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle 
val="5.71464,13.2746;6.28536,13.8454"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,13.31"/><dia:point val="5.75,13.81"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O95"><dia:attribute name="obj_pos"><dia:point val="6.25,13.98"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.71464,13.9446;6.28536,14.5154"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,13.98"/><dia:point val="5.75,14.48"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O96"><dia:attribute name="obj_pos"><dia:point val="6.25,14.65"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.71464,14.6146;6.28536,15.1854"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,14.65"/><dia:point val="5.75,15.15"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O97"><dia:attribute name="obj_pos"><dia:point val="6.25,15.32"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.71464,15.2846;6.28536,15.8554"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,15.32"/><dia:point val="5.75,15.82"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O98"><dia:attribute name="obj_pos"><dia:point val="6.25,15.99"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle 
val="5.71464,15.9546;6.28536,16.5254"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,15.99"/><dia:point val="5.75,16.49"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O99"><dia:attribute name="obj_pos"><dia:point val="6.25,16.66"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.71464,16.6246;6.28536,17.1954"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,16.66"/><dia:point val="5.75,17.16"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O100"><dia:attribute name="obj_pos"><dia:point val="6.25,17.33"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.71464,17.2946;6.28536,17.8654"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,17.33"/><dia:point val="5.75,17.83"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O101"><dia:attribute name="obj_pos"><dia:point val="6.25,18"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.71464,17.9646;6.28536,18.5354"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.25,18"/><dia:point val="5.75,18.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object></dia:group><dia:object type="Standard - Line" version="0" id="O102"><dia:attribute name="obj_pos"><dia:point val="21.5,14"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle 
val="20.975,13.7;21.525,14.3"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="21.5,14"/><dia:point val="21,14"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.3"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.3"/></dia:attribute><dia:connections><dia:connection handle="0" to="O45" connection="5"/><dia:connection handle="1" to="O28" connection="2"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O103"><dia:attribute name="obj_pos"><dia:point val="11.75,11.625"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="9.825,11.125;13.675,11.925"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#job/job step status#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="11.75,11.625"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O104"><dia:attribute name="obj_pos"><dia:point val="21.185,7.66833"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="21.185,7.26833;22.385,7.91833"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#prolog#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute 
name="pos"><dia:point val="21.185,7.66833"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="0"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O105"><dia:attribute name="obj_pos"><dia:point val="20,7.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="19.96,7.1;21.04,7.9"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="20,7.5"/><dia:point val="21,7.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O17" connection="2"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O106"><dia:attribute name="obj_pos"><dia:point val="20,16.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="19.9587,16.1339;21.128,16.9361"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="20,16.5"/><dia:point val="21.075,16.535"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O70" connection="2"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" 
id="O107"><dia:attribute name="obj_pos"><dia:point val="21.285,16.6683"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="21.285,16.2683;22.485,16.9183"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#epilog#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="21.285,16.6683"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="0"/></dia:attribute></dia:composite></dia:attribute></dia:object></dia:layer></dia:diagram>
diff --git a/doc/figures/arch.eps b/doc/figures/arch.eps
index 71f85ce0ba084f60bcbef9c503e257f49b60f633..d4635a811305487d90010a557113ec9f7f5ac58a 100644
--- a/doc/figures/arch.eps
+++ b/doc/figures/arch.eps
@@ -440,7 +440,7 @@ start_ol
 1024 1642 1281 1722 1554 1722 curveto
 1779 1722 1956 1681 2106 1591 curveto
 2138 1550 lineto
-end_ol grestore 
+end_ol grestore
 gsave 2.939330 13.825000 translate 0.035278 -0.035278 scale
 start_ol
 2769 1898 moveto
@@ -466,7 +466,7 @@ start_ol
 2593 1229 2671 1265 2722 1324 curveto
 2759 1364 2769 1410 2769 1527 curveto
 2769 1898 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.354197 13.825000 translate 0.035278 -0.035278 scale
 start_ol
 1984 0 moveto
@@ -497,7 +497,7 @@ start_ol
 704 866 1033 1024 1394 1024 curveto
 1575 1024 1772 1003 1984 966 curveto
 1984 702 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.769063 13.825000 translate 0.035278 -0.035278 scale
 start_ol
 1024 2240 moveto
@@ -532,7 +532,7 @@ start_ol
 2157 2174 1948 2240 1720 2240 curveto
 1441 2240 1268 2170 1024 1954 curveto
 1024 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.183930 13.825000 translate 0.035278 -0.035278 scale
 start_ol
 2769 1898 moveto
@@ -558,7 +558,7 @@ start_ol
 2593 1229 2671 1265 2722 1324 curveto
 2759 1364 2769 1410 2769 1527 curveto
 2769 1898 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.598797 13.825000 translate 0.035278 -0.035278 scale
 start_ol
 2851 896 moveto
@@ -577,7 +577,7 @@ start_ol
 829 1571 1135 1722 1514 1722 curveto
 1892 1722 2193 1575 2276 1344 curveto
 752 1344 lineto
-end_ol grestore 
+end_ol grestore
 gsave 5.013663 13.825000 translate 0.035278 -0.035278 scale
 start_ol
 1792 3200 moveto
@@ -600,7 +600,7 @@ start_ol
 2606 508 2564 518 2424 518 curveto
 1792 518 lineto
 1792 3200 lineto
-end_ol grestore 
+end_ol grestore
 0.100000 slw
 [] 0 sd
 [] 0 sd
@@ -645,7 +645,7 @@ start_ol
 1024 1642 1281 1722 1554 1722 curveto
 1779 1722 1956 1681 2106 1591 curveto
 2138 1550 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.553163 16.325000 translate 0.035278 -0.035278 scale
 start_ol
 1408 2240 moveto
@@ -676,7 +676,7 @@ start_ol
 2621 2179 2455 2240 2274 2240 curveto
 2020 2240 1823 2155 1408 1857 curveto
 1408 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.968030 16.325000 translate 0.035278 -0.035278 scale
 start_ol
 2624 2240 moveto
@@ -706,7 +706,7 @@ start_ol
 2894 111 2940 215 2940 326 curveto
 2940 449 2826 518 2624 518 curveto
 2624 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.382897 16.325000 translate 0.035278 -0.035278 scale
 start_ol
 1024 2240 moveto
@@ -741,7 +741,7 @@ start_ol
 2157 2174 1948 2240 1720 2240 curveto
 1441 2240 1268 2170 1024 1954 curveto
 1024 2240 lineto
-end_ol grestore 
+end_ol grestore
 0.100000 slw
 [] 0 sd
 [] 0 sd
@@ -786,7 +786,7 @@ start_ol
 1024 1642 1281 1722 1554 1722 curveto
 1779 1722 1956 1681 2106 1591 curveto
 2138 1550 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.142530 11.075000 translate 0.035278 -0.035278 scale
 start_ol
 2240 -576 moveto
@@ -822,7 +822,7 @@ start_ol
 802 820 704 991 704 1149 curveto
 704 1307 802 1478 951 1584 curveto
 1085 1674 1256 1722 1467 1722 curveto
-end_ol grestore 
+end_ol grestore
 gsave 3.557397 11.075000 translate 0.035278 -0.035278 scale
 start_ol
 2624 2240 moveto
@@ -852,7 +852,7 @@ start_ol
 2894 111 2940 215 2940 326 curveto
 2940 449 2826 518 2624 518 curveto
 2624 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.972263 11.075000 translate 0.035278 -0.035278 scale
 start_ol
 2851 896 moveto
@@ -871,7 +871,7 @@ start_ol
 829 1571 1135 1722 1514 1722 curveto
 1892 1722 2193 1575 2276 1344 curveto
 752 1344 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.387130 11.075000 translate 0.035278 -0.035278 scale
 start_ol
 2624 2240 moveto
@@ -901,7 +901,7 @@ start_ol
 2894 111 2940 215 2940 326 curveto
 2940 449 2826 518 2624 518 curveto
 2624 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.801997 11.075000 translate 0.035278 -0.035278 scale
 start_ol
 2851 896 moveto
@@ -920,7 +920,7 @@ start_ol
 829 1571 1135 1722 1514 1722 curveto
 1892 1722 2193 1575 2276 1344 curveto
 752 1344 lineto
-end_ol grestore 
+end_ol grestore
 0.100000 slw
 [] 0 sd
 [] 0 sd
@@ -965,7 +965,7 @@ start_ol
 1024 1642 1281 1722 1554 1722 curveto
 1779 1722 1956 1681 2106 1591 curveto
 2138 1550 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.349963 8.325000 translate 0.035278 -0.035278 scale
 start_ol
 1792 2240 moveto
@@ -993,7 +993,7 @@ start_ol
 1088 2656 lineto
 1728 2656 lineto
 1728 3200 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.764830 8.325000 translate 0.035278 -0.035278 scale
 start_ol
 1024 2240 moveto
@@ -1028,7 +1028,7 @@ start_ol
 2157 2174 1948 2240 1720 2240 curveto
 1441 2240 1268 2170 1024 1954 curveto
 1024 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.179697 8.325000 translate 0.035278 -0.035278 scale
 start_ol
 1536 1722 moveto
@@ -1066,7 +1066,7 @@ start_ol
 2444 508 2397 518 2257 518 curveto
 1536 518 lineto
 1536 1722 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.594563 8.325000 translate 0.035278 -0.035278 scale
 start_ol
 1552 2240 moveto
@@ -1079,7 +1079,7 @@ start_ol
 2368 781 2005 518 1536 518 curveto
 1067 518 704 781 704 1120 curveto
 704 1464 1067 1722 1547 1722 curveto
-end_ol grestore 
+end_ol grestore
 0.462745 0.615686 0.462745 srgb
 n 12.150000 9.000000 m 12.150000 12.650000 l 20.350000 12.650000 l 20.350000 9.000000 l f
 n 12.150000 9.400000 m 12.150000 9.400000 0.400000 0.400000 180.000000 270.000000 ellipse f
@@ -1132,7 +1132,7 @@ start_ol
 1024 1642 1281 1722 1554 1722 curveto
 1779 1722 1956 1681 2106 1591 curveto
 2138 1550 lineto
-end_ol grestore 
+end_ol grestore
 gsave 14.793733 10.575000 translate 0.035278 -0.035278 scale
 start_ol
 1792 3200 moveto
@@ -1155,7 +1155,7 @@ start_ol
 2606 508 2564 518 2424 518 curveto
 1792 518 lineto
 1792 3200 lineto
-end_ol grestore 
+end_ol grestore
 gsave 15.208600 10.575000 translate 0.035278 -0.035278 scale
 start_ol
 2624 2240 moveto
@@ -1185,7 +1185,7 @@ start_ol
 2894 111 2940 215 2940 326 curveto
 2940 449 2826 518 2624 518 curveto
 2624 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 15.623467 10.575000 translate 0.035278 -0.035278 scale
 start_ol
 1408 2240 moveto
@@ -1216,7 +1216,7 @@ start_ol
 2621 2179 2455 2240 2274 2240 curveto
 2020 2240 1823 2155 1408 1857 curveto
 1408 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 16.038333 10.575000 translate 0.035278 -0.035278 scale
 start_ol
 704 2240 moveto
@@ -1253,7 +1253,7 @@ start_ol
 1514 2157 1344 2240 1143 2240 curveto
 988 2240 885 2199 704 2071 curveto
 704 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 16.453200 10.575000 translate 0.035278 -0.035278 scale
 start_ol
 2769 1898 moveto
@@ -1279,7 +1279,7 @@ start_ol
 2593 1229 2671 1265 2722 1324 curveto
 2759 1364 2769 1410 2769 1527 curveto
 2769 1898 lineto
-end_ol grestore 
+end_ol grestore
 gsave 16.868067 10.575000 translate 0.035278 -0.035278 scale
 start_ol
 1088 1722 moveto
@@ -1311,7 +1311,7 @@ start_ol
 1337 518 1192 551 1129 613 curveto
 1098 646 1088 689 1088 789 curveto
 1088 1722 lineto
-end_ol grestore 
+end_ol grestore
 gsave 17.282933 10.575000 translate 0.035278 -0.035278 scale
 start_ol
 1792 3200 moveto
@@ -1334,7 +1334,7 @@ start_ol
 2606 508 2564 518 2424 518 curveto
 1792 518 lineto
 1792 3200 lineto
-end_ol grestore 
+end_ol grestore
 gsave 17.697800 10.575000 translate 0.035278 -0.035278 scale
 start_ol
 2752 3137 moveto
@@ -1368,7 +1368,7 @@ start_ol
 776 818 704 968 704 1122 curveto
 704 1272 776 1427 905 1540 curveto
 1049 1667 1219 1722 1467 1722 curveto
-end_ol grestore 
+end_ol grestore
 gsave 14.472000 11.475000 translate 0.035278 -0.035278 scale
 start_ol
 2241 3276 moveto
@@ -1382,7 +1382,7 @@ start_ol
 1920 1768 2081 2265 2444 2846 curveto
 2496 2929 2506 2960 2506 3022 curveto
 2506 3162 2392 3276 2241 3276 curveto
-end_ol grestore 
+end_ol grestore
 gsave 14.886867 11.475000 translate 0.035278 -0.035278 scale
 start_ol
 896 351 moveto
@@ -1417,7 +1417,7 @@ start_ol
 1211 576 896 815 896 1154 curveto
 896 1307 994 1483 1144 1584 curveto
 1279 1679 1449 1722 1667 1722 curveto
-end_ol grestore 
+end_ol grestore
 gsave 15.301733 11.475000 translate 0.035278 -0.035278 scale
 start_ol
 1408 2240 moveto
@@ -1448,7 +1448,7 @@ start_ol
 2621 2179 2455 2240 2274 2240 curveto
 2020 2240 1823 2155 1408 1857 curveto
 1408 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 15.716600 11.475000 translate 0.035278 -0.035278 scale
 start_ol
 1792 2240 moveto
@@ -1476,7 +1476,7 @@ start_ol
 1088 2656 lineto
 1728 2656 lineto
 1728 3200 lineto
-end_ol grestore 
+end_ol grestore
 gsave 16.131467 11.475000 translate 0.035278 -0.035278 scale
 start_ol
 704 2240 moveto
@@ -1513,7 +1513,7 @@ start_ol
 1514 2157 1344 2240 1143 2240 curveto
 988 2240 885 2199 704 2071 curveto
 704 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 16.546333 11.475000 translate 0.035278 -0.035278 scale
 start_ol
 1984 0 moveto
@@ -1544,7 +1544,7 @@ start_ol
 704 866 1033 1024 1394 1024 curveto
 1575 1024 1772 1003 1984 966 curveto
 1984 702 lineto
-end_ol grestore 
+end_ol grestore
 gsave 16.961200 11.475000 translate 0.035278 -0.035278 scale
 start_ol
 1408 2240 moveto
@@ -1575,7 +1575,7 @@ start_ol
 2621 2179 2455 2240 2274 2240 curveto
 2020 2240 1823 2155 1408 1857 curveto
 1408 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 17.376067 11.475000 translate 0.035278 -0.035278 scale
 start_ol
 1255 -15 moveto
@@ -1609,7 +1609,7 @@ start_ol
 197 2214 109 2120 109 1980 curveto
 109 1834 202 1741 363 1720 curveto
 1255 -15 lineto
-end_ol grestore 
+end_ol grestore
 gsave 17.790933 11.475000 translate 0.035278 -0.035278 scale
 start_ol
 895 3276 moveto
@@ -1623,7 +1623,7 @@ start_ol
 1569 5 1792 669 1792 1223 curveto
 1792 1794 1605 2390 1237 2976 curveto
 1087 3219 1019 3276 895 3276 curveto
-end_ol grestore 
+end_ol grestore
 0.870588 0.870588 0.870588 srgb
 n 16.150000 4.000000 m 16.150000 7.250000 l 22.350000 7.250000 l 22.350000 4.000000 l f
 n 16.150000 4.400000 m 16.150000 4.400000 0.400000 0.400000 180.000000 270.000000 ellipse f
@@ -1676,7 +1676,7 @@ start_ol
 1024 1642 1281 1722 1554 1722 curveto
 1779 1722 1956 1681 2106 1591 curveto
 2138 1550 lineto
-end_ol grestore 
+end_ol grestore
 gsave 17.793733 5.375000 translate 0.035278 -0.035278 scale
 start_ol
 1792 3200 moveto
@@ -1699,7 +1699,7 @@ start_ol
 2606 508 2564 518 2424 518 curveto
 1792 518 lineto
 1792 3200 lineto
-end_ol grestore 
+end_ol grestore
 gsave 18.208600 5.375000 translate 0.035278 -0.035278 scale
 start_ol
 2624 2240 moveto
@@ -1729,7 +1729,7 @@ start_ol
 2894 111 2940 215 2940 326 curveto
 2940 449 2826 518 2624 518 curveto
 2624 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 18.623467 5.375000 translate 0.035278 -0.035278 scale
 start_ol
 1408 2240 moveto
@@ -1760,7 +1760,7 @@ start_ol
 2621 2179 2455 2240 2274 2240 curveto
 2020 2240 1823 2155 1408 1857 curveto
 1408 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 19.038333 5.375000 translate 0.035278 -0.035278 scale
 start_ol
 704 2240 moveto
@@ -1797,7 +1797,7 @@ start_ol
 1514 2157 1344 2240 1143 2240 curveto
 988 2240 885 2199 704 2071 curveto
 704 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 19.453200 5.375000 translate 0.035278 -0.035278 scale
 start_ol
 2769 1898 moveto
@@ -1823,7 +1823,7 @@ start_ol
 2593 1229 2671 1265 2722 1324 curveto
 2759 1364 2769 1410 2769 1527 curveto
 2769 1898 lineto
-end_ol grestore 
+end_ol grestore
 gsave 19.868067 5.375000 translate 0.035278 -0.035278 scale
 start_ol
 1088 1722 moveto
@@ -1855,7 +1855,7 @@ start_ol
 1337 518 1192 551 1129 613 curveto
 1098 646 1088 689 1088 789 curveto
 1088 1722 lineto
-end_ol grestore 
+end_ol grestore
 gsave 20.282933 5.375000 translate 0.035278 -0.035278 scale
 start_ol
 1792 3200 moveto
@@ -1878,7 +1878,7 @@ start_ol
 2606 508 2564 518 2424 518 curveto
 1792 518 lineto
 1792 3200 lineto
-end_ol grestore 
+end_ol grestore
 gsave 20.697800 5.375000 translate 0.035278 -0.035278 scale
 start_ol
 2752 3137 moveto
@@ -1912,7 +1912,7 @@ start_ol
 776 818 704 968 704 1122 curveto
 704 1272 776 1427 905 1540 curveto
 1049 1667 1219 1722 1467 1722 curveto
-end_ol grestore 
+end_ol grestore
 gsave 17.679433 6.275000 translate 0.035278 -0.035278 scale
 start_ol
 2241 3276 moveto
@@ -1926,7 +1926,7 @@ start_ol
 1920 1768 2081 2265 2444 2846 curveto
 2496 2929 2506 2960 2506 3022 curveto
 2506 3162 2392 3276 2241 3276 curveto
-end_ol grestore 
+end_ol grestore
 gsave 18.094300 6.275000 translate 0.035278 -0.035278 scale
 start_ol
 896 3137 moveto
@@ -1959,7 +1959,7 @@ start_ol
 1211 518 896 768 896 1127 curveto
 896 1277 968 1427 1098 1540 curveto
 1243 1667 1413 1722 1667 1722 curveto
-end_ol grestore 
+end_ol grestore
 gsave 18.509167 6.275000 translate 0.035278 -0.035278 scale
 start_ol
 1984 0 moveto
@@ -1990,7 +1990,7 @@ start_ol
 704 866 1033 1024 1394 1024 curveto
 1575 1024 1772 1003 1984 966 curveto
 1984 702 lineto
-end_ol grestore 
+end_ol grestore
 gsave 18.924033 6.275000 translate 0.035278 -0.035278 scale
 start_ol
 2769 1898 moveto
@@ -2016,7 +2016,7 @@ start_ol
 2593 1229 2671 1265 2722 1324 curveto
 2759 1364 2769 1410 2769 1527 curveto
 2769 1898 lineto
-end_ol grestore 
+end_ol grestore
 gsave 19.338900 6.275000 translate 0.035278 -0.035278 scale
 start_ol
 1088 843 moveto
@@ -2056,7 +2056,7 @@ start_ol
 128 88 244 0 476 0 curveto
 1088 0 lineto
 1088 843 lineto
-end_ol grestore 
+end_ol grestore
 gsave 19.753767 6.275000 translate 0.035278 -0.035278 scale
 start_ol
 2624 2240 moveto
@@ -2086,7 +2086,7 @@ start_ol
 2894 111 2940 215 2940 326 curveto
 2940 449 2826 518 2624 518 curveto
 2624 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 20.168633 6.275000 translate 0.035278 -0.035278 scale
 start_ol
 896 351 moveto
@@ -2121,7 +2121,7 @@ start_ol
 1211 576 896 815 896 1154 curveto
 896 1307 994 1483 1144 1584 curveto
 1279 1679 1449 1722 1667 1722 curveto
-end_ol grestore 
+end_ol grestore
 gsave 20.583500 6.275000 translate 0.035278 -0.035278 scale
 start_ol
 895 3276 moveto
@@ -2135,7 +2135,7 @@ start_ol
 1569 5 1792 669 1792 1223 curveto
 1792 1794 1605 2390 1237 2976 curveto
 1087 3219 1019 3276 895 3276 curveto
-end_ol grestore 
+end_ol grestore
 0.100000 slw
 [] 0 sd
 [] 0 sd
@@ -2180,7 +2180,7 @@ start_ol
 1024 1642 1281 1722 1554 1722 curveto
 1779 1722 1956 1681 2106 1591 curveto
 2138 1550 lineto
-end_ol grestore 
+end_ol grestore
 gsave 2.781897 5.575000 translate 0.035278 -0.035278 scale
 start_ol
 2769 1898 moveto
@@ -2206,7 +2206,7 @@ start_ol
 2593 1229 2671 1265 2722 1324 curveto
 2759 1364 2769 1410 2769 1527 curveto
 2769 1898 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.196763 5.575000 translate 0.035278 -0.035278 scale
 start_ol
 1552 2240 moveto
@@ -2219,7 +2219,7 @@ start_ol
 2368 781 2005 518 1536 518 curveto
 1067 518 704 781 704 1120 curveto
 704 1464 1067 1722 1547 1722 curveto
-end_ol grestore 
+end_ol grestore
 gsave 3.611630 5.575000 translate 0.035278 -0.035278 scale
 start_ol
 1024 2240 moveto
@@ -2254,7 +2254,7 @@ start_ol
 2157 2174 1948 2240 1720 2240 curveto
 1441 2240 1268 2170 1024 1954 curveto
 1024 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.026497 5.575000 translate 0.035278 -0.035278 scale
 start_ol
 1088 1722 moveto
@@ -2286,7 +2286,7 @@ start_ol
 1337 518 1192 551 1129 613 curveto
 1098 646 1088 689 1088 789 curveto
 1088 1722 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.441363 5.575000 translate 0.035278 -0.035278 scale
 start_ol
 1408 2240 moveto
@@ -2317,7 +2317,7 @@ start_ol
 2621 2179 2455 2240 2274 2240 curveto
 2020 2240 1823 2155 1408 1857 curveto
 1408 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.856230 5.575000 translate 0.035278 -0.035278 scale
 start_ol
 1552 2240 moveto
@@ -2330,7 +2330,7 @@ start_ol
 2368 781 2005 518 1536 518 curveto
 1067 518 704 781 704 1120 curveto
 704 1464 1067 1722 1547 1722 curveto
-end_ol grestore 
+end_ol grestore
 gsave 5.271097 5.575000 translate 0.035278 -0.035278 scale
 start_ol
 1792 3200 moveto
@@ -2353,7 +2353,7 @@ start_ol
 2606 508 2564 518 2424 518 curveto
 1792 518 lineto
 1792 3200 lineto
-end_ol grestore 
+end_ol grestore
 0.462745 0.619608 0.462745 srgb
 n 9.250000 18.000000 m 9.250000 20.000000 l 12.750000 20.000000 l 12.750000 18.000000 l f
 0.100000 slw
@@ -2394,7 +2394,7 @@ start_ol
 1024 1642 1281 1722 1554 1722 curveto
 1779 1722 1956 1681 2106 1591 curveto
 2138 1550 lineto
-end_ol grestore 
+end_ol grestore
 gsave 10.166033 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 1792 3200 moveto
@@ -2417,7 +2417,7 @@ start_ol
 2606 508 2564 518 2424 518 curveto
 1792 518 lineto
 1792 3200 lineto
-end_ol grestore 
+end_ol grestore
 gsave 10.580900 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 2624 2240 moveto
@@ -2447,7 +2447,7 @@ start_ol
 2894 111 2940 215 2940 326 curveto
 2940 449 2826 518 2624 518 curveto
 2624 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 10.995767 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 1408 2240 moveto
@@ -2478,7 +2478,7 @@ start_ol
 2621 2179 2455 2240 2274 2240 curveto
 2020 2240 1823 2155 1408 1857 curveto
 1408 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.410633 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 704 2240 moveto
@@ -2515,7 +2515,7 @@ start_ol
 1514 2157 1344 2240 1143 2240 curveto
 988 2240 885 2199 704 2071 curveto
 704 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.825500 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 2752 3137 moveto
@@ -2549,7 +2549,7 @@ start_ol
 776 818 704 968 704 1122 curveto
 704 1272 776 1427 905 1540 curveto
 1049 1667 1219 1722 1467 1722 curveto
-end_ol grestore 
+end_ol grestore
 0.462745 0.619608 0.462745 srgb
 n 19.500000 18.000000 m 19.500000 20.000000 l 23.000000 20.000000 l 23.000000 18.000000 l f
 0.100000 slw
@@ -2590,7 +2590,7 @@ start_ol
 1024 1642 1281 1722 1554 1722 curveto
 1779 1722 1956 1681 2106 1591 curveto
 2138 1550 lineto
-end_ol grestore 
+end_ol grestore
 gsave 20.416033 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 1792 3200 moveto
@@ -2613,7 +2613,7 @@ start_ol
 2606 508 2564 518 2424 518 curveto
 1792 518 lineto
 1792 3200 lineto
-end_ol grestore 
+end_ol grestore
 gsave 20.830900 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 2624 2240 moveto
@@ -2643,7 +2643,7 @@ start_ol
 2894 111 2940 215 2940 326 curveto
 2940 449 2826 518 2624 518 curveto
 2624 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 21.245767 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 1408 2240 moveto
@@ -2674,7 +2674,7 @@ start_ol
 2621 2179 2455 2240 2274 2240 curveto
 2020 2240 1823 2155 1408 1857 curveto
 1408 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 21.660633 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 704 2240 moveto
@@ -2711,7 +2711,7 @@ start_ol
 1514 2157 1344 2240 1143 2240 curveto
 988 2240 885 2199 704 2071 curveto
 704 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 22.075500 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 2752 3137 moveto
@@ -2745,7 +2745,7 @@ start_ol
 776 818 704 968 704 1122 curveto
 704 1272 776 1427 905 1540 curveto
 1049 1667 1219 1722 1467 1722 curveto
-end_ol grestore 
+end_ol grestore
 0.462745 0.619608 0.462745 srgb
 n 13.500000 18.000000 m 13.500000 20.000000 l 17.000000 20.000000 l 17.000000 18.000000 l f
 0.100000 slw
@@ -2786,7 +2786,7 @@ start_ol
 1024 1642 1281 1722 1554 1722 curveto
 1779 1722 1956 1681 2106 1591 curveto
 2138 1550 lineto
-end_ol grestore 
+end_ol grestore
 gsave 14.416033 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 1792 3200 moveto
@@ -2809,7 +2809,7 @@ start_ol
 2606 508 2564 518 2424 518 curveto
 1792 518 lineto
 1792 3200 lineto
-end_ol grestore 
+end_ol grestore
 gsave 14.830900 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 2624 2240 moveto
@@ -2839,7 +2839,7 @@ start_ol
 2894 111 2940 215 2940 326 curveto
 2940 449 2826 518 2624 518 curveto
 2624 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 15.245767 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 1408 2240 moveto
@@ -2870,7 +2870,7 @@ start_ol
 2621 2179 2455 2240 2274 2240 curveto
 2020 2240 1823 2155 1408 1857 curveto
 1408 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 15.660633 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 704 2240 moveto
@@ -2907,7 +2907,7 @@ start_ol
 1514 2157 1344 2240 1143 2240 curveto
 988 2240 885 2199 704 2071 curveto
 704 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 16.075500 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 2752 3137 moveto
@@ -2941,7 +2941,7 @@ start_ol
 776 818 704 968 704 1122 curveto
 704 1272 776 1427 905 1540 curveto
 1049 1667 1219 1722 1467 1722 curveto
-end_ol grestore 
+end_ol grestore
 0.200000 slw
 [1.000000] 0 sd
 [0.200000] 0 sd
@@ -2961,7 +2961,7 @@ start_ol
 3083 0 3712 528 3712 1348 curveto
 3712 4224 lineto
 3200 4224 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.550333 2.500000 translate 0.035278 -0.035278 scale
 start_ol
 2624 2128 moveto
@@ -2982,7 +2982,7 @@ start_ol
 768 2400 1038 2564 1465 2564 curveto
 1886 2564 2112 2413 2112 2128 curveto
 2624 2128 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.931333 2.500000 translate 0.035278 -0.035278 scale
 start_ol
 2955 1344 moveto
@@ -3001,7 +3001,7 @@ start_ol
 2079 2564 2442 2209 2442 1759 curveto
 2442 1749 2442 1739 2436 1728 curveto
 743 1728 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.354667 2.500000 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3015,7 +3015,7 @@ start_ol
 1357 3008 1115 2849 832 2463 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.000000 3.500000 translate 0.035278 -0.035278 scale
 start_ol
 3769 2899 moveto
@@ -3033,7 +3033,7 @@ start_ol
 2427 3752 2744 3654 2917 3500 curveto
 3072 3364 3159 3194 3222 2899 curveto
 3769 2899 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.550333 3.500000 translate 0.035278 -0.035278 scale
 start_ol
 1562 3008 moveto
@@ -3046,7 +3046,7 @@ start_ol
 2432 849 2098 444 1568 444 curveto
 1032 444 704 844 704 1504 curveto
 704 2159 1032 2564 1568 2564 curveto
-end_ol grestore 
+end_ol grestore
 gsave 3.973667 3.500000 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3069,7 +3069,7 @@ start_ol
 1375 3008 1100 2859 832 2499 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.608667 3.500000 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3092,7 +3092,7 @@ start_ol
 1375 3008 1100 2859 832 2499 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 5.243667 3.500000 translate 0.035278 -0.035278 scale
 start_ol
 3115 414 moveto
@@ -3121,7 +3121,7 @@ start_ol
 768 1144 977 1289 1481 1357 curveto
 1980 1420 2081 1440 2240 1508 curveto
 2240 1019 lineto
-end_ol grestore 
+end_ol grestore
 gsave 5.667000 3.500000 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3137,7 +3137,7 @@ start_ol
 1392 3008 1100 2837 832 2421 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 6.090333 3.500000 translate 0.035278 -0.035278 scale
 start_ol
 2880 4224 moveto
@@ -3155,7 +3155,7 @@ start_ol
 2368 865 2033 449 1524 449 curveto
 993 449 640 870 640 1504 curveto
 640 2138 993 2559 1518 2559 curveto
-end_ol grestore 
+end_ol grestore
 gsave 6.513667 3.500000 translate 0.035278 -0.035278 scale
 start_ol
 2624 2128 moveto
@@ -3176,7 +3176,7 @@ start_ol
 768 2400 1038 2564 1465 2564 curveto
 1886 2564 2112 2413 2112 2128 curveto
 2624 2128 lineto
-end_ol grestore 
+end_ol grestore
 0.100000 slw
 [] 0 sd
 [] 0 sd
@@ -3326,7 +3326,7 @@ start_ol
 2427 3752 2744 3654 2917 3500 curveto
 3072 3364 3159 3194 3222 2899 curveto
 3769 2899 lineto
-end_ol grestore 
+end_ol grestore
 gsave 21.880133 8.843570 translate 0.035278 -0.035278 scale
 start_ol
 1562 3008 moveto
@@ -3339,7 +3339,7 @@ start_ol
 2432 849 2098 444 1568 444 curveto
 1032 444 704 844 704 1504 curveto
 704 2159 1032 2564 1568 2564 curveto
-end_ol grestore 
+end_ol grestore
 gsave 22.303467 8.843570 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3355,7 +3355,7 @@ start_ol
 1392 3008 1100 2837 832 2421 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 22.726800 8.843570 translate 0.035278 -0.035278 scale
 start_ol
 1519 3008 moveto
@@ -3375,7 +3375,7 @@ start_ol
 1024 2616 lineto
 1519 2616 lineto
 1519 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 22.938467 8.843570 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3389,7 +3389,7 @@ start_ol
 1357 3008 1115 2849 832 2463 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 23.192467 8.843570 translate 0.035278 -0.035278 scale
 start_ol
 1562 3008 moveto
@@ -3402,7 +3402,7 @@ start_ol
 2432 849 2098 444 1568 444 curveto
 1032 444 704 844 704 1504 curveto
 704 2159 1032 2564 1568 2564 curveto
-end_ol grestore 
+end_ol grestore
 gsave 23.615800 8.843570 translate 0.035278 -0.035278 scale
 start_ol
 896 4224 moveto
@@ -3410,7 +3410,7 @@ start_ol
 384 0 lineto
 896 0 lineto
 896 4224 lineto
-end_ol grestore 
+end_ol grestore
 gsave 23.785133 8.843570 translate 0.035278 -0.035278 scale
 start_ol
 896 4224 moveto
@@ -3418,7 +3418,7 @@ start_ol
 384 0 lineto
 896 0 lineto
 896 4224 lineto
-end_ol grestore 
+end_ol grestore
 gsave 23.954467 8.843570 translate 0.035278 -0.035278 scale
 start_ol
 2955 1344 moveto
@@ -3437,7 +3437,7 @@ start_ol
 2079 2564 2442 2209 2442 1759 curveto
 2442 1749 2442 1739 2436 1728 curveto
 743 1728 lineto
-end_ol grestore 
+end_ol grestore
 gsave 24.377800 8.843570 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3451,7 +3451,7 @@ start_ol
 1357 3008 1115 2849 832 2463 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 21.417600 9.593570 translate 0.035278 -0.035278 scale
 start_ol
 2880 4224 moveto
@@ -3469,7 +3469,7 @@ start_ol
 2368 865 2033 449 1524 449 curveto
 993 449 640 870 640 1504 curveto
 640 2138 993 2559 1518 2559 curveto
-end_ol grestore 
+end_ol grestore
 gsave 21.840933 9.593570 translate 0.035278 -0.035278 scale
 start_ol
 3115 414 moveto
@@ -3498,7 +3498,7 @@ start_ol
 768 1144 977 1289 1481 1357 curveto
 1980 1420 2081 1440 2240 1508 curveto
 2240 1019 lineto
-end_ol grestore 
+end_ol grestore
 gsave 22.264267 9.593570 translate 0.035278 -0.035278 scale
 start_ol
 2955 1344 moveto
@@ -3517,7 +3517,7 @@ start_ol
 2079 2564 2442 2209 2442 1759 curveto
 2442 1749 2442 1739 2436 1728 curveto
 743 1728 lineto
-end_ol grestore 
+end_ol grestore
 gsave 22.687600 9.593570 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3540,7 +3540,7 @@ start_ol
 1375 3008 1100 2859 832 2499 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 23.322600 9.593570 translate 0.035278 -0.035278 scale
 start_ol
 1562 3008 moveto
@@ -3553,7 +3553,7 @@ start_ol
 2432 849 2098 444 1568 444 curveto
 1032 444 704 844 704 1504 curveto
 704 2159 1032 2564 1568 2564 curveto
-end_ol grestore 
+end_ol grestore
 gsave 23.745933 9.593570 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3569,7 +3569,7 @@ start_ol
 1392 3008 1100 2837 832 2421 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 24.169267 9.593570 translate 0.035278 -0.035278 scale
 start_ol
 2624 2128 moveto
@@ -3590,7 +3590,7 @@ start_ol
 768 2400 1038 2564 1465 2564 curveto
 1886 2564 2112 2413 2112 2128 curveto
 2624 2128 lineto
-end_ol grestore 
+end_ol grestore
 gsave 23.320533 18.817300 translate 0.035278 -0.035278 scale
 start_ol
 3769 2899 moveto
@@ -3608,7 +3608,7 @@ start_ol
 2427 3752 2744 3654 2917 3500 curveto
 3072 3364 3159 3194 3222 2899 curveto
 3769 2899 lineto
-end_ol grestore 
+end_ol grestore
 gsave 23.870867 18.817300 translate 0.035278 -0.035278 scale
 start_ol
 1562 3008 moveto
@@ -3621,7 +3621,7 @@ start_ol
 2432 849 2098 444 1568 444 curveto
 1032 444 704 844 704 1504 curveto
 704 2159 1032 2564 1568 2564 curveto
-end_ol grestore 
+end_ol grestore
 gsave 24.294200 18.817300 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3644,7 +3644,7 @@ start_ol
 1375 3008 1100 2859 832 2499 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 24.929200 18.817300 translate 0.035278 -0.035278 scale
 start_ol
 320 -1270 moveto
@@ -3662,7 +3662,7 @@ start_ol
 2496 870 2155 449 1647 449 curveto
 1156 449 832 865 832 1504 curveto
 832 2143 1156 2559 1647 2559 curveto
-end_ol grestore 
+end_ol grestore
 gsave 25.352533 18.817300 translate 0.035278 -0.035278 scale
 start_ol
 2816 0 moveto
@@ -3678,7 +3678,7 @@ start_ol
 1754 0 2029 154 2304 546 curveto
 2304 0 lineto
 2816 0 lineto
-end_ol grestore 
+end_ol grestore
 gsave 25.775867 18.817300 translate 0.035278 -0.035278 scale
 start_ol
 1519 3008 moveto
@@ -3698,7 +3698,7 @@ start_ol
 1024 2616 lineto
 1519 2616 lineto
 1519 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 25.987533 18.817300 translate 0.035278 -0.035278 scale
 start_ol
 2955 1344 moveto
@@ -3717,10 +3717,10 @@ start_ol
 2079 2564 2442 2209 2442 1759 curveto
 2442 1749 2442 1739 2436 1728 curveto
 743 1728 lineto
-end_ol grestore 
+end_ol grestore
 gsave 26.410867 18.817300 translate 0.035278 -0.035278 scale
 start_ol
-end_ol grestore 
+end_ol grestore
 gsave 26.622533 18.817300 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3736,7 +3736,7 @@ start_ol
 1392 3008 1100 2837 832 2421 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 27.045867 18.817300 translate 0.035278 -0.035278 scale
 start_ol
 1562 3008 moveto
@@ -3749,7 +3749,7 @@ start_ol
 2432 849 2098 444 1568 444 curveto
 1032 444 704 844 704 1504 curveto
 704 2159 1032 2564 1568 2564 curveto
-end_ol grestore 
+end_ol grestore
 gsave 27.469200 18.817300 translate 0.035278 -0.035278 scale
 start_ol
 2880 4224 moveto
@@ -3767,7 +3767,7 @@ start_ol
 2368 865 2033 449 1524 449 curveto
 993 449 640 870 640 1504 curveto
 640 2138 993 2559 1518 2559 curveto
-end_ol grestore 
+end_ol grestore
 gsave 27.892533 18.817300 translate 0.035278 -0.035278 scale
 start_ol
 2955 1344 moveto
@@ -3786,7 +3786,7 @@ start_ol
 2079 2564 2442 2209 2442 1759 curveto
 2442 1749 2442 1739 2436 1728 curveto
 743 1728 lineto
-end_ol grestore 
+end_ol grestore
 gsave 24.242400 19.514800 translate 0.035278 -0.035278 scale
 start_ol
 2880 4224 moveto
@@ -3804,7 +3804,7 @@ start_ol
 2368 865 2033 449 1524 449 curveto
 993 449 640 870 640 1504 curveto
 640 2138 993 2559 1518 2559 curveto
-end_ol grestore 
+end_ol grestore
 gsave 24.665733 19.514800 translate 0.035278 -0.035278 scale
 start_ol
 3115 414 moveto
@@ -3833,7 +3833,7 @@ start_ol
 768 1144 977 1289 1481 1357 curveto
 1980 1420 2081 1440 2240 1508 curveto
 2240 1019 lineto
-end_ol grestore 
+end_ol grestore
 gsave 25.089067 19.514800 translate 0.035278 -0.035278 scale
 start_ol
 2955 1344 moveto
@@ -3852,7 +3852,7 @@ start_ol
 2079 2564 2442 2209 2442 1759 curveto
 2442 1749 2442 1739 2436 1728 curveto
 743 1728 lineto
-end_ol grestore 
+end_ol grestore
 gsave 25.512400 19.514800 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3875,7 +3875,7 @@ start_ol
 1375 3008 1100 2859 832 2499 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 26.147400 19.514800 translate 0.035278 -0.035278 scale
 start_ol
 1562 3008 moveto
@@ -3888,7 +3888,7 @@ start_ol
 2432 849 2098 444 1568 444 curveto
 1032 444 704 844 704 1504 curveto
 704 2159 1032 2564 1568 2564 curveto
-end_ol grestore 
+end_ol grestore
 gsave 26.570733 19.514800 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3904,7 +3904,7 @@ start_ol
 1392 3008 1100 2837 832 2421 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 26.994067 19.514800 translate 0.035278 -0.035278 scale
 start_ol
 2624 2128 moveto
@@ -3925,5 +3925,5 @@ start_ol
 768 2400 1038 2564 1465 2564 curveto
 1886 2564 2112 2413 2112 2128 curveto
 2624 2128 lineto
-end_ol grestore 
+end_ol grestore
 showpage
diff --git a/doc/figures/interactive-job-init.dia b/doc/figures/interactive-job-init.dia
index dca7c9858265dbec8166e3d8adf33aaf1b9f51ee..fdd223e69a2811440a8b29e4cbb38301fac7ef99 100644
--- a/doc/figures/interactive-job-init.dia
+++ b/doc/figures/interactive-job-init.dia
@@ -1,3 +1,3 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<dia:diagram xmlns:dia="http://www.lysator.liu.se/~alla/dia/"><dia:diagramdata><dia:attribute name="background"><dia:color val="#ffffff"/></dia:attribute><dia:attribute name="paper"><dia:composite type="paper"><dia:attribute name="name"><dia:string>#A4#</dia:string></dia:attribute><dia:attribute name="tmargin"><dia:real val="2.8222"/></dia:attribute><dia:attribute name="bmargin"><dia:real val="2.8222"/></dia:attribute><dia:attribute name="lmargin"><dia:real val="2.8222"/></dia:attribute><dia:attribute name="rmargin"><dia:real val="2.8222"/></dia:attribute><dia:attribute name="is_portrait"><dia:boolean val="true"/></dia:attribute><dia:attribute name="scaling"><dia:real val="1"/></dia:attribute><dia:attribute name="fitto"><dia:boolean val="false"/></dia:attribute></dia:composite></dia:attribute><dia:attribute name="grid"><dia:composite type="grid"><dia:attribute name="width_x"><dia:real val="0.25"/></dia:attribute><dia:attribute name="width_y"><dia:real val="0.125"/></dia:attribute><dia:attribute name="visible_x"><dia:int val="1"/></dia:attribute><dia:attribute name="visible_y"><dia:int val="1"/></dia:attribute></dia:composite></dia:attribute><dia:attribute name="guides"><dia:composite type="guides"><dia:attribute name="hguides"/><dia:attribute name="vguides"/></dia:composite></dia:attribute></dia:diagramdata><dia:layer name="Background" visible="true"><dia:object type="Standard - Line" version="0" id="O0"><dia:attribute name="obj_pos"><dia:point val="24,6.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="23.95,6.45;24.05,10.55"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="24,6.5"/><dia:point val="24,10.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:connections><dia:connection handle="1" to="O78" connection="1"/></dia:connections></dia:object><dia:group><dia:object type="Standard - PolyLine" version="0" 
id="O1"><dia:attribute name="obj_pos"><dia:point val="2,2.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="1.05563,1.71145;3.40369,4.475"/></dia:attribute><dia:attribute name="poly_points"><dia:point val="2,2.5"/><dia:point val="1.4125,1.75"/><dia:point val="1.125,1.9875"/><dia:point val="1.8875,2.9625"/><dia:point val="1.475,4.425"/><dia:point val="1.9375,4.425"/><dia:point val="2.1875,3.475"/><dia:point val="2.425,4.425"/><dia:point val="2.85,4.425"/><dia:point val="2.525,3.175"/><dia:point val="3.1625,3.55"/><dia:point val="3.3375,3.225"/><dia:point val="2.4625,2.675"/><dia:point val="2.425,2.4875"/></dia:attribute><dia:attribute name="line_color"><dia:color val="#010101"/></dia:attribute></dia:object><dia:object type="Standard - Arc" version="0" id="O2"><dia:attribute name="obj_pos"><dia:point val="2.4,2.525"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="1.83602,1.84571;2.61398,2.575"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="2.4,2.525"/><dia:point val="2.05,2.525"/></dia:attribute><dia:attribute name="arc_color"><dia:color val="#010101"/></dia:attribute><dia:attribute name="curve_distance"><dia:real val="0.629294"/></dia:attribute></dia:object></dia:group><dia:object type="Standard - Text" version="0" id="O3"><dia:attribute name="obj_pos"><dia:point val="2,1.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="1.5,1;2.5,1.85"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#User#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.65"/></dia:attribute><dia:attribute name="pos"><dia:point val="2,1.5"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum 
val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O4"><dia:attribute name="obj_pos"><dia:point val="15,2.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="14.95,2.45;15.05,16.05"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="15,2.5"/><dia:point val="15,16"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_color"><dia:color val="#b3b3b3"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O5"><dia:attribute name="obj_pos"><dia:point val="22,2.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="21.95,2.45;22.05,16.05"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="22,2.5"/><dia:point val="22,16"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_color"><dia:color val="#b1b1b1"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O6"><dia:attribute name="obj_pos"><dia:point val="15,2"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="13.875,1.55;16.125,2.35"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#slurmctld#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="15,2"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O7"><dia:attribute name="obj_pos"><dia:point val="22,2"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle 
val="21.25,1.55;22.75,2.35"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#slurmd#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="22,2"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O8"><dia:attribute name="obj_pos"><dia:point val="7.25,3.00001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.21,2.60001;14.79,3.40001"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="7.25,3.00001"/><dia:point val="14.75,3.00001"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O38" connection="2"/><dia:connection handle="1" to="O9" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Box" version="0" id="O9"><dia:attribute name="obj_pos"><dia:point val="14.75,3.00001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="14.7,2.95001;15.3,4.05001"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="14.75,3.00001"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bb"/></dia:attribute><dia:attribute 
name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O10"><dia:attribute name="obj_pos"><dia:point val="14.75,4.00001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.21,3.60001;14.79,4.40001"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="14.75,4.00001"/><dia:point val="7.25,4.00001"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O9" connection="5"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O11"><dia:attribute name="obj_pos"><dia:point val="11,2.875"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="9.325,2.375;12.675,3.175"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#register job step#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="11,2.875"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O12"><dia:attribute name="obj_pos"><dia:point val="11,3.875"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="8.75,3.375;13.25,4.175"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#register job step 
reply#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="11,3.875"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O13"><dia:attribute name="obj_pos"><dia:point val="7.25,5.00001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.21,4.60001;21.79,5.40001"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="7.25,5.00001"/><dia:point val="21.75,5.00001"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="1" to="O17" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O14"><dia:attribute name="obj_pos"><dia:point val="14.5,4.875"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="12.875,4.375;16.125,5.175"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#run job step req#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="14.5,4.875"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum 
val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O15"><dia:attribute name="obj_pos"><dia:point val="21.75,6.00001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.21,5.60001;21.79,6.40001"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="21.75,6.00001"/><dia:point val="7.25,6.00001"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O16"><dia:attribute name="obj_pos"><dia:point val="14.5,5.875"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="12.7,5.375;16.3,6.175"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#run job step reply#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="14.5,5.875"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O17"><dia:attribute name="obj_pos"><dia:point val="21.75,5.00001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="21.7,4.95001;22.3,6.55001"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="21.75,5.00001"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.500003"/></dia:attribute><dia:attribute name="elem_height"><dia:real 
val="1.5"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O18"><dia:attribute name="obj_pos"><dia:point val="22.8,5.90001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="22.8,5.50001;24.2,6.15001"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#job_mgr#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="22.8,5.90001"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="0"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O19"><dia:attribute name="obj_pos"><dia:point val="22,6.50001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="21.975,6.20001;22.775,6.80001"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="22,6.50001"/><dia:point val="22.75,6.50001"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.3"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.3"/></dia:attribute><dia:connections><dia:connection handle="0" to="O17" connection="6"/><dia:connection handle="1" to="O33" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O20"><dia:attribute name="obj_pos"><dia:point val="23.5,6.375"/></dia:attribute><dia:attribute 
name="obj_bb"><dia:rectangle val="23.5,5.975;25.7,6.625"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#session_mgr#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="23.5,6.375"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="0"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O21"><dia:attribute name="obj_pos"><dia:point val="23.75,7.50001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.21,7.00001;23.79,8.00001"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="23.75,7.50001"/><dia:point val="7.25,7.50001"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="start_arrow"><dia:enum val="13"/></dia:attribute><dia:attribute name="start_arrow_length"><dia:real val="0.5"/></dia:attribute><dia:attribute name="start_arrow_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="3"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.5"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.5"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O22"><dia:attribute name="obj_pos"><dia:point val="15.5,7.375"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="13.525,6.875;17.475,7.675"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#connect(stdout/err)#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" 
name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="15.5,7.375"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O23"><dia:attribute name="obj_pos"><dia:point val="23.75,11"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.21,10.6;23.79,11.4"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="23.75,11"/><dia:point val="7.25,11"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O24"><dia:attribute name="obj_pos"><dia:point val="15.5,10.875"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="14.075,10.375;16.925,11.175"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#task exit msg#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="15.5,10.875"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O25"><dia:attribute name="obj_pos"><dia:point val="7,13"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle 
val="6.96,12.6;14.79,13.4"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="7,13"/><dia:point val="14.75,13"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="1" to="O28" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O26"><dia:attribute name="obj_pos"><dia:point val="23.05,6"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="22.9495,5.94955;23.1005,11.5505"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="23.05,6"/><dia:point val="23,11.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:connections><dia:connection handle="1" to="O35" connection="1"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O27"><dia:attribute name="obj_pos"><dia:point val="11,12.875"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="9.2,12.375;12.8,13.175"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#release allocation#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="11,12.875"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" 
id="O28"><dia:attribute name="obj_pos"><dia:point val="14.75,13"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="14.7,12.95;15.3,15.55"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="14.75,13"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="2.5"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O29"><dia:attribute name="obj_pos"><dia:point val="15.25,13.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="15.21,13.1;21.79,13.9"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="15.25,13.5"/><dia:point val="21.75,13.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="1" to="O36" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O30"><dia:attribute name="obj_pos"><dia:point val="18.5,13.375"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="17.1,12.875;19.9,13.675"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#run epilog req#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="18.5,13.375"/></dia:attribute><dia:attribute name="color"><dia:color 
val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O31"><dia:attribute name="obj_pos"><dia:point val="21.75,15"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="15.21,14.6;21.79,15.4"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="21.75,15"/><dia:point val="15.25,15"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O36" connection="5"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O32"><dia:attribute name="obj_pos"><dia:point val="18.5,14.875"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="16.925,14.375;20.075,15.175"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#run epilog reply#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="18.5,14.875"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O33"><dia:attribute name="obj_pos"><dia:point val="22.75,6.50001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="22.7,6.45001;23.3,7.05001"/></dia:attribute><dia:attribute name="elem_corner"><dia:point 
val="22.75,6.50001"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O34"><dia:attribute name="obj_pos"><dia:point val="23.75,7.00001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="23.7,6.95001;24.3,8.05001"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="23.75,7.00001"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O35"><dia:attribute name="obj_pos"><dia:point val="22.75,11.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="22.7,11.45;23.3,12.55"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="22.75,11.5"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O36"><dia:attribute name="obj_pos"><dia:point val="21.75,13.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="21.7,13.45;22.3,15.05"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="21.75,13.5"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real 
val="1.5"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O37"><dia:attribute name="obj_pos"><dia:point val="7,2"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="6.5,1.55;7.5,2.35"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#srun#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="7,2"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O38"><dia:attribute name="obj_pos"><dia:point val="6.75,3.00001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="6.7,2.95001;7.3,13.55"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="6.75,3.00001"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="10.5"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O39"><dia:attribute name="obj_pos"><dia:point val="3.5,3"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="3.45,2.60001;6.8,3.40001"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="3.5,3"/><dia:point val="6.75,3.00001"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum 
val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.6"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="1" to="O38" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O40"><dia:attribute name="obj_pos"><dia:point val="6.75,13.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="3.45,13.1;6.8,13.9"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.75,13.5"/><dia:point val="3.5,13.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.6"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O38" connection="5"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O41"><dia:attribute name="obj_pos"><dia:point val="15,8.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.21,8.10001;15.04,8.90001"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="15,8.5"/><dia:point val="7.25,8.50001"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O42"><dia:attribute name="obj_pos"><dia:point val="7.25,9.05001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.21,8.65;14.84,9.45"/></dia:attribute><dia:attribute 
name="conn_endpoints"><dia:point val="7.25,9.05001"/><dia:point val="14.8,9.05"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O43"><dia:attribute name="obj_pos"><dia:point val="11.25,8.375"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="9.65,7.975;12.85,8.625"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#status req (periodic)#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="11.25,8.375"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O44"><dia:attribute name="obj_pos"><dia:point val="11.25,9.375"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="10.3,8.975;12.2,9.625"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#status reply#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="11.25,9.375"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute 
name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O45"><dia:attribute name="obj_pos"><dia:point val="2.5,13.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="1.85,13;3.15,14.5"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#exit 
+<dia:diagram xmlns:dia="http://www.lysator.liu.se/~alla/dia/"><dia:diagramdata><dia:attribute name="background"><dia:color val="#ffffff"/></dia:attribute><dia:attribute name="paper"><dia:composite type="paper"><dia:attribute name="name"><dia:string>#A4#</dia:string></dia:attribute><dia:attribute name="tmargin"><dia:real val="2.8222"/></dia:attribute><dia:attribute name="bmargin"><dia:real val="2.8222"/></dia:attribute><dia:attribute name="lmargin"><dia:real val="2.8222"/></dia:attribute><dia:attribute name="rmargin"><dia:real val="2.8222"/></dia:attribute><dia:attribute name="is_portrait"><dia:boolean val="true"/></dia:attribute><dia:attribute name="scaling"><dia:real val="1"/></dia:attribute><dia:attribute name="fitto"><dia:boolean val="false"/></dia:attribute></dia:composite></dia:attribute><dia:attribute name="grid"><dia:composite type="grid"><dia:attribute name="width_x"><dia:real val="0.25"/></dia:attribute><dia:attribute name="width_y"><dia:real val="0.125"/></dia:attribute><dia:attribute name="visible_x"><dia:int val="1"/></dia:attribute><dia:attribute name="visible_y"><dia:int val="1"/></dia:attribute></dia:composite></dia:attribute><dia:attribute name="guides"><dia:composite type="guides"><dia:attribute name="hguides"/><dia:attribute name="vguides"/></dia:composite></dia:attribute></dia:diagramdata><dia:layer name="Background" visible="true"><dia:object type="Standard - Line" version="0" id="O0"><dia:attribute name="obj_pos"><dia:point val="24,6.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="23.95,6.45;24.05,10.55"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="24,6.5"/><dia:point val="24,10.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:connections><dia:connection handle="1" to="O78" connection="1"/></dia:connections></dia:object><dia:group><dia:object type="Standard - PolyLine" version="0" 
id="O1"><dia:attribute name="obj_pos"><dia:point val="2,2.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="1.05563,1.71145;3.40369,4.475"/></dia:attribute><dia:attribute name="poly_points"><dia:point val="2,2.5"/><dia:point val="1.4125,1.75"/><dia:point val="1.125,1.9875"/><dia:point val="1.8875,2.9625"/><dia:point val="1.475,4.425"/><dia:point val="1.9375,4.425"/><dia:point val="2.1875,3.475"/><dia:point val="2.425,4.425"/><dia:point val="2.85,4.425"/><dia:point val="2.525,3.175"/><dia:point val="3.1625,3.55"/><dia:point val="3.3375,3.225"/><dia:point val="2.4625,2.675"/><dia:point val="2.425,2.4875"/></dia:attribute><dia:attribute name="line_color"><dia:color val="#010101"/></dia:attribute></dia:object><dia:object type="Standard - Arc" version="0" id="O2"><dia:attribute name="obj_pos"><dia:point val="2.4,2.525"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="1.83602,1.84571;2.61398,2.575"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="2.4,2.525"/><dia:point val="2.05,2.525"/></dia:attribute><dia:attribute name="arc_color"><dia:color val="#010101"/></dia:attribute><dia:attribute name="curve_distance"><dia:real val="0.629294"/></dia:attribute></dia:object></dia:group><dia:object type="Standard - Text" version="0" id="O3"><dia:attribute name="obj_pos"><dia:point val="2,1.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="1.5,1;2.5,1.85"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#User#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.65"/></dia:attribute><dia:attribute name="pos"><dia:point val="2,1.5"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum 
val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O4"><dia:attribute name="obj_pos"><dia:point val="15,2.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="14.95,2.45;15.05,16.05"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="15,2.5"/><dia:point val="15,16"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_color"><dia:color val="#b3b3b3"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O5"><dia:attribute name="obj_pos"><dia:point val="22,2.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="21.95,2.45;22.05,16.05"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="22,2.5"/><dia:point val="22,16"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_color"><dia:color val="#b1b1b1"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O6"><dia:attribute name="obj_pos"><dia:point val="15,2"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="13.875,1.55;16.125,2.35"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#slurmctld#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="15,2"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O7"><dia:attribute name="obj_pos"><dia:point val="22,2"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle 
val="21.25,1.55;22.75,2.35"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#slurmd#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="22,2"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O8"><dia:attribute name="obj_pos"><dia:point val="7.25,3.00001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.21,2.60001;14.79,3.40001"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="7.25,3.00001"/><dia:point val="14.75,3.00001"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O38" connection="2"/><dia:connection handle="1" to="O9" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Box" version="0" id="O9"><dia:attribute name="obj_pos"><dia:point val="14.75,3.00001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="14.7,2.95001;15.3,4.05001"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="14.75,3.00001"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bb"/></dia:attribute><dia:attribute 
name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O10"><dia:attribute name="obj_pos"><dia:point val="14.75,4.00001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.21,3.60001;14.79,4.40001"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="14.75,4.00001"/><dia:point val="7.25,4.00001"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O9" connection="5"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O11"><dia:attribute name="obj_pos"><dia:point val="11,2.875"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="9.325,2.375;12.675,3.175"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#register job step#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="11,2.875"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O12"><dia:attribute name="obj_pos"><dia:point val="11,3.875"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="8.75,3.375;13.25,4.175"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#register job step 
reply#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="11,3.875"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O13"><dia:attribute name="obj_pos"><dia:point val="7.25,5.00001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.21,4.60001;21.79,5.40001"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="7.25,5.00001"/><dia:point val="21.75,5.00001"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="1" to="O17" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O14"><dia:attribute name="obj_pos"><dia:point val="14.5,4.875"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="12.875,4.375;16.125,5.175"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#run job step req#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="14.5,4.875"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum 
val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O15"><dia:attribute name="obj_pos"><dia:point val="21.75,6.00001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.21,5.60001;21.79,6.40001"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="21.75,6.00001"/><dia:point val="7.25,6.00001"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O16"><dia:attribute name="obj_pos"><dia:point val="14.5,5.875"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="12.7,5.375;16.3,6.175"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#run job step reply#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="14.5,5.875"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O17"><dia:attribute name="obj_pos"><dia:point val="21.75,5.00001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="21.7,4.95001;22.3,6.55001"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="21.75,5.00001"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.500003"/></dia:attribute><dia:attribute name="elem_height"><dia:real 
val="1.5"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O18"><dia:attribute name="obj_pos"><dia:point val="22.8,5.90001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="22.8,5.50001;24.2,6.15001"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#job_mgr#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="22.8,5.90001"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="0"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O19"><dia:attribute name="obj_pos"><dia:point val="22,6.50001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="21.975,6.20001;22.775,6.80001"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="22,6.50001"/><dia:point val="22.75,6.50001"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.3"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.3"/></dia:attribute><dia:connections><dia:connection handle="0" to="O17" connection="6"/><dia:connection handle="1" to="O33" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O20"><dia:attribute name="obj_pos"><dia:point val="23.5,6.375"/></dia:attribute><dia:attribute 
name="obj_bb"><dia:rectangle val="23.5,5.975;25.7,6.625"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#session_mgr#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="23.5,6.375"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="0"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O21"><dia:attribute name="obj_pos"><dia:point val="23.75,7.50001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.21,7.00001;23.79,8.00001"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="23.75,7.50001"/><dia:point val="7.25,7.50001"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="start_arrow"><dia:enum val="13"/></dia:attribute><dia:attribute name="start_arrow_length"><dia:real val="0.5"/></dia:attribute><dia:attribute name="start_arrow_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="3"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.5"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.5"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O22"><dia:attribute name="obj_pos"><dia:point val="15.5,7.375"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="13.525,6.875;17.475,7.675"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#connect(stdout/err)#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" 
name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="15.5,7.375"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O23"><dia:attribute name="obj_pos"><dia:point val="23.75,11"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.21,10.6;23.79,11.4"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="23.75,11"/><dia:point val="7.25,11"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O24"><dia:attribute name="obj_pos"><dia:point val="15.5,10.875"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="14.075,10.375;16.925,11.175"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#task exit msg#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="15.5,10.875"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O25"><dia:attribute name="obj_pos"><dia:point val="7,13"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle 
val="6.96,12.6;14.79,13.4"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="7,13"/><dia:point val="14.75,13"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="1" to="O28" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O26"><dia:attribute name="obj_pos"><dia:point val="23.05,6"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="22.9495,5.94955;23.1005,11.5505"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="23.05,6"/><dia:point val="23,11.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:connections><dia:connection handle="1" to="O35" connection="1"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O27"><dia:attribute name="obj_pos"><dia:point val="11,12.875"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="9.2,12.375;12.8,13.175"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#release allocation#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="11,12.875"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" 
id="O28"><dia:attribute name="obj_pos"><dia:point val="14.75,13"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="14.7,12.95;15.3,15.55"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="14.75,13"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="2.5"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O29"><dia:attribute name="obj_pos"><dia:point val="15.25,13.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="15.21,13.1;21.79,13.9"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="15.25,13.5"/><dia:point val="21.75,13.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="1" to="O36" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O30"><dia:attribute name="obj_pos"><dia:point val="18.5,13.375"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="17.1,12.875;19.9,13.675"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#run epilog req#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="18.5,13.375"/></dia:attribute><dia:attribute name="color"><dia:color 
val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O31"><dia:attribute name="obj_pos"><dia:point val="21.75,15"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="15.21,14.6;21.79,15.4"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="21.75,15"/><dia:point val="15.25,15"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O36" connection="5"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O32"><dia:attribute name="obj_pos"><dia:point val="18.5,14.875"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="16.925,14.375;20.075,15.175"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#run epilog reply#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="18.5,14.875"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O33"><dia:attribute name="obj_pos"><dia:point val="22.75,6.50001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="22.7,6.45001;23.3,7.05001"/></dia:attribute><dia:attribute name="elem_corner"><dia:point 
val="22.75,6.50001"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O34"><dia:attribute name="obj_pos"><dia:point val="23.75,7.00001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="23.7,6.95001;24.3,8.05001"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="23.75,7.00001"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O35"><dia:attribute name="obj_pos"><dia:point val="22.75,11.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="22.7,11.45;23.3,12.55"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="22.75,11.5"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O36"><dia:attribute name="obj_pos"><dia:point val="21.75,13.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="21.7,13.45;22.3,15.05"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="21.75,13.5"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real 
val="1.5"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O37"><dia:attribute name="obj_pos"><dia:point val="7,2"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="6.5,1.55;7.5,2.35"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#srun#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="7,2"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O38"><dia:attribute name="obj_pos"><dia:point val="6.75,3.00001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="6.7,2.95001;7.3,13.55"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="6.75,3.00001"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="10.5"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O39"><dia:attribute name="obj_pos"><dia:point val="3.5,3"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="3.45,2.60001;6.8,3.40001"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="3.5,3"/><dia:point val="6.75,3.00001"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum 
val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.6"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="1" to="O38" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O40"><dia:attribute name="obj_pos"><dia:point val="6.75,13.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="3.45,13.1;6.8,13.9"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.75,13.5"/><dia:point val="3.5,13.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.6"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O38" connection="5"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O41"><dia:attribute name="obj_pos"><dia:point val="15,8.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.21,8.10001;15.04,8.90001"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="15,8.5"/><dia:point val="7.25,8.50001"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O42"><dia:attribute name="obj_pos"><dia:point val="7.25,9.05001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.21,8.65;14.84,9.45"/></dia:attribute><dia:attribute 
name="conn_endpoints"><dia:point val="7.25,9.05001"/><dia:point val="14.8,9.05"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O43"><dia:attribute name="obj_pos"><dia:point val="11.25,8.375"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="9.65,7.975;12.85,8.625"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#status req (periodic)#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="11.25,8.375"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O44"><dia:attribute name="obj_pos"><dia:point val="11.25,9.375"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="10.3,8.975;12.2,9.625"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#status reply#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="11.25,9.375"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute 
name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O45"><dia:attribute name="obj_pos"><dia:point val="2.5,13.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="1.85,13;3.15,14.5"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#exit
 status#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.65"/></dia:attribute><dia:attribute name="pos"><dia:point val="2.5,13.5"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:group><dia:object type="Standard - Line" version="0" id="O46"><dia:attribute name="obj_pos"><dia:point val="5,1.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.975,1.475;5.025,16.025"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5,1.5"/><dia:point val="5,16"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O47"><dia:attribute name="obj_pos"><dia:point val="5,1.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.46464,1.46464;5.03536,2.03536"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5,1.5"/><dia:point val="4.5,2"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O48"><dia:attribute name="obj_pos"><dia:point val="5,2.1845"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.46464,2.14914;5.03536,2.71986"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5,2.1845"/><dia:point val="4.5,2.6845"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O49"><dia:attribute name="obj_pos"><dia:point 
val="5,2.869"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.46464,2.83364;5.03536,3.40436"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5,2.869"/><dia:point val="4.5,3.369"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O50"><dia:attribute name="obj_pos"><dia:point val="5,3.5536"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.46464,3.51824;5.03536,4.08896"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5,3.5536"/><dia:point val="4.5,4.0536"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O51"><dia:attribute name="obj_pos"><dia:point val="5,4.2381"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.46464,4.20274;5.03536,4.77346"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5,4.2381"/><dia:point val="4.5,4.7381"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O52"><dia:attribute name="obj_pos"><dia:point val="5,4.9226"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.46464,4.88724;5.03536,5.45796"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5,4.9226"/><dia:point val="4.5,5.4226"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O53"><dia:attribute name="obj_pos"><dia:point val="5,5.6071"/></dia:attribute><dia:attribute 
name="obj_bb"><dia:rectangle val="4.46464,5.57174;5.03536,6.14246"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5,5.6071"/><dia:point val="4.5,6.1071"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O54"><dia:attribute name="obj_pos"><dia:point val="5,6.2917"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.46464,6.25634;5.03536,6.82706"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5,6.2917"/><dia:point val="4.5,6.7917"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O55"><dia:attribute name="obj_pos"><dia:point val="5,6.9762"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.46464,6.94084;5.03536,7.51156"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5,6.9762"/><dia:point val="4.5,7.4762"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O56"><dia:attribute name="obj_pos"><dia:point val="5,7.6607"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.46464,7.62534;5.03536,8.19606"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5,7.6607"/><dia:point val="4.5,8.1607"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O57"><dia:attribute name="obj_pos"><dia:point val="5,8.3452"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle 
val="4.46464,8.30984;5.03536,8.88056"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5,8.3452"/><dia:point val="4.5,8.8452"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O58"><dia:attribute name="obj_pos"><dia:point val="5,9.0297"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.46464,8.99434;5.03536,9.56506"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5,9.0297"/><dia:point val="4.5,9.5297"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O59"><dia:attribute name="obj_pos"><dia:point val="5,9.7143"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.46464,9.67894;5.03536,10.2497"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5,9.7143"/><dia:point val="4.5,10.2143"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O60"><dia:attribute name="obj_pos"><dia:point val="5,10.3988"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.46464,10.3634;5.03536,10.9342"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5,10.3988"/><dia:point val="4.5,10.8988"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O61"><dia:attribute name="obj_pos"><dia:point val="5,11.0833"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle 
val="4.46464,11.0479;5.03536,11.6187"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5,11.0833"/><dia:point val="4.5,11.5833"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O62"><dia:attribute name="obj_pos"><dia:point val="5,11.7678"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.46464,11.7324;5.03536,12.3032"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5,11.7678"/><dia:point val="4.5,12.2678"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O63"><dia:attribute name="obj_pos"><dia:point val="5,12.4523"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.46464,12.4169;5.03536,12.9877"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5,12.4523"/><dia:point val="4.5,12.9523"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O64"><dia:attribute name="obj_pos"><dia:point val="5,13.1369"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.46464,13.1015;5.03536,13.6723"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5,13.1369"/><dia:point val="4.5,13.6369"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O65"><dia:attribute name="obj_pos"><dia:point val="5,13.8214"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle 
val="4.46464,13.786;5.03536,14.3568"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5,13.8214"/><dia:point val="4.5,14.3214"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O66"><dia:attribute name="obj_pos"><dia:point val="5,14.5059"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.46464,14.4705;5.03536,15.0413"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5,14.5059"/><dia:point val="4.5,15.0059"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O67"><dia:attribute name="obj_pos"><dia:point val="5,15.1904"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.46464,15.155;5.03536,15.7258"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5,15.1904"/><dia:point val="4.5,15.6904"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute></dia:object></dia:group><dia:object type="Standard - Box" version="0" id="O68"><dia:attribute name="obj_pos"><dia:point val="24.75,8.00001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="24.7,7.95001;25.3,10.55"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="24.75,8.00001"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="2.5"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O69"><dia:attribute 
name="obj_pos"><dia:point val="25.25,7.00001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="25.25,6.60001;25.25,7.25001"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>##</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="25.25,7.00001"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="0"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O70"><dia:attribute name="obj_pos"><dia:point val="24.75,7.625"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="24.75,7.225;25.35,7.875"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#cmd#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="24.75,7.625"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="0"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O71"><dia:attribute name="obj_pos"><dia:point val="23,7.00001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="22.975,6.70001;23.775,7.30001"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="23,7.00001"/><dia:point val="23.75,7.00001"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute><dia:attribute 
name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.3"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.3"/></dia:attribute><dia:connections><dia:connection handle="0" to="O33" connection="6"/><dia:connection handle="1" to="O34" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O72"><dia:attribute name="obj_pos"><dia:point val="24,11.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="23.225,11.2;24.025,11.8"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="24,11.5"/><dia:point val="23.25,11.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.3"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.3"/></dia:attribute><dia:connections><dia:connection handle="1" to="O35" connection="2"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O73"><dia:attribute name="obj_pos"><dia:point val="22.75,12.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="22.225,12.2;22.775,12.8"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="22.75,12.5"/><dia:point val="22.25,12.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.3"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.3"/></dia:attribute><dia:connections><dia:connection handle="0" to="O35" connection="5"/><dia:connection handle="1" to="O74" connection="2"/></dia:connections></dia:object><dia:object type="Standard - 
Box" version="0" id="O74"><dia:attribute name="obj_pos"><dia:point val="21.75,12.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="21.7,12.45;22.3,13.05"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="21.75,12.5"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O75"><dia:attribute name="obj_pos"><dia:point val="24.25,8.00001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="24.225,7.70001;24.775,8.30001"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="24.25,8.00001"/><dia:point val="24.75,8.00001"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.3"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.3"/></dia:attribute><dia:connections><dia:connection handle="1" to="O68" connection="0"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O76"><dia:attribute name="obj_pos"><dia:point val="24.75,10.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="24.225,10.2;24.775,10.8"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="24.75,10.5"/><dia:point val="24.25,10.5"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.05"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="1"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real 
val="0.3"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.3"/></dia:attribute><dia:connections><dia:connection handle="0" to="O68" connection="5"/><dia:connection handle="1" to="O78" connection="2"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O77"><dia:attribute name="obj_pos"><dia:point val="5,2.875"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.225,2.475;5.775,3.175"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#srun cmd#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="80" name="Helvetica-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="5,2.875"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Box" version="0" id="O78"><dia:attribute name="obj_pos"><dia:point val="23.75,10.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="23.7,10.45;24.3,11.55"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="23.75,10.5"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="0.5"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#8989bf"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O79"><dia:attribute name="obj_pos"><dia:point val="23.25,5.125"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="23.25,4.71125;24.4638,5.4025"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute 
name="string"><dia:string>#prolog#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="23.25,5.125"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="0"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O80"><dia:attribute name="obj_pos"><dia:point val="22.25,5.00001"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="22.208,4.64845;23.261,5.45155"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="22.25,5.00001"/><dia:point val="23.2,5.05"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O17" connection="2"/></dia:connections></dia:object><dia:object type="Standard - Text" version="0" id="O81"><dia:attribute name="obj_pos"><dia:point val="23.355,13.7783"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="23.355,13.3783;24.555,14.0283"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#epilog#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="23.355,13.7783"/></dia:attribute><dia:attribute 
name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="0"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O82"><dia:attribute name="obj_pos"><dia:point val="22.4,13.65"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="22.3598,13.2448;23.2874,14.0452"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="22.4,13.65"/><dia:point val="23.245,13.645"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_width"><dia:real val="0.08"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:attribute name="end_arrow"><dia:enum val="2"/></dia:attribute><dia:attribute name="end_arrow_length"><dia:real val="0.4"/></dia:attribute><dia:attribute name="end_arrow_width"><dia:real val="0.4"/></dia:attribute></dia:object></dia:layer></dia:diagram>
diff --git a/doc/figures/slurm-arch.dia b/doc/figures/slurm-arch.dia
index 9edf9558efcee3988822b8daac5fed41da4b7033..123268dcb1782a87dec7776c17c792fe97aef97f 100644
--- a/doc/figures/slurm-arch.dia
+++ b/doc/figures/slurm-arch.dia
@@ -1,8 +1,8 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<dia:diagram xmlns:dia="http://www.lysator.liu.se/~alla/dia/"><dia:diagramdata><dia:attribute name="background"><dia:color val="#ffffff"/></dia:attribute><dia:attribute name="paper"><dia:composite type="paper"><dia:attribute name="name"><dia:string>#Letter#</dia:string></dia:attribute><dia:attribute name="tmargin"><dia:real val="2.54"/></dia:attribute><dia:attribute name="bmargin"><dia:real val="2.54"/></dia:attribute><dia:attribute name="lmargin"><dia:real val="2.54"/></dia:attribute><dia:attribute name="rmargin"><dia:real val="2.54"/></dia:attribute><dia:attribute name="is_portrait"><dia:boolean val="true"/></dia:attribute><dia:attribute name="scaling"><dia:real val="1"/></dia:attribute><dia:attribute name="fitto"><dia:boolean val="false"/></dia:attribute></dia:composite></dia:attribute><dia:attribute name="grid"><dia:composite type="grid"><dia:attribute name="width_x"><dia:real val="0.1"/></dia:attribute><dia:attribute name="width_y"><dia:real val="0.1"/></dia:attribute><dia:attribute name="visible_x"><dia:int val="1"/></dia:attribute><dia:attribute name="visible_y"><dia:int val="1"/></dia:attribute></dia:composite></dia:attribute><dia:attribute name="guides"><dia:composite type="guides"><dia:attribute name="hguides"/><dia:attribute name="vguides"/></dia:composite></dia:attribute></dia:diagramdata><dia:layer name="Background" visible="true"><dia:object type="Standard - Box" version="0" id="O0"><dia:attribute name="obj_pos"><dia:point val="-1.35,6"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="-1.35,6;14.9844,10"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="-1.35,6"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="16.3344"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="4"/></dia:attribute><dia:attribute name="border_width"><dia:real val="1.49012e-08"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#7272af"/></dia:attribute><dia:attribute 
name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Flowchart - Box" version="0" id="O1"><dia:attribute name="obj_pos"><dia:point val="4.4,7.2"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.35,7.15;9.608,9.25"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="4.4,7.2"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="5.158"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="2"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute><dia:attribute name="corner_radius"><dia:real val="0.5"/></dia:attribute><dia:attribute name="padding"><dia:real val="0.2"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#Partition Manager#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="6.979,8.35"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Flowchart - Box" version="0" id="O2"><dia:attribute name="obj_pos"><dia:point val="-0.6,7.2"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="-0.65,7.15;3.7,9.28245"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="-0.6,7.2"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="4.25"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="2.03245"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute><dia:attribute name="corner_radius"><dia:real val="0.5"/></dia:attribute><dia:attribute name="padding"><dia:real val="0.2"/></dia:attribute><dia:attribute name="text"><dia:composite 
type="text"><dia:attribute name="string"><dia:string>#Node Manager#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="1.525,8.36623"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O3"><dia:attribute name="obj_pos"><dia:point val="0.5,6.7"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="-0.625,6.23375;1.64125,7.0825"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#slurmctld#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="0.5,6.7"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O4"><dia:attribute name="obj_pos"><dia:point val="3.65,8.21622"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="3.59893,8.14893;4.45107,8.26729"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="3.65,8.21622"/><dia:point val="4.4,8.2"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:connections><dia:connection handle="0" to="O2" connection="8"/><dia:connection handle="1" to="O1" connection="7"/></dia:connections></dia:object><dia:group><dia:group><dia:object type="Standard - PolyLine" version="0" id="O5"><dia:attribute name="obj_pos"><dia:point 
val="17.375,3.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="16.4306,2.71145;18.7787,5.475"/></dia:attribute><dia:attribute name="poly_points"><dia:point val="17.375,3.5"/><dia:point val="16.7875,2.75"/><dia:point val="16.5,2.9875"/><dia:point val="17.2625,3.9625"/><dia:point val="16.85,5.425"/><dia:point val="17.3125,5.425"/><dia:point val="17.5625,4.475"/><dia:point val="17.8,5.425"/><dia:point val="18.225,5.425"/><dia:point val="17.9,4.175"/><dia:point val="18.5375,4.55"/><dia:point val="18.7125,4.225"/><dia:point val="17.8375,3.675"/><dia:point val="17.8,3.4875"/></dia:attribute><dia:attribute name="line_color"><dia:color val="#010101"/></dia:attribute></dia:object><dia:object type="Standard - Arc" version="0" id="O6"><dia:attribute name="obj_pos"><dia:point val="17.775,3.525"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="17.211,2.84571;17.989,3.575"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="17.775,3.525"/><dia:point val="17.425,3.525"/></dia:attribute><dia:attribute name="arc_color"><dia:color val="#010101"/></dia:attribute><dia:attribute name="curve_distance"><dia:real val="0.629294"/></dia:attribute></dia:object></dia:group><dia:group><dia:object type="Standard - Text" version="0" id="O7"><dia:attribute name="obj_pos"><dia:point val="18.4169,6.15373"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="17.9169,5.70373;18.9169,6.50373"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#srun#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="18.4169,6.15373"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum 
val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O8"><dia:attribute name="obj_pos"><dia:point val="16.7743,6.18636"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="16.1993,5.68636;17.3493,6.48636"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#User:#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="16.7743,6.18636"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object></dia:group></dia:group><dia:object type="Standard - Box" version="0" id="O9"><dia:attribute name="obj_pos"><dia:point val="-1.35,12.125"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="-1.35,12.125;17.7667,16"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="-1.35,12.125"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="19.1167"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="3.875"/></dia:attribute><dia:attribute name="border_width"><dia:real val="1.49012e-08"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#a53f3f"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O10"><dia:attribute name="obj_pos"><dia:point val="-0.125,15.75"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="-0.875,15.3;0.625,16.1"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#slurmd#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" 
style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="-0.125,15.75"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Flowchart - Box" version="0" id="O11"><dia:attribute name="obj_pos"><dia:point val="-0.98675,12.85"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="-1.03675,12.8;2.2,14.85"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="-0.98675,12.85"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="3.13675"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1.95"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute><dia:attribute name="corner_radius"><dia:real val="0.4"/></dia:attribute><dia:attribute name="padding"><dia:real val="0.2"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#Machine 
+<dia:diagram xmlns:dia="http://www.lysator.liu.se/~alla/dia/"><dia:diagramdata><dia:attribute name="background"><dia:color val="#ffffff"/></dia:attribute><dia:attribute name="paper"><dia:composite type="paper"><dia:attribute name="name"><dia:string>#Letter#</dia:string></dia:attribute><dia:attribute name="tmargin"><dia:real val="2.54"/></dia:attribute><dia:attribute name="bmargin"><dia:real val="2.54"/></dia:attribute><dia:attribute name="lmargin"><dia:real val="2.54"/></dia:attribute><dia:attribute name="rmargin"><dia:real val="2.54"/></dia:attribute><dia:attribute name="is_portrait"><dia:boolean val="true"/></dia:attribute><dia:attribute name="scaling"><dia:real val="1"/></dia:attribute><dia:attribute name="fitto"><dia:boolean val="false"/></dia:attribute></dia:composite></dia:attribute><dia:attribute name="grid"><dia:composite type="grid"><dia:attribute name="width_x"><dia:real val="0.1"/></dia:attribute><dia:attribute name="width_y"><dia:real val="0.1"/></dia:attribute><dia:attribute name="visible_x"><dia:int val="1"/></dia:attribute><dia:attribute name="visible_y"><dia:int val="1"/></dia:attribute></dia:composite></dia:attribute><dia:attribute name="guides"><dia:composite type="guides"><dia:attribute name="hguides"/><dia:attribute name="vguides"/></dia:composite></dia:attribute></dia:diagramdata><dia:layer name="Background" visible="true"><dia:object type="Standard - Box" version="0" id="O0"><dia:attribute name="obj_pos"><dia:point val="-1.35,6"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="-1.35,6;14.9844,10"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="-1.35,6"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="16.3344"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="4"/></dia:attribute><dia:attribute name="border_width"><dia:real val="1.49012e-08"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#7272af"/></dia:attribute><dia:attribute 
name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Flowchart - Box" version="0" id="O1"><dia:attribute name="obj_pos"><dia:point val="4.4,7.2"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.35,7.15;9.608,9.25"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="4.4,7.2"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="5.158"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="2"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute><dia:attribute name="corner_radius"><dia:real val="0.5"/></dia:attribute><dia:attribute name="padding"><dia:real val="0.2"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#Partition Manager#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="6.979,8.35"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Flowchart - Box" version="0" id="O2"><dia:attribute name="obj_pos"><dia:point val="-0.6,7.2"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="-0.65,7.15;3.7,9.28245"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="-0.6,7.2"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="4.25"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="2.03245"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute><dia:attribute name="corner_radius"><dia:real val="0.5"/></dia:attribute><dia:attribute name="padding"><dia:real val="0.2"/></dia:attribute><dia:attribute name="text"><dia:composite 
type="text"><dia:attribute name="string"><dia:string>#Node Manager#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="1.525,8.36623"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O3"><dia:attribute name="obj_pos"><dia:point val="0.5,6.7"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="-0.625,6.23375;1.64125,7.0825"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#slurmctld#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="0.5,6.7"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O4"><dia:attribute name="obj_pos"><dia:point val="3.65,8.21622"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="3.59893,8.14893;4.45107,8.26729"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="3.65,8.21622"/><dia:point val="4.4,8.2"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:connections><dia:connection handle="0" to="O2" connection="8"/><dia:connection handle="1" to="O1" connection="7"/></dia:connections></dia:object><dia:group><dia:group><dia:object type="Standard - PolyLine" version="0" id="O5"><dia:attribute name="obj_pos"><dia:point 
val="17.375,3.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="16.4306,2.71145;18.7787,5.475"/></dia:attribute><dia:attribute name="poly_points"><dia:point val="17.375,3.5"/><dia:point val="16.7875,2.75"/><dia:point val="16.5,2.9875"/><dia:point val="17.2625,3.9625"/><dia:point val="16.85,5.425"/><dia:point val="17.3125,5.425"/><dia:point val="17.5625,4.475"/><dia:point val="17.8,5.425"/><dia:point val="18.225,5.425"/><dia:point val="17.9,4.175"/><dia:point val="18.5375,4.55"/><dia:point val="18.7125,4.225"/><dia:point val="17.8375,3.675"/><dia:point val="17.8,3.4875"/></dia:attribute><dia:attribute name="line_color"><dia:color val="#010101"/></dia:attribute></dia:object><dia:object type="Standard - Arc" version="0" id="O6"><dia:attribute name="obj_pos"><dia:point val="17.775,3.525"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="17.211,2.84571;17.989,3.575"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="17.775,3.525"/><dia:point val="17.425,3.525"/></dia:attribute><dia:attribute name="arc_color"><dia:color val="#010101"/></dia:attribute><dia:attribute name="curve_distance"><dia:real val="0.629294"/></dia:attribute></dia:object></dia:group><dia:group><dia:object type="Standard - Text" version="0" id="O7"><dia:attribute name="obj_pos"><dia:point val="18.4169,6.15373"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="17.9169,5.70373;18.9169,6.50373"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#srun#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="18.4169,6.15373"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum 
val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O8"><dia:attribute name="obj_pos"><dia:point val="16.7743,6.18636"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="16.1993,5.68636;17.3493,6.48636"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#User:#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="16.7743,6.18636"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object></dia:group></dia:group><dia:object type="Standard - Box" version="0" id="O9"><dia:attribute name="obj_pos"><dia:point val="-1.35,12.125"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="-1.35,12.125;17.7667,16"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="-1.35,12.125"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="19.1167"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="3.875"/></dia:attribute><dia:attribute name="border_width"><dia:real val="1.49012e-08"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#a53f3f"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute></dia:object><dia:object type="Standard - Text" version="0" id="O10"><dia:attribute name="obj_pos"><dia:point val="-0.125,15.75"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="-0.875,15.3;0.625,16.1"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#slurmd#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="courier new" 
style="80" name="Courier-Bold"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="-0.125,15.75"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Flowchart - Box" version="0" id="O11"><dia:attribute name="obj_pos"><dia:point val="-0.98675,12.85"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="-1.03675,12.8;2.2,14.85"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="-0.98675,12.85"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="3.13675"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1.95"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute><dia:attribute name="corner_radius"><dia:real val="0.4"/></dia:attribute><dia:attribute name="padding"><dia:real val="0.2"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#Machine
 Status#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="0.581625,13.725"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Flowchart - Box" version="0" id="O12"><dia:attribute name="obj_pos"><dia:point val="6.1,12.9"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="6.05,12.85;9.2,14.85"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="6.1,12.9"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="3.05"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1.9"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute><dia:attribute name="corner_radius"><dia:real val="0.4"/></dia:attribute><dia:attribute name="padding"><dia:real val="0.2"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#Job
 Control#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="7.625,13.75"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Flowchart - Box" version="0" id="O13"><dia:attribute name="obj_pos"><dia:point val="9.6,12.9"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="9.55,12.85;12.9653,14.825"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="9.6,12.9"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="3.31525"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1.875"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute><dia:attribute name="corner_radius"><dia:real val="0.4"/></dia:attribute><dia:attribute name="padding"><dia:real val="0.2"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#Remote
 Execution#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="11.2576,13.7375"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O14"><dia:attribute name="obj_pos"><dia:point val="5.7,13.85"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.65,13.8;6.15,13.9"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="5.7,13.85"/><dia:point val="6.1,13.85"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:connections><dia:connection handle="0" to="O30" connection="8"/><dia:connection handle="1" to="O12" connection="7"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O15"><dia:attribute name="obj_pos"><dia:point val="9.15,13.85"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="9.09863,13.7861;9.65137,13.9014"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="9.15,13.85"/><dia:point val="9.6,13.8375"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:connections><dia:connection handle="0" to="O12" connection="8"/><dia:connection handle="1" to="O13" connection="7"/></dia:connections></dia:object><dia:object type="Standard - ZigZagLine" version="0" id="O16"><dia:attribute name="obj_pos"><dia:point val="4.15,12.9"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="4.1,10.9185;11.45,12.95"/></dia:attribute><dia:attribute name="orth_points"><dia:point val="4.15,12.9"/><dia:point val="4.15,10.9685"/><dia:point val="11.4,10.9685"/><dia:point val="11.4,12.9"/></dia:attribute><dia:attribute name="orth_orient"><dia:enum 
val="1"/><dia:enum val="0"/><dia:enum val="1"/></dia:attribute><dia:connections><dia:connection handle="0" to="O30" connection="2"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O17"><dia:attribute name="obj_pos"><dia:point val="7.6,11"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="7.54935,10.9493;7.67565,12.9507"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="7.6,11"/><dia:point val="7.625,12.9"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:connections><dia:connection handle="1" to="O12" connection="2"/></dia:connections></dia:object><dia:object type="Flowchart - Box" version="0" id="O18"><dia:attribute name="obj_pos"><dia:point val="0.20438,1.5"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="0.20438,1.5;5.84978,4.5"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="0.20438,1.5"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="5.6454"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="3"/></dia:attribute><dia:attribute name="border_width"><dia:real val="1.49012e-08"/></dia:attribute><dia:attribute name="inner_color"><dia:color val="#77af77"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute><dia:attribute name="padding"><dia:real val="0.5"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#Globus and/or
 Metascheduler
-(optional)#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="3.02708,2.6"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - PolyLine" version="0" id="O19"><dia:attribute name="obj_pos"><dia:point val="15.9854,3.00728"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="13.3,2.95;16.0355,7.25"/></dia:attribute><dia:attribute name="poly_points"><dia:point val="15.9854,3.00728"/><dia:point val="13.35,3"/><dia:point val="13.35,7.2"/></dia:attribute></dia:object><dia:object type="Standard - PolyLine" version="0" id="O20"><dia:attribute name="obj_pos"><dia:point val="5.84978,3"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.79978,2.95;11.4694,7.25023"/></dia:attribute><dia:attribute name="poly_points"><dia:point val="5.84978,3"/><dia:point val="11.4,3"/><dia:point val="11.4191,7.2"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O18" connection="8"/><dia:connection handle="2" to="O29" connection="1"/></dia:connections></dia:object><dia:object type="Standard - PolyLine" version="0" id="O21"><dia:attribute name="obj_pos"><dia:point val="15.4,3"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="12.0361,2.95;15.45,12.9504"/></dia:attribute><dia:attribute name="poly_points"><dia:point val="15.4,3"/><dia:point val="15.4,11"/><dia:point val="12.1,11"/><dia:point val="12.0864,12.9"/></dia:attribute><dia:connections><dia:connection handle="3" to="O13" connection="3"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O22"><dia:attribute 
name="obj_pos"><dia:point val="11.4,9.2"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="11.35,9.15;11.45,11.05"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="11.4,9.2"/><dia:point val="11.4,11"/></dia:attribute><dia:attribute name="numcp"><dia:int val="2"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O23"><dia:attribute name="obj_pos"><dia:point val="9.558,8.2"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="9.5077,8.1497;10.4503,8.25535"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="9.558,8.2"/><dia:point val="10.4,8.20505"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:connections><dia:connection handle="0" to="O1" connection="8"/><dia:connection handle="1" to="O29" connection="7"/></dia:connections></dia:object><dia:object type="Flowchart - Box" version="0" id="O24"><dia:attribute name="obj_pos"><dia:point val="13.4,12.9"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="13.35,12.85;17.5213,14.825"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="13.4,12.9"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="4.07125"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1.875"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute><dia:attribute name="corner_radius"><dia:real val="0.5"/></dia:attribute><dia:attribute name="padding"><dia:real val="0.2"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#Stream Copy#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="15.4356,13.9875"/></dia:attribute><dia:attribute name="color"><dia:color 
val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O25"><dia:attribute name="obj_pos"><dia:point val="15.4,10.6"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="15.35,10.55;15.45,12.95"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="15.4,10.6"/><dia:point val="15.4,12.9"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O26"><dia:attribute name="obj_pos"><dia:point val="12.9153,13.8375"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="12.8652,13.7875;13.45,13.8875"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="12.9153,13.8375"/><dia:point val="13.4,13.8375"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:connections><dia:connection handle="0" to="O13" connection="8"/><dia:connection handle="1" to="O24" connection="7"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O27"><dia:attribute name="obj_pos"><dia:point val="6.979,7.2"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="6.84907,2.94907;7.02993,7.25093"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.979,7.2"/><dia:point val="6.9,3"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O1" connection="2"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O28"><dia:attribute name="obj_pos"><dia:point val="0.563434,9.25"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="0.513182,9.19975;0.631877,12.9003"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="0.563434,9.25"/><dia:point 
val="0.581625,12.85"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:connections><dia:connection handle="1" to="O11" connection="2"/></dia:connections></dia:object><dia:object type="Flowchart - Box" version="0" id="O29"><dia:attribute name="obj_pos"><dia:point val="10.4,7.2"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="10.35,7.15;14.5265,9.26011"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="10.4,7.2"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="4.0765"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="2.01011"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute><dia:attribute name="corner_radius"><dia:real val="0.5"/></dia:attribute><dia:attribute name="padding"><dia:real val="0.2"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#Job Manager#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="12.4383,8.35506"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Flowchart - Box" version="0" id="O30"><dia:attribute name="obj_pos"><dia:point val="2.6,12.9"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="2.55,12.85;5.75,14.85"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="2.6,12.9"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="3.1"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1.9"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute><dia:attribute name="corner_radius"><dia:real 
val="0.4"/></dia:attribute><dia:attribute name="padding"><dia:real val="0.2"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#Job 
+(optional)#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.6"/></dia:attribute><dia:attribute name="pos"><dia:point val="3.02708,2.6"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - PolyLine" version="0" id="O19"><dia:attribute name="obj_pos"><dia:point val="15.9854,3.00728"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="13.3,2.95;16.0355,7.25"/></dia:attribute><dia:attribute name="poly_points"><dia:point val="15.9854,3.00728"/><dia:point val="13.35,3"/><dia:point val="13.35,7.2"/></dia:attribute></dia:object><dia:object type="Standard - PolyLine" version="0" id="O20"><dia:attribute name="obj_pos"><dia:point val="5.84978,3"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="5.79978,2.95;11.4694,7.25023"/></dia:attribute><dia:attribute name="poly_points"><dia:point val="5.84978,3"/><dia:point val="11.4,3"/><dia:point val="11.4191,7.2"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O18" connection="8"/><dia:connection handle="2" to="O29" connection="1"/></dia:connections></dia:object><dia:object type="Standard - PolyLine" version="0" id="O21"><dia:attribute name="obj_pos"><dia:point val="15.4,3"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="12.0361,2.95;15.45,12.9504"/></dia:attribute><dia:attribute name="poly_points"><dia:point val="15.4,3"/><dia:point val="15.4,11"/><dia:point val="12.1,11"/><dia:point val="12.0864,12.9"/></dia:attribute><dia:connections><dia:connection handle="3" to="O13" connection="3"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O22"><dia:attribute 
name="obj_pos"><dia:point val="11.4,9.2"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="11.35,9.15;11.45,11.05"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="11.4,9.2"/><dia:point val="11.4,11"/></dia:attribute><dia:attribute name="numcp"><dia:int val="2"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O23"><dia:attribute name="obj_pos"><dia:point val="9.558,8.2"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="9.5077,8.1497;10.4503,8.25535"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="9.558,8.2"/><dia:point val="10.4,8.20505"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:connections><dia:connection handle="0" to="O1" connection="8"/><dia:connection handle="1" to="O29" connection="7"/></dia:connections></dia:object><dia:object type="Flowchart - Box" version="0" id="O24"><dia:attribute name="obj_pos"><dia:point val="13.4,12.9"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="13.35,12.85;17.5213,14.825"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="13.4,12.9"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="4.07125"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1.875"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute><dia:attribute name="corner_radius"><dia:real val="0.5"/></dia:attribute><dia:attribute name="padding"><dia:real val="0.2"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#Stream Copy#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="15.4356,13.9875"/></dia:attribute><dia:attribute name="color"><dia:color 
val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O25"><dia:attribute name="obj_pos"><dia:point val="15.4,10.6"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="15.35,10.55;15.45,12.95"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="15.4,10.6"/><dia:point val="15.4,12.9"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute></dia:object><dia:object type="Standard - Line" version="0" id="O26"><dia:attribute name="obj_pos"><dia:point val="12.9153,13.8375"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="12.8652,13.7875;13.45,13.8875"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="12.9153,13.8375"/><dia:point val="13.4,13.8375"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:connections><dia:connection handle="0" to="O13" connection="8"/><dia:connection handle="1" to="O24" connection="7"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O27"><dia:attribute name="obj_pos"><dia:point val="6.979,7.2"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="6.84907,2.94907;7.02993,7.25093"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="6.979,7.2"/><dia:point val="6.9,3"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:attribute name="line_style"><dia:enum val="4"/></dia:attribute><dia:connections><dia:connection handle="0" to="O1" connection="2"/></dia:connections></dia:object><dia:object type="Standard - Line" version="0" id="O28"><dia:attribute name="obj_pos"><dia:point val="0.563434,9.25"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="0.513182,9.19975;0.631877,12.9003"/></dia:attribute><dia:attribute name="conn_endpoints"><dia:point val="0.563434,9.25"/><dia:point 
val="0.581625,12.85"/></dia:attribute><dia:attribute name="numcp"><dia:int val="1"/></dia:attribute><dia:connections><dia:connection handle="1" to="O11" connection="2"/></dia:connections></dia:object><dia:object type="Flowchart - Box" version="0" id="O29"><dia:attribute name="obj_pos"><dia:point val="10.4,7.2"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="10.35,7.15;14.5265,9.26011"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="10.4,7.2"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="4.0765"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="2.01011"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute><dia:attribute name="corner_radius"><dia:real val="0.5"/></dia:attribute><dia:attribute name="padding"><dia:real val="0.2"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#Job Manager#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="12.4383,8.35506"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object><dia:object type="Flowchart - Box" version="0" id="O30"><dia:attribute name="obj_pos"><dia:point val="2.6,12.9"/></dia:attribute><dia:attribute name="obj_bb"><dia:rectangle val="2.55,12.85;5.75,14.85"/></dia:attribute><dia:attribute name="elem_corner"><dia:point val="2.6,12.9"/></dia:attribute><dia:attribute name="elem_width"><dia:real val="3.1"/></dia:attribute><dia:attribute name="elem_height"><dia:real val="1.9"/></dia:attribute><dia:attribute name="show_background"><dia:boolean val="true"/></dia:attribute><dia:attribute name="corner_radius"><dia:real 
val="0.4"/></dia:attribute><dia:attribute name="padding"><dia:real val="0.2"/></dia:attribute><dia:attribute name="text"><dia:composite type="text"><dia:attribute name="string"><dia:string>#Job
 Status#</dia:string></dia:attribute><dia:attribute name="font"><dia:font family="arial" style="0" name="Helvetica"/></dia:attribute><dia:attribute name="height"><dia:real val="0.5"/></dia:attribute><dia:attribute name="pos"><dia:point val="4.15,13.75"/></dia:attribute><dia:attribute name="color"><dia:color val="#000000"/></dia:attribute><dia:attribute name="alignment"><dia:enum val="1"/></dia:attribute></dia:composite></dia:attribute></dia:object></dia:layer></dia:diagram>
diff --git a/doc/figures/times.eps b/doc/figures/times.eps
index 4db99fef5baade8ae6bf142dc69e25d2ad2121e4..475b54d914eb9ed587210eba5a90ac4c7033f3cf 100644
--- a/doc/figures/times.eps
+++ b/doc/figures/times.eps
@@ -213,7 +213,7 @@ gnudict begin
 /PentE { stroke [] 0 setdash gsave
   translate 0 hpt M 4 {72 rotate 0 hpt L} repeat
   closepath stroke grestore } def
-/CircE { stroke [] 0 setdash 
+/CircE { stroke [] 0 setdash
   hpt 0 360 arc stroke } def
 /Opaque { gsave closepath 1 setgray fill grestore 0 setgray closepath } def
 /DiaW { stroke [] 0 setdash vpt add M
@@ -233,7 +233,7 @@ gnudict begin
 /PentW { stroke [] 0 setdash gsave
   translate 0 hpt M 4 {72 rotate 0 hpt L} repeat
   Opaque stroke grestore } def
-/CircW { stroke [] 0 setdash 
+/CircW { stroke [] 0 setdash
   hpt 0 360 arc Opaque stroke } def
 /BoxFill { gsave Rec 1 setgray fill grestore } def
 end
diff --git a/doc/figures/times.gpl b/doc/figures/times.gpl
index ebd71858e4a71206aa2a9c62e0a88189c4362116..ebb4516357345e1ceef76f2de60a14198d09965e 100644
--- a/doc/figures/times.gpl
+++ b/doc/figures/times.gpl
@@ -25,7 +25,7 @@ set yrange [0.05:20]
 set mytics 2
 
 set logscale y
-set grid noxtics ytics mytics 
+set grid noxtics ytics mytics
 
 plot [0:12]	"figures/slurm.dat" title "SLURM" with linespoint, \
 		"figures/rms.dat"   title "RMS" with linespoint, \
diff --git a/doc/figures/times.gpl.png b/doc/figures/times.gpl.png
index 33537dfffb8b789361c571142960a8cb9abd63b4..d9dd3fd669ab57063fdcd740380147634b3f6061 100644
--- a/doc/figures/times.gpl.png
+++ b/doc/figures/times.gpl.png
@@ -25,7 +25,7 @@ set yrange [0.05:20]
 set mytics 2
 
 set logscale y
-set grid noxtics ytics mytics 
+set grid noxtics ytics mytics
 
 plot [0:12]	"slurm.dat" title "SLURM" with linespoint, \
 		"rms.dat"   title "RMS" with linespoint, \
diff --git a/doc/html/ExampleUsage.gif b/doc/html/ExampleUsage.gif
index 692cd5a125c7b56d8937b42f0caadfba0225fd44..5fb596106e9e3534ee8c2e8c5a9b5769c2da38b1 100644
Binary files a/doc/html/ExampleUsage.gif and b/doc/html/ExampleUsage.gif differ
diff --git a/doc/html/UsagePies.gif b/doc/html/UsagePies.gif
index 512e08363d46835f5724cb16aaba58e8923b98ee..e78c0e4d7c8bcfa7c95f435b0ccc7f5d278b9364 100644
Binary files a/doc/html/UsagePies.gif and b/doc/html/UsagePies.gif differ
diff --git a/doc/html/accounting.shtml b/doc/html/accounting.shtml
index 4c37d082690b66ae9ded9d4c88eabba9a77c5147..80ce63554685efd4dc9693af7e8fa75a5cc09789 100644
--- a/doc/html/accounting.shtml
+++ b/doc/html/accounting.shtml
@@ -2,130 +2,130 @@
 
 <h1>Accounting</h1>
 
-<p>NOTE: This documents accounting features available in SLURM version 
-1.3, which are far more extensive than those available in previous 
+<p>NOTE: This documents accounting features available in SLURM version
+1.3, which are far more extensive than those available in previous
 releases.</p>
 
-<p>SLURM can be configured to collect accounting information for every 
-job and job step executed. 
+<p>SLURM can be configured to collect accounting information for every
+job and job step executed.
 Accounting records can be written to a simple text file or a database.
-Information is available about both currently executing jobs and 
+Information is available about both currently executing jobs and
 jobs which have already terminated.
 The <b>sacct</b> command can report resource usage for running or terminated
-jobs including individual tasks, which can be useful to detect load imbalance 
-between the tasks. 
+jobs including individual tasks, which can be useful to detect load imbalance
+between the tasks.
 The <b>sstat</b> command can be used to status only currently running jobs.
 It also can give you valuable information about imbalance between tasks.
 The <b>sreport</b> can be used to generate reports based upon all jobs
 executed in a particular time interval.</p>
 
 <p>There are three distinct plugin types associated with resource accounting.
-The SLURM configuration parameters (in <i>slurm.conf</i>) associated with 
+The SLURM configuration parameters (in <i>slurm.conf</i>) associated with
 these plugins include:</p>
 <ul>
-<li><b>AccountingStorageType</b> controls how detailed job and job 
-step information is recorded. You can store this information in a 
-text file, <a href="http://www.mysql.com/">MySQL</a> or 
-<a href="http://www.postgresql.org/">PostgreSQL</a> 
+<li><b>AccountingStorageType</b> controls how detailed job and job
+step information is recorded. You can store this information in a
+text file, <a href="http://www.mysql.com/">MySQL</a> or
+<a href="http://www.postgresql.org/">PostgreSQL</a>
 database, optionally using SlurmDBD for added security.</li>
-<li><b>JobAcctGatherType</b> is operating system dependent and 
+<li><b>JobAcctGatherType</b> is operating system dependent and
 controls what mechanism is used to collect accounting information.
 Supported values are <i>jobacct_gather/aix</i>, <i>jobacct_gather/linux</i>
 and <i>jobacct_gather/none</i> (no information collected).</li>
-<li><b>JobCompType</b> controls how job completion information is 
+<li><b>JobCompType</b> controls how job completion information is
 recorded. This can be used to record basic job information such
-as job name, user name, allocated nodes, start time, completion 
-time, exit status, etc. If the preservation of only basic job 
+as job name, user name, allocated nodes, start time, completion
+time, exit status, etc. If the preservation of only basic job
 information is required, this plugin should satisfy your needs
-with minimal overhead. You can store this information in a 
-text file, <a href="http://www.mysql.com/">MySQL</a> or 
-<a href="http://www.postgresql.org/">PostgreSQL</a> 
+with minimal overhead. You can store this information in a
+text file, <a href="http://www.mysql.com/">MySQL</a> or
+<a href="http://www.postgresql.org/">PostgreSQL</a>
 database</li>
 </ul>
 
-<p>The use of sacct to view information about jobs 
+<p>The use of sacct to view information about jobs
 is dependent upon AccountingStorageType
 being configured to collect and store that information.
-The use of sreport is dependent upon some database being 
+The use of sreport is dependent upon some database being
 used to store that information.</p>
 
 <p>The use of sacct or sstat to view information about resource usage
   within jobs is dependent upon both JobAcctGatherType and AccountingStorageType
   being configured to collect and store that information.</p>
 
-<p>Storing the accounting information into text files is 
-very simple. Just configure the appropriate plugin (e.g. 
-<i>AccountingStorageType=accounting_storage/filetxt</i> and/or 
-<i>JobCompType=jobcomp/filetxt</i>) and then specify the 
+<p>Storing the accounting information into text files is
+very simple. Just configure the appropriate plugin (e.g.
+<i>AccountingStorageType=accounting_storage/filetxt</i> and/or
+<i>JobCompType=jobcomp/filetxt</i>) and then specify the
 pathname of the file (e.g.
-<i>AccountingStorageLoc=/var/log/slurm/accounting</i> and/or 
+<i>AccountingStorageLoc=/var/log/slurm/accounting</i> and/or
 <i>JobCompLoc=/var/log/slurm/job_completions</i>).
-Use the <i>logrotate</i> or similar tool to prevent the 
+Use the <i>logrotate</i> or similar tool to prevent the
 log files from getting too large.
-Send a SIGHUP signal to the <i>slurmctld</i> daemon 
+Send a SIGHUP signal to the <i>slurmctld</i> daemon
 after moving the files, but before compressing them so
 that new log files will be created.</p>
 
-<p>Storing the data directly into a database from SLURM may seem 
-attractive, but requires the availability of user name and 
-password data not only for the SLURM control daemon (slurmctld), 
+<p>Storing the data directly into a database from SLURM may seem
+attractive, but requires the availability of user name and
+password data not only for the SLURM control daemon (slurmctld),
 but also user commands which need to access the data (sacct, sreport, and
-sacctmgr). 
-Making possibly sensitive information available to all users makes 
+sacctmgr).
+Making possibly sensitive information available to all users makes
 database security more difficult to provide, sending the data through
 an intermediate daemon can provide better security and performance
-(through caching data) and SlurmDBD provides such services. 
-SlurmDBD (SLURM Database Daemon) is written in C, multi-threaded, 
+(through caching data) and SlurmDBD provides such services.
+SlurmDBD (SLURM Database Daemon) is written in C, multi-threaded,
 secure and fast.
 The configuration required to use SlurmDBD will be described below.
 Storing information directly into database would be similar.</p>
 
 <p>Note that SlurmDBD relies upon existing SLURM plugins
-for authentication and database use, but the other SLURM 
+for authentication and database use, but the other SLURM
 commands and daemons are not required on the host where
-SlurmDBD is installed. Install the <i>slurmdbd</i> and 
+SlurmDBD is installed. Install the <i>slurmdbd</i> and
 <i>slurm-plugins</i> RPMs on the computer when SlurmDBD
 is to execute.</p>
 
 <h2>Infrastructure</h2>
 
-<p>With the SlurmDBD, we are able to collect data from multiple 
+<p>With the SlurmDBD, we are able to collect data from multiple
 clusters in a single location.
 This does impose some constraints on the user naming and IDs.
 Accounting is maintained by user name (not user ID), but a
-given user name should refer to the same person across all 
-of the computers. 
-Authentication relies upon user ID numbers, so those must 
+given user name should refer to the same person across all
+of the computers.
+Authentication relies upon user ID numbers, so those must
 be uniform across all computers communicating with each
 SlurmDBD, at least for users requiring authentication.
 In particular, the configured <i>SlurmUser</i> must have the
 same name and ID across all clusters.
-If you plan to have administrators of user accounts, limits, 
-etc. they must also have consistent names and IDs across all 
+If you plan to have administrators of user accounts, limits,
+etc. they must also have consistent names and IDs across all
 clusters.
-If you plan to restrict access to accounting records (e.g. 
-only permit a user to view records of his jobs), then all 
+If you plan to restrict access to accounting records (e.g.
+only permit a user to view records of his jobs), then all
 users should have consistent names and IDs.</p>
 
-<p>The best way to insure security of the data is by authenticating 
-communications to the SlurmDBD and we recommend 
+<p>The best way to insure security of the data is by authenticating
+communications to the SlurmDBD and we recommend
 <a href="http://home.gna.org/munge/">Munge</a> for that purpose.
-If you have one cluster managed by SLURM and execute the SlurmDBD 
+If you have one cluster managed by SLURM and execute the SlurmDBD
 on that one cluster, the normal Munge configuration will suffice.
-Otherwise Munge should then be installed on all nodes of all 
+Otherwise Munge should then be installed on all nodes of all
 SLURM managed clusters, plus the machine where SlurmDBD executes.
-You then have a choice of either having a single Munge key for 
-all of these computers or maintaining a unique key for each of the 
+You then have a choice of either having a single Munge key for
+all of these computers or maintaining a unique key for each of the
 clusters plus a second key for communications between the clusters
 for better security.
-Munge enhancements are planned to support two keys within a single 
-configuration file, but presently two different daemons must be 
-started with different configurations to support two different keys 
-(create two key files and start the daemons with the 
+Munge enhancements are planned to support two keys within a single
+configuration file, but presently two different daemons must be
+started with different configurations to support two different keys
+(create two key files and start the daemons with the
 <i>--key-file</i> option to locate the proper key plus the
 <i>--socket</i> option to specify distinct local domain sockets for each).
-The pathname of local domain socket will be needed in the SLURM 
-and SlurmDBD configuration files (slurm.conf and slurmdbd.conf 
+The pathname of local domain socket will be needed in the SLURM
+and SlurmDBD configuration files (slurm.conf and slurmdbd.conf
 respectively, more details are provided below).</p>
 
 <p?Whether you use any authentication module or not you will need to have
@@ -176,7 +176,7 @@ the database with.</li>
 <p>While the SlurmDBD will work with a flat text file for recording
 job completions and such this configuration will not allow
 "associations" between a user and account. A database allows such
-a configuration. 
+a configuration.
 
 <p><b>MySQL is the preferred database, PostgreSQL is
 supported for job and step accounting only.</b> The infrastructure for
@@ -191,7 +191,7 @@ mysql_config and pg-config to find out the information it needs
 about installed libraries and headers. You can specify where your
 mysql_config script is with the
 </i>--with-mysql_conf=/path/to/mysql_config</i> option when configuring your
-slurm build. A similar option is also available for PostgreSQL. 
+slurm build. A similar option is also available for PostgreSQL.
 On a successful configure, output is something like this: </p>
 <pre>
 checking for mysql_config... /usr/bin/mysql_config
@@ -208,20 +208,20 @@ directly, but that offers minimal security. </p>
 archiving information in SlurmDBD. SlurmDBD has a separate configuration
 file which is documented in a separate section.
 Note that you can write accounting information to SlurmDBD
-while job completion records are written to a text file or 
-not maintained at all. 
-If you don't set the configuration parameters that begin 
+while job completion records are written to a text file or
+not maintained at all.
+If you don't set the configuration parameters that begin
 with "AccountingStorage" then accounting information will not be
 referenced or recorded.</p>
 
 <ul>
 <li><b>AccountingStorageEnforce</b>:
 This option contains a comma separated list of options you may want to
- enforce.  The valid options are 
+ enforce.  The valid options are
 <ul>
 <li>associations - This will prevent users from running jobs if
 their <i>association</i> is not in the database. This option will
-prevent users from accessing invalid accounts.  
+prevent users from accessing invalid accounts.
 </li>
 <li>limits - This will enforce limits set to associations.  By setting
   this option, the 'associations' option is also set.
@@ -237,18 +237,18 @@ each association in the database.  By setting this option, the
   set to true.
 </li>
 </ul>
-(NOTE: The association is a combination of cluster, account, 
+(NOTE: The association is a combination of cluster, account,
 user names and optional partition name.)
 <br>
-Without AccountingStorageEnforce being set (the default behavior) 
+Without AccountingStorageEnforce being set (the default behavior)
 jobs will be executed based upon policies configured in SLURM on each
 cluster.
-<br> 
+<br>
 It is advisable to run without the option 'limits' set when running a
 scheduler on top of SLURM, like Moab, that does not update in real
 time their limits per association.</li>
 
-<li><b>AccountingStorageHost</b>: The name or address of the host where 
+<li><b>AccountingStorageHost</b>: The name or address of the host where
 SlurmDBD executes</li>
 
 <li><b>AccountingStoragePass</b>: If using SlurmDBD with a second Munge
@@ -262,68 +262,68 @@ The network port that SlurmDBD accepts communication on.</li>
 Set to "accounting_storage/slurmdbd".</li>
 
 <li><b>ClusterName</b>:
-Set to a unique name for each Slurm-managed cluster so that 
+Set to a unique name for each Slurm-managed cluster so that
 accounting records from each can be identified.</li>
 <li><b>TrackWCKey</b>:
 Boolean.  If you want to track wckeys (Workload Characterization Key)
   of users.  A Wckey is an orthogonal way to do accounting against
   maybe a group of unrelated accounts. WCKeys can be defined using
-  sacctmgr add wckey 'name'.  When a job is run use srun --wckey and 
+  sacctmgr add wckey 'name'.  When a job is run use srun --wckey and
   time will be summed up for this wckey.
 </li>
 </ul>
 
 <h2>SlurmDBD Configuration</h2>
 
-<p>SlurmDBD requires its own configuration file called "slurmdbd.conf". 
-This file should be only on the computer where SlurmDBD executes and 
+<p>SlurmDBD requires its own configuration file called "slurmdbd.conf".
+This file should be only on the computer where SlurmDBD executes and
 should only be readable by the user which executes SlurmDBD (e.g. "slurm").
 This file should be protected from unauthorized access since it
 contains a database login name and password.
-See "man slurmdbd.conf" for a more complete description of the 
-configuration parameters. 
+See "man slurmdbd.conf" for a more complete description of the
+configuration parameters.
 Some of the more important parameters include:</p>
 
 <ul>
 <li><b>AuthInfo</b>:
-If using SlurmDBD with a second Munge daemon, store the pathname of 
+If using SlurmDBD with a second Munge daemon, store the pathname of
 the named socket used by Munge to provide enterprise-wide.
 Otherwise the default Munge daemon will be used.</li>
 
 <li><b>AuthType</b>:
-Define the authentication method for communications between SLURM 
+Define the authentication method for communications between SLURM
 components. A value of "auth/munge" is recommended.</li>
 
 <li><b>DbdHost</b>:
-The name of the machine where the Slurm Database Daemon is executed. 
-This should be a node name without the full domain name (e.g. "lx0001"). 
+The name of the machine where the Slurm Database Daemon is executed.
+This should be a node name without the full domain name (e.g. "lx0001").
 This defaults to <i>localhost</i> but should be supplied to avoid a
 warning message.</li>
 
 <li><b>DbdPort</b>:
-The port number that the Slurm Database Daemon (slurmdbd) listens 
-to for work. The default value is SLURMDBD_PORT as established at system 
+The port number that the Slurm Database Daemon (slurmdbd) listens
+to for work. The default value is SLURMDBD_PORT as established at system
 build time. If none is explicitly specified, it will be set to 6819.
 This value must be equal to the <i>AccountingStoragePort</i> parameter in the
 slurm.conf file.</li>
 
 <li><b>LogFile</b>:
-Fully qualified pathname of a file into which the Slurm Database Daemon's 
+Fully qualified pathname of a file into which the Slurm Database Daemon's
 logs are written.
 The default value is none (performs logging via syslog).</li>
 
 <li><b>PluginDir</b>:
-Identifies the places in which to look for SLURM plugins. 
-This is a colon-separated list of directories, like the PATH 
-environment variable. 
+Identifies the places in which to look for SLURM plugins.
+This is a colon-separated list of directories, like the PATH
+environment variable.
 The default value is the prefix given at configure time + "/lib/slurm".</li>
 
 <li><b>SlurmUser</b>:
-The name of the user that the <i>slurmctld</i> daemon executes as. 
+The name of the user that the <i>slurmctld</i> daemon executes as.
 This user must exist on the machine executing the Slurm Database Daemon
 and have the same user ID as the hosts on which <i>slurmctld</i> execute.
 For security purposes, a user other than "root" is recommended.
-The default value is "root". This name should also be the same SlurmUser 
+The default value is "root". This name should also be the same SlurmUser
 on all clusters reporting to the SlurmDBD.</li>
 
 <li><b>StorageHost</b>:
@@ -333,13 +333,13 @@ Ideally this should be the host on which SlurmDBD executes. But could
 be a different machine.</li>
 
 <li><b>StorageLoc</b>:
-Specifies the name of the database where accounting 
+Specifies the name of the database where accounting
 records are written, for databases the default database is
 slurm_acct_db. Note the name can not have a '/' in it or the
 default will be used.</li>
 
 <li><b>StoragePass</b>:
-Define the password used to gain access to the database to store 
+Define the password used to gain access to the database to store
 the job accounting data.</li>
 
 <li><b>StoragePort</b>:
@@ -347,13 +347,13 @@ Define the port on which the database is listening.</li>
 
 <li><b>StorageType</b>:
 Define the accounting storage mechanism type.
-Acceptable values at present include 
+Acceptable values at present include
 "accounting_storage/mysql" and "accounting_storage/pgsql".
 The value "accounting_storage/mysql" indicates that accounting records
-should be written to a MySQL database specified by the 
+should be written to a MySQL database specified by the
 <i>StorageLoc</i> parameter.
 The value "accounting_storage/pgsql" indicates that accounting records
-should be written to a PostgreSQL database specified by the 
+should be written to a PostgreSQL database specified by the
 <i>StorageLoc</i> parameter.
 This value must be specified.</li>
 
@@ -392,7 +392,7 @@ Query OK, 0 rows affected (0.00 sec)
 
 or with a password...
 
-mysql> grant all on slurm_acct_db.* TO 'slurm'@'localhost' 
+mysql> grant all on slurm_acct_db.* TO 'slurm'@'localhost'
     -> identified by 'some_pass' with grant option;
 Query OK, 0 rows affected (0.00 sec)
 </pre>
@@ -481,12 +481,12 @@ information.</p>
 <h2>Tools</h2>
 
 <p>There are a few tools available to work with accounting data,
-<b>sacct</b>, <b>sacctmgr</b>, and <b>sreport</b>. 
+<b>sacct</b>, <b>sacctmgr</b>, and <b>sreport</b>.
 These tools all get or set data through the SlurmDBD daemon.
 <ul>
-<li><b>sacct</b> is used to generate accounting report for both 
+<li><b>sacct</b> is used to generate accounting report for both
 running and completed jobs.</li>
-<li><b>sacctmgr</b> is used to manage associations in the database: 
+<li><b>sacctmgr</b> is used to manage associations in the database:
 add or remove clusters, add or remove users, etc.</li>
 <li><b>sreport</b> is used to generate various reports on usage collected over a
 given time period.</li>
@@ -499,7 +499,7 @@ A tool to report node state information is also under development.</p>
 
 <h2>Database Configuration</h2>
 
-<p>Accounting records are maintained based upon what we refer 
+<p>Accounting records are maintained based upon what we refer
 to as an <i>Association</i>,
 which consists of four elements: cluster, account, user names and
 an optional partition name. Use the <i>sacctmgr</i>
@@ -513,25 +513,25 @@ execute this line:</p>
 sacctmgr add cluster snowflake
 </pre>
 
-<p>Add accounts "none" and "test" to cluster "snowflake" with an execute 
+<p>Add accounts "none" and "test" to cluster "snowflake" with an execute
 line of this sort:</p>
 <pre>
 sacctmgr add account none,test Cluster=snowflake \
-  Description="none" Organization="none" 
+  Description="none" Organization="none"
 </pre>
 
 <p>If you have more clusters you want to add these accounts, to you
 can either not specify a cluster, which will add the accounts to all
 clusters in the system, or comma separate the cluster names you want
 to add to in the cluster option.
-Note that multiple accounts can be added at the same time 
-by comma separating the names. 
-Some <i>description</i> of the account and the <i>organization</i> which 
-it belongs must be specified. 
+Note that multiple accounts can be added at the same time
+by comma separating the names.
+Some <i>description</i> of the account and the <i>organization</i> which
+it belongs must be specified.
 These terms can be used later to generated accounting reports.
-Accounts may be arranged in a hierarchical fashion, for example accounts 
-<i>chemistry</i> and <i>physics</i> may be children of the account <i>science</i>. 
-The hierarchy may have an arbitrary depth. 
+Accounts may be arranged in a hierarchical fashion, for example accounts
+<i>chemistry</i> and <i>physics</i> may be children of the account <i>science</i>.
+The hierarchy may have an arbitrary depth.
 Just specify the <i>parent=''</i> option in the add account line to construct
 the hierarchy.
 For the example above execute</p>
@@ -553,9 +553,9 @@ sacctmgr add user da default=test
 the slurm.conf of the cluster <i>snowflake</i> then user <i>da</i> would be
 allowed to run in account <i>test</i> and any other accounts added
 in the future.
-Any attempt to use other accounts will result in the job being 
-aborted. 
-Account <i>test</i> will be the default if he doesn't specify one in 
+Any attempt to use other accounts will result in the job being
+aborted.
+Account <i>test</i> will be the default if he doesn't specify one in
 the job submission command.</p>
 
 <p>Partition names can also be added to an "add user" command with the
@@ -564,7 +564,7 @@ a slurm partition.</p>
 
 <h2>Cluster Options</h2>
 
-<p>When either adding or modifying a cluster, these are the options 
+<p>When either adding or modifying a cluster, these are the options
 available with sacctmgr:
 <ul>
 <li><b>Name=</b> Cluster name</li>
@@ -573,7 +573,7 @@ available with sacctmgr:
 
 <h2>Account Options</h2>
 
-<p>When either adding or modifying an account, the following sacctmgr 
+<p>When either adding or modifying an account, the following sacctmgr
 options are available:
 <ul>
 <li><b>Cluster=</b> Only add this account to these clusters.
@@ -595,27 +595,27 @@ The account is added to all defined clusters by default.</li>
 
 <h2>User Options</h2>
 
-<p>When either adding or modifying a user, the following sacctmgr 
+<p>When either adding or modifying a user, the following sacctmgr
 options are available:
 
 <ul>
 <li><b>Account=</b> Account(s) to add user to</li>
 
-<li><b>AdminLevel=</b> This field is used to allow a user to add accounting 
-privileges to this user. Valid options are 
+<li><b>AdminLevel=</b> This field is used to allow a user to add accounting
+privileges to this user. Valid options are
 <ul>
 <li>None</li>
 <li>Operator: can add, modify,and remove users, and add other operators)</li>
-<li>Admin: In addition to operator privileges these users can add, modify, 
+<li>Admin: In addition to operator privileges these users can add, modify,
 and remove accounts and clusters</li>
 </ul>
 
 <li><b>Cluster=</b> Only add to accounts on these clusters (default is all clusters)</li>
 
-<li><b>DefaultAccount=</b> Default account for the user, used when no account 
+<li><b>DefaultAccount=</b> Default account for the user, used when no account
 is specified when a job is submitted. (Required on creation)</li>
 
-<li><b>DefaultWCKey=</b> Default wckey for the user, used when no wckey 
+<li><b>DefaultWCKey=</b> Default wckey for the user, used when no wckey
 is specified when a job is submitted. (Only used when tracking wckeys.)</li>
 
 <li><b>Name=</b> User name</li>
@@ -627,10 +627,10 @@ is specified when a job is submitted. (Only used when tracking wckeys.)</li>
 <h2>Limit enforcement</h2>
 
 <p>When limits are developed they will work in this order.
-If a user has a limit set SLURM will read in those, 
-if not we will refer to the account associated with the job. 
-If the account doesn't have the limit set we will refer to 
-the cluster's limits. 
+If a user has a limit set SLURM will read in those,
+if not we will refer to the account associated with the job.
+If the account doesn't have the limit set we will refer to
+the cluster's limits.
 If the cluster doesn't have the limit set no limit will be enforced.
 <p>All of the above entities can include limits as described below...
 
@@ -677,7 +677,7 @@ If the cluster doesn't have the limit set no limit will be enforced.
 
 <li><b>GrpWall=</b> The maximum wall clock time any job submitted to
   this group can run for.  If this limit is reached submission requests
-  will be denied. 
+  will be denied.
 </li>
 
 <!-- For future use
@@ -704,7 +704,7 @@ If the cluster doesn't have the limit set no limit will be enforced.
   have from this association.  If this limit is reached the job will
   be denied at submission.
 </li>
- 
+
 <li><b>MaxSubmitJobs=</b> The maximum number of jobs able to be submitted
   to the system at any given time from this association.  If
   this limit is reached new submission requests will be denied until
@@ -723,7 +723,7 @@ If the cluster doesn't have the limit set no limit will be enforced.
 
 <h2>Modifying Entities</h2>
 
-<p>When modifying entities, you can specify many different options in 
+<p>When modifying entities, you can specify many different options in
 SQL-like fashion, using key words like <i>where</i> and <i>set</i>.
 A typical execute line has the following form:
 <pre>
@@ -735,22 +735,22 @@ sacctmgr modify &lt;entity&gt; set &lt;options&gt; where &lt;options&gt;
 sacctmgr modify user set default=none where default=test
 </pre>
 <p>will change all users with a default account of "test" to account "none".
-Once an entity has been added, modified or removed, the change is 
-sent to the appropriate SLURM daemons and will be available for use 
+Once an entity has been added, modified or removed, the change is
+sent to the appropriate SLURM daemons and will be available for use
 instantly.</p>
 
 <h2>Removing Entities</h2>
 
 <p>Removing entities using an execute line similar to the modify example above,
 but without the set options.
-For example, remove all users with a default account "test" using the following 
+For example, remove all users with a default account "test" using the following
 execute line:</p>
 <pre>
 sacctmgr remove user where default=test
 </pre>
 <p>Note: In most cases, removed entities are preserved, but flagged
-as deleted. 
-If an entity has existed for less than 1 day, the entity will be removed 
+as deleted.
+If an entity has existed for less than 1 day, the entity will be removed
 completely. This is meant to clean up after typographic errors.</p>
 
 <p style="text-align: center;">Last modified 2 March 2009</p>
diff --git a/doc/html/accounting_storageplugins.shtml b/doc/html/accounting_storageplugins.shtml
index ebe7221031a708700bb761a12bc76dca22a091c6..de3cac11708ce4de285867f39da4b7f56555d1b5 100644
--- a/doc/html/accounting_storageplugins.shtml
+++ b/doc/html/accounting_storageplugins.shtml
@@ -27,7 +27,7 @@ for the type of accounting package. We currently use
 <li><b>pgsql</b>&#151; Store information in a postgresql database.
 <li><b>none</b>&#151; Information is not stored anywhere.
 </ul>
-<p>The programmer is urged to study 
+<p>The programmer is urged to study
 <span class="commandline">src/plugins/accounting_storage/mysql</span>
 for a sample implementation of a SLURM Accounting Storage plugin.
 <p> The Accounting Storage plugin was written to be a interface
@@ -35,7 +35,7 @@ to storage data collected by the Job Accounting Gather plugin.  When
 adding a new database you may want to add common functions in a common
 file in the src/database dir.  Refer to src/database/mysql_common.c|.h for an
 example so other plugins can also use that database type to write out
-information. 
+information.
 <p class="footer"><a href="#top">top</a>
 
 
@@ -44,12 +44,12 @@ information.
 The Job Accounting Storage API uses hooks in the slurmctld.
 
 <p>All of the following functions are required. Functions which are not
-implemented must be stubbed. 
+implemented must be stubbed.
 
 <h4>Functions called by the accounting_storage plugin</h4>
 
 <p class="commandline">void *acct_storage_p_get_connection(bool
-  make_agent, int conn_num, bool rollback, char *location) 
+  make_agent, int conn_num, bool rollback, char *location)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 acct_storage_p_get_connection() is called to get a connection to the
   storage medium. acct_storage_p_close_connection() should be used to
@@ -69,7 +69,7 @@ happen or not (in use with databases that support rollback).<br>
 used inside the plugin to connection to the storage type on success, or<br>
 <span class="commandline">NULL</span> on failure.
 
-<p class="commandline">int acct_storage_p_close_connection(void **db_conn) 
+<p class="commandline">int acct_storage_p_close_connection(void **db_conn)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 acct_storage_p_close_connection() is called at the end of the program that has
 called acct_storage_p_get_connection this function closes the connection to
@@ -77,7 +77,7 @@ the storage type.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input/output) connection to
 the storage type, all memory will be freed inside this function and
-set to NULL. 
+set to NULL.
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
@@ -103,9 +103,9 @@ Called to add users to the storage type.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
-<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">uid</span> (input) uid of user calling the function.<br>
 <span class="commandline">user_list</span> (input) list of
-acct_user_rec_t *'s containing information about the users being added.<br> 
+acct_user_rec_t *'s containing information about the users being added.<br>
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
@@ -117,7 +117,7 @@ Called to link specified users to the specified accounts as coordinators.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
-<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">uid</span> (input) uid of user calling the function.<br>
 <span class="commandline">acct_list</span> (input) list of
 acct_account_rec_t *'s containing information about the accounts to
 add the coordinators to. <br>
@@ -148,7 +148,7 @@ Called to add clusters to the storage type.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
-<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">uid</span> (input) uid of user calling the function.<br>
 <span class="commandline">cluster_list</span> (input) list of
 acct_cluster_rec_t *'s containing information about the clusters to add. <br>
 <p style="margin-left:.2in"><b>Returns</b>: <br>
@@ -162,7 +162,7 @@ Called to add associations to the storage type.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
-<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">uid</span> (input) uid of user calling the function.<br>
 <span class="commandline">association_list</span> (input) list of
 acct_association_rec_t *'s containing information about the
 associations to add. <br>
@@ -177,7 +177,7 @@ Called to add QOS' to the storage type.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
-<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">uid</span> (input) uid of user calling the function.<br>
 <span class="commandline">qos_list</span> (input) list of
 acct_qos_rec_t *'s containing information about the qos to add. <br>
 <p style="margin-left:.2in"><b>Returns</b>: <br>
@@ -191,7 +191,7 @@ Called to add wckeys to the storage type.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
-<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">uid</span> (input) uid of user calling the function.<br>
 <span class="commandline">wckey_list</span> (input) list of
 acct_wckey_rec_t *'s containing information about the wckeys to add. <br>
 <p style="margin-left:.2in"><b>Returns</b>: <br>
@@ -223,7 +223,7 @@ Used to modify existing users in the storage type.  The condition
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
-<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">uid</span> (input) uid of user calling the function.<br>
 <span class="commandline">user_cond</span> (input) conditional about
 which users need to change.  User names or ids should not need to be stated.<br>
 <span class="commandline">user</span> (input) what the changes
@@ -245,7 +245,7 @@ Used to modify existing accounts in the storage type.  The condition
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
-<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">uid</span> (input) uid of user calling the function.<br>
 <span class="commandline">acct_cond</span> (input) conditional about
 which accounts need to change.  Account names should not need to be stated.<br>
 <span class="commandline">acct</span> (input) what the changes
@@ -267,7 +267,7 @@ Used to modify existing clusters in the storage type.  The condition
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
-<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">uid</span> (input) uid of user calling the function.<br>
 <span class="commandline">cluster_cond</span> (input) conditional about
 which clusters need to change.  Cluster names should not need to be stated.<br>
 <span class="commandline">cluster</span> (input) what the changes
@@ -289,7 +289,7 @@ Used to modify existing associations in the storage type.  The condition
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
-<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">uid</span> (input) uid of user calling the function.<br>
 <span class="commandline">assoc_cond</span> (input) conditional about
 which associations need to change.  Association ids should not need to be stated.<br>
 <span class="commandline">assoc</span> (input) what the changes
@@ -311,7 +311,7 @@ Used to modify existing qos in the storage type.  The condition
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type.<br>
-<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">uid</span> (input) uid of user calling the function.<br>
 <span class="commandline">qos_cond</span> (input) conditional about
 which qos need to change.  Qos names should not need to be stated.<br>
 <span class="commandline">qos</span> (input) what the changes
@@ -333,7 +333,7 @@ Used to modify existing wckeys in the storage type.  The condition
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type.<br>
-<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">uid</span> (input) uid of user calling the function.<br>
 <span class="commandline">wckey_cond</span> (input) conditional about
 which wckeys need to change.  Wckey names should not need to be stated.<br>
 <span class="commandline">wckey</span> (input) what the changes
@@ -367,7 +367,7 @@ Used to remove users from the storage type.  This will remove all
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
-<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">uid</span> (input) uid of user calling the function.<br>
 <span class="commandline">user_cond</span> (input) conditional about
 which users to be removed.  User names or ids should not need to be stated.<br>
 <p style="margin-left:.2in"><b>Returns</b>: <br>
@@ -376,16 +376,16 @@ removed on success, or<br>
 <span class="commandline">NULL</span> on failure.
 
 <p class="commandline">
-List acct_storage_p_remove_coord(void *db_conn, uint32_t uid, 
+List acct_storage_p_remove_coord(void *db_conn, uint32_t uid,
 List acct_list, acct_user_cond_t *user_cond)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 Used to remove coordinators from the storage type.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
-<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">uid</span> (input) uid of user calling the function.<br>
 <span class="commandline">acct_list</span> (input) list of accounts
-associated with the users.<br> 
+associated with the users.<br>
 <span class="commandline">user_cond</span> (input) conditional about
 which users to be removed as coordinators.  User names or ids should be stated.<br>
 <p style="margin-left:.2in"><b>Returns</b>: <br>
@@ -394,7 +394,7 @@ removed as coordinators on success, or<br>
 <span class="commandline">NULL</span> on failure.
 
 <p class="commandline">
-List acct_storage_p_remove_accounts(void *db_conn, uint32_t uid, 
+List acct_storage_p_remove_accounts(void *db_conn, uint32_t uid,
 acct_account_cond_t *acct_cond)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 Used to remove accounts from the storage type. This will remove all
@@ -405,7 +405,7 @@ Used to remove accounts from the storage type. This will remove all
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
-<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">uid</span> (input) uid of user calling the function.<br>
 <span class="commandline">acct_cond</span> (input) conditional about
 which accounts to be removed.  Account names should not need to be stated.<br>
 <p style="margin-left:.2in"><b>Returns</b>: <br>
@@ -414,16 +414,16 @@ removed on success, or<br>
 <span class="commandline">NULL</span> on failure.
 
 <p class="commandline">
-List acct_storage_p_remove_clusters(void *db_conn, uint32_t uid, 
+List acct_storage_p_remove_clusters(void *db_conn, uint32_t uid,
 acct_cluster_cond_t *cluster_cond)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 Used to remove clusters from the storage type. This will remove all
   associations from these clusters.  You need to make sure no jobs are
-  running with any association that is to be removed. 
+  running with any association that is to be removed.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
-<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">uid</span> (input) uid of user calling the function.<br>
 <span class="commandline">cluster_cond</span> (input) conditional about
 which clusters to be removed.  Cluster names should not need to be stated.<br>
 <p style="margin-left:.2in"><b>Returns</b>: <br>
@@ -436,11 +436,11 @@ List acct_storage_p_remove_associations(void *db_conn, uint32_t uid,
 acct_association_cond_t *assoc_cond)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 Used to remove associations from the storage type.  You need to make
-  sure no jobs are running with any association that is to be removed. 
+  sure no jobs are running with any association that is to be removed.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
-<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">uid</span> (input) uid of user calling the function.<br>
 <span class="commandline">assoc_cond</span> (input) conditional about
 which associations to be removed.  Association ids should not need to be stated.<br>
 <p style="margin-left:.2in"><b>Returns</b>: <br>
@@ -456,7 +456,7 @@ Used to remove qos from the storage type.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
-<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">uid</span> (input) uid of user calling the function.<br>
 <span class="commandline">qos_cond</span> (input) conditional about
 which qos to be removed.  Qos names should not need to be stated.<br>
 <p style="margin-left:.2in"><b>Returns</b>: <br>
@@ -472,7 +472,7 @@ Used to remove wckeys from the storage type.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
-<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">uid</span> (input) uid of user calling the function.<br>
 <span class="commandline">wckey_cond</span> (input) conditional about
 which wckeys to be removed.  Wckey names should not need to be stated.<br>
 <p style="margin-left:.2in"><b>Returns</b>: <br>
@@ -503,7 +503,7 @@ Get a list of acct_user_rec_t *'s based on the conditional sent.
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
 <span class="commandline">uid</span> (input) uid of user calling the
-function.<br> 
+function.<br>
 <span class="commandline">user_cond</span> (input) conditional about
 which users are to be returned.  User names or ids should not need to
 be stated.<br>
@@ -521,7 +521,7 @@ Get a list of acct_account_rec_t *'s based on the conditional sent.
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
 <span class="commandline">uid</span> (input) uid of user calling the
-function.<br> 
+function.<br>
 <span class="commandline">acct_cond</span> (input) conditional about
 which accounts are to be returned.  Account names should not need to
 be stated.<br>
@@ -539,7 +539,7 @@ Get a list of acct_cluster_rec_t *'s based on the conditional sent.
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
 <span class="commandline">uid</span> (input) uid of user calling the
-function.<br> 
+function.<br>
 <span class="commandline">cluster_cond</span> (input) conditional about
 which clusters are to be returned.  Cluster names should not need to
 be stated.<br>
@@ -557,7 +557,7 @@ Get a list of acct_association_rec_t *'s based on the conditional sent.
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
 <span class="commandline">uid</span> (input) uid of user calling the
-function.<br> 
+function.<br>
 <span class="commandline">assoc_cond</span> (input) conditional about
 which associations are to be returned.  Association names should not need to
 be stated.<br>
@@ -575,7 +575,7 @@ Get a list of acct_qos_rec_t *'s based on the conditional sent.
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
 <span class="commandline">uid</span> (input) uid of user calling the
-function.<br> 
+function.<br>
 <span class="commandline">qos_cond</span> (input) conditional about
 which qos are to be returned.  Qos names should not need to
 be stated.<br>
@@ -593,7 +593,7 @@ Get a list of acct_wckey_rec_t *'s based on the conditional sent.
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
 <span class="commandline">uid</span> (input) uid of user calling the
-function.<br> 
+function.<br>
 <span class="commandline">wckey_cond</span> (input) conditional about
 which wckeys are to be returned.  Wckey names should not need to
 be stated.<br>
@@ -611,7 +611,7 @@ Get a list of acct_txn_rec_t *'s (transactions) based on the conditional sent.
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
 <span class="commandline">uid</span> (input) uid of user calling the
-function.<br> 
+function.<br>
 <span class="commandline">txn_cond</span> (input) conditional about
 which transactions are to be returned.  Transaction ids should not need to
 be stated.<br>
@@ -629,14 +629,14 @@ Get usage for a specific association or wckey.
 <span class="commandline">db_conn</span> (input) connection to
 the storage type.<br>
 <span class="commandline">uid</span> (input) uid of user calling the
-function.<br> 
+function.<br>
 <span class="commandline">in</span> (input/out) can be anything that
-gathers usage like acct_association_rec_t * or acct_wckey_rec_t *.<br> 
+gathers usage like acct_association_rec_t * or acct_wckey_rec_t *.<br>
 <span class="commandline">type</span> (input) really
 slurmdbd_msg_type_t should let the plugin know what the structure is
-that was sent in some how.<br> 
-<span class="commandline">start</span> (input) start time of the usage.<br> 
-<span class="commandline">end</span> (input) end time of the usage.<br> 
+that was sent in some how.<br>
+<span class="commandline">start</span> (input) start time of the usage.<br>
+<span class="commandline">end</span> (input) end time of the usage.<br>
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
@@ -648,7 +648,7 @@ roll up association, cluster, and wckey usage in the storage.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type.<br>
-<span class="commandline">start</span> (input) start time of the rollup.<br> 
+<span class="commandline">start</span> (input) start time of the rollup.<br>
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
@@ -665,7 +665,7 @@ the storage type.<br>
 is on.<br>
 <span class="commandline">node_ptr</span> (input) pointer to the node
 structure marked down.<br>
-<span class="commandline">event_time</span> (input) time event happened.<br> 
+<span class="commandline">event_time</span> (input) time event happened.<br>
 <span class="commandline">reason</span> (input) if different from what
 is set in the node_ptr, the reason the node is down.<br>
 <p style="margin-left:.2in"><b>Returns</b>: <br>
@@ -684,7 +684,7 @@ the storage type.<br>
 is on.<br>
 <span class="commandline">node_ptr</span> (input) pointer to the node
 structure marked up.<br>
-<span class="commandline">event_time</span> (input) time event happened.<br> 
+<span class="commandline">event_time</span> (input) time event happened.<br>
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
@@ -716,18 +716,18 @@ Get usage for a specific cluster.
 <span class="commandline">db_conn</span> (input) connection to
 the storage type.<br>
 <span class="commandline">uid</span> (input) uid of user calling the
-function.<br> 
+function.<br>
 <span class="commandline">cluster_rec</span> (input/out)
 acct_cluster_rec_t * already set with the cluster name.  Usage will be
-filled in.<br> 
+filled in.<br>
 <span class="commandline">type</span> (input) really
 slurmdbd_msg_type_t should let the plugin know what the structure is
-that was sent in some how for this it is just DBD_GET_CLUSTER_USAGE.<br> 
-<span class="commandline">start</span> (input) start time of the usage.<br> 
-<span class="commandline">end</span> (input) end time of the usage.<br> 
+that was sent in somehow; for this it is just DBD_GET_CLUSTER_USAGE.<br>
+<span class="commandline">start</span> (input) start time of the usage.<br>
+<span class="commandline">end</span> (input) end time of the usage.<br>
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
-the storage type. 
+the storage type.
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
@@ -749,35 +749,35 @@ running on the host is grabbed from the connection.<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
 
 <p class="commandline">
-int jobacct_storage_p_job_start(void *db_conn, struct job_record *job_ptr) 
+int jobacct_storage_p_job_start(void *db_conn, struct job_record *job_ptr)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 jobacct_storage_p_job_start() is called in the jobacct plugin when a
 job starts, inserting information into the database about the new job.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type.<br>
-<span class="commandline">job_ptr</span> (input) information about the job in 
+<span class="commandline">job_ptr</span> (input) information about the job in
 slurmctld.
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
 
 <p class="commandline">
-int jobacct_storage_p_job_complete(void *db_conn, struct job_record *job_ptr) 
+int jobacct_storage_p_job_complete(void *db_conn, struct job_record *job_ptr)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 jobacct_storage_p_job_complete() is called in the jobacct plugin when
 a job completes, this updates info about end of a job.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type.<br>
-<span class="commandline">job_ptr</span> (input) information about the job in 
+<span class="commandline">job_ptr</span> (input) information about the job in
 slurmctld.
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
 
 <p class="commandline">
-int jobacct_storage_p_step_start(void *db_conn, struct step_record *step_ptr) 
+int jobacct_storage_p_step_start(void *db_conn, struct step_record *step_ptr)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 jobacct_storage_p_step_start() is called in the jobacct plugin at the
 allocation of a new step in the slurmctld, this inserts info about the
@@ -792,12 +792,12 @@ slurmctld.
 <span class="commandline">SLURM_ERROR</span> on failure.
 
 <p class="commandline">
-int jobacct_storage_p_step_complete(void *db_conn, struct step_record *step_ptr) 
+int jobacct_storage_p_step_complete(void *db_conn, struct step_record *step_ptr)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 jobacct_storage_p_step_complete() is called in the jobacct plugin at
 the end of a step in the slurmctld, this updates the ending
 information about a step.
-<p style="margin-left:.2in"><b>Arguments</b>:<br> 
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type.<br>
 <span class="commandline">step_ptr</span> (input) information about the step in
@@ -807,29 +807,29 @@ slurmctld.
 <span class="commandline">SLURM_ERROR</span> on failure.
 
 <p class="commandline">
-int jobacct_storage_p_job_suspend(void *db_conn, struct job_record *job_ptr) 
+int jobacct_storage_p_job_suspend(void *db_conn, struct job_record *job_ptr)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 jobacct_storage_p_suspend() is called in the jobacct plugin when a
 job is suspended or resumed in the slurmctld, this updates the
-database about the suspended time of the job. 
+database about the suspended time of the job.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type.<br>
-<span class="commandline">job_ptr</span> (input) information about the job in 
+<span class="commandline">job_ptr</span> (input) information about the job in
 slurmctld.
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">none</span>
 
 <p class="commandline">
 List jobacct_storage_p_get_jobs_cond(void *db_conn, uint32_t uid,
-acct_job_cond_t *job_cond) 
+acct_job_cond_t *job_cond)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 jobacct_storage_p_get_jobs_cond() is called to get a list of jobs from the
-database given the conditional. 
+database given the conditional.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type.<br>
-<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">uid</span> (input) uid of user calling the function.<br>
 <span class="commandline">job_cond</span> (input) conditional about
 which jobs to get.  Job ids should not need to be stated.<br>
 <p style="margin-left:.2in"><b>Returns</b>: <br>
@@ -837,7 +837,7 @@ which jobs to get.  Job ids should not need to be stated.<br>
 <span class="commandline">NULL</span> on failure.
 
 <p class="commandline">
-int jobacct_storage_p_archive(void *db_conn, acct_archive_cond_t *arch_cond) 
+int jobacct_storage_p_archive(void *db_conn, acct_archive_cond_t *arch_cond)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 used to archive old data.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
@@ -850,7 +850,7 @@ what to archive.<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
 
 <p class="commandline">
-int jobacct_storage_p_archive_load(void *db_conn, acct_archive_rect *arch_rec) 
+int jobacct_storage_p_archive_load(void *db_conn, acct_archive_rect *arch_rec)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 used to load old archive data.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
@@ -869,13 +869,13 @@ Used to update shares used in the storage type.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type.<br>
-<span class="commandline">acct_list</span> (input) List of shares_used_object_t.<br> 
+<span class="commandline">acct_list</span> (input) List of shares_used_object_t.<br>
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
 
 <p class="commandline">
-int acct_storage_p_flush_jobs_on_cluster(void *db_conn, char *cluster, time_t event_time) 
+int acct_storage_p_flush_jobs_on_cluster(void *db_conn, char *cluster, time_t event_time)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 used to mark all jobs in the storage type as finished.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
@@ -917,7 +917,7 @@ database with.
 </dl>
 
 <h2>Versioning</h2>
-<p> This document describes version 1 of the SLURM Accounting Storage API. Future 
+<p> This document describes version 1 of the SLURM Accounting Storage API. Future
 releases of SLURM may revise this API. An Accounting Storage plugin conveys its
 ability to implement a particular API version using the mechanism outlined
 for SLURM plugins.
diff --git a/doc/html/api.shtml b/doc/html/api.shtml
index 68d83d0b01034efd1b50f68baeb5b857f3e6a419..3188ccd5dabfa9ee0216fa8a540bfc622eece38d 100644
--- a/doc/html/api.shtml
+++ b/doc/html/api.shtml
@@ -3,13 +3,13 @@
 <h2><a name="top">SLURM APIs</a></h2>
 
 <h3>Overview</h3>
-<p>All of the SLURM commands utilize a collection of Application Programming 
-Interfaces (APIs). 
-User and system applications can directly use these APIs as desired to 
+<p>All of the SLURM commands utilize a collection of Application Programming
+Interfaces (APIs).
+User and system applications can directly use these APIs as desired to
 achieve tighter integration with SLURM.
-For example, SLURM data structures and error codes can be directly 
+For example, SLURM data structures and error codes can be directly
 examined rather than executing SLURM commands and parsing their output.
-This document describes SLURM APIs. 
+This document describes SLURM APIs.
 You should see the man pages for individual APIs to get more details.</p>
 
 <h3>Get Overall SLURM Information</h3>
@@ -17,8 +17,8 @@ You should see the man pages for individual APIs to get more details.</p>
 
 <li><b>slurm_api_version</b>&#151;Get SLURM API version number.</li>
 
-<li><b>slurm_load_ctl_conf</b>&#151;Load system-wide configuration 
-specifications. Free with <i>slurm_free_ctl_conf</i> to avoid memory 
+<li><b>slurm_load_ctl_conf</b>&#151;Load system-wide configuration
+specifications. Free with <i>slurm_free_ctl_conf</i> to avoid memory
 leak.</li>
 
 <li><b>slurm_print_ctl_conf</b>&#151;Print system-wide configuration
@@ -34,7 +34,7 @@ specifications.</li>
 <h3>Get Job Information</h3>
 <ul>
 
-<li><b>slurm_pid2jobid</b>&#151;For a given process ID on a node 
+<li><b>slurm_pid2jobid</b>&#151;For a given process ID on a node
 get the corresponding SLURM job ID.</li>
 
 <li><b>slurm_get_end_time</b>&#151;For a given SLURM job ID
@@ -49,8 +49,8 @@ all jobs.</li>
 <li><b>slurm_print_job_info</b>&#151;Print information about
 a specific job.</li>
 
-<li><b>slurm_get_select_jobinfo</b>&#151;Get <i>select</i> plugin 
-specific information associated with the job. The information 
+<li><b>slurm_get_select_jobinfo</b>&#151;Get <i>select</i> plugin
+specific information associated with the job. The information
 available is will vary by select plugin type configured.</li>
 
 <li><b>slurm_free_job_info_msg</b>&#151;Free storage allocated by
@@ -64,7 +64,7 @@ available is will vary by select plugin type configured.</li>
 <ul>
 
 <li><b>slurm_get_job_steps</b>&#151;Load job step information.
-Free with <i>slurm_free_job_step_info_response_msg</i> to 
+Free with <i>slurm_free_job_step_info_response_msg</i> to
 avoid memory leak.</li>
 
 <li><b>slurm_print_job_step_info_msg</b>&#151;Print information about
@@ -73,7 +73,7 @@ all job steps.</li>
 <li><b>slurm_print_job_step_info</b>&#151;Print information about
 a specific job step.</li>
 
-<li><b>slurm_free_job_step_info_response_msg</b>&#151;Free storage 
+<li><b>slurm_free_job_step_info_response_msg</b>&#151;Free storage
 allocated by <i>slurm_get_job_steps</i>.</li>
 
 </ul>
@@ -121,10 +121,10 @@ allocated by <i>slurm_load_partitions</i>.</li>
 <h3>Error Handling</h3>
 <ul>
 
-<li><b>slurm_get_errno</b>&#151;Return the error code set by the 
+<li><b>slurm_get_errno</b>&#151;Return the error code set by the
 last SLURM API function executed.</li>
 
-<li><b>slurm_perror</b>&#151;Print SLURM error information to 
+<li><b>slurm_perror</b>&#151;Print SLURM error information to
 standard output.</li>
 
 <li><b>slurm_strerror</b>&#151;Return a string describing a specific
@@ -138,21 +138,21 @@ SLURM error code.</li>
 <ul>
 
 <li><b>slurm_init_job_desc_msg</b>&#151;Initialize the data structure
-used in resource allocation requests. You can then just set the fields 
+used in resource allocation requests. You can then just set the fields
 of particular interest and let the others use default values.</li>
 
-<li><b>slurm_job_will_run</b>&#151;Determine if a job would be 
+<li><b>slurm_job_will_run</b>&#151;Determine if a job would be
 immediately initiated if submitted now.</li>
 
 <li><b>slurm_allocate_resources</b>&#151;Allocate resources for a job.
-Response message must be freed using 
+Response message must be freed using
 <i>slurm_free_resource_allocation_response_msg</i> to avoid a
 memory leak.</li>
 
 <li><b>slurm_free_resource_allocation_response_msg</b>&#151;
 Frees memory allocated by <i>slurm_allocate_resources</i>.</li>
 
-<li><b>slurm_allocate_resources_and_run</b>&#151;Allocate resources for a 
+<li><b>slurm_allocate_resources_and_run</b>&#151;Allocate resources for a
 job and spawn a job step. Response message must be freed using
 <i>slurm_free_resource_allocation_and_run_response_msg</i> to avoid a
 memory leak.</li>
@@ -160,7 +160,7 @@ memory leak.</li>
 <li><b>slurm_free_resource_allocation_and_run_response_msg</b>&#151;
 Frees memory allocated by <i>slurm_allocate_resources_and_run</i>.</li>
 
-<li><b>slurm_submit_batch_job</b>&#151;Submit a script for later 
+<li><b>slurm_submit_batch_job</b>&#151;Submit a script for later
 execution. Response message must be freed using
 <i>slurm_free_submit_response_response_msg</i> to avoid a
 memory leak.</li>
@@ -179,19 +179,19 @@ allocation is still active or for error recovery.</li>
 
 
 <h3>Job Step Creation</h3>
-<p>SLURM job steps involve numerous interactions with the 
-<i>slurmd</i> daemon. The job step creation is only the 
-first step in the process. We don't advise direct user 
-creation of job steps, but include the information here 
+<p>SLURM job steps involve numerous interactions with the
+<i>slurmd</i> daemon. The job step creation is only the
+first step in the process. We don't advise direct user
+creation of job steps, but include the information here
 for completeness.</p>
 <ul>
 
 <li><b>slurm_job_step_create</b>&#151;Initiate a job step.
-Allocated memory must be freed by 
-<i>slurm_free_job_step_create_response_msg</i> to avoid a 
+Allocated memory must be freed by
+<i>slurm_free_job_step_create_response_msg</i> to avoid a
 memory leak.</li>
 
-<li><b>slurm_free_job_step_create_response_msg</b>&#151;Free 
+<li><b>slurm_free_job_step_create_response_msg</b>&#151;Free
 memory allocated by <i>slurm_job_step_create</i>.
 
 <li><b>slurm_step_ctx_create</b>&#151;Create job step context.
@@ -232,7 +232,7 @@ field as returned by <i>slurm_step_ctx_get</i>.</li>
 <li><b>slurm_complete_job</b>&#151;Note completion of a job.
 Releases resource allocation for the job.</li>
 
-<li><b>slurm_complete_job_step</b>&#151;Note completion of a 
+<li><b>slurm_complete_job_step</b>&#151;Note completion of a
 job step.</li>
 
 </ul>
@@ -242,14 +242,14 @@ job step.</li>
 <h3>Checkpoint</h3>
 <ul>
 
-<li><b>slurm_checkpoint_able</b>&#151;Note that a specific job or 
+<li><b>slurm_checkpoint_able</b>&#151;Note that a specific job or
 job step is eligible for checkpoint.</li>
 
-<li><b>slurm_checkpoint_complete</b>&#151;Note that a requested 
+<li><b>slurm_checkpoint_complete</b>&#151;Note that a requested
 checkpoint has completed.</li>
 
-<li><b>slurm_checkpoint_create</b>&#151;Request a checkpoint for 
-a specific job step. Continue execution upon completion of the 
+<li><b>slurm_checkpoint_create</b>&#151;Request a checkpoint for
+a specific job step. Continue execution upon completion of the
 checkpoint.</li>
 
 <li><b>slurm_checkpoint_vacate</b>&#151;Request a checkpoint for
@@ -259,13 +259,13 @@ checkpoint.</li>
 <li><b>slurm_checkpoint_disable</b>&#151;Make the identified job step
 non-checkpointable.</li>
 
-<li><b>slurm_checkpoint_enable</b>&#151;Make the identified job 
+<li><b>slurm_checkpoint_enable</b>&#151;Make the identified job
 step checkpointable.</li>
 
-<li><b>slurm_checkpoint_error</b>&#151;Get error information for 
+<li><b>slurm_checkpoint_error</b>&#151;Get error information for
 the last checkpoint operation on a given job step.</li>
 
-<li><b>slurm_checkpoint_restart</b>&#151;Request that a previously 
+<li><b>slurm_checkpoint_restart</b>&#151;Request that a previously
 checkpointed job resume execution.</li>
 
 </ul>
@@ -276,9 +276,9 @@ checkpointed job resume execution.</li>
 <p>Most of these functions can only be executed by user <i>root</i>.</p>
 <ul>
 
-<li><b>slurm_reconfigure</b>&#151;Update slurm daemons 
+<li><b>slurm_reconfigure</b>&#151;Update slurm daemons
 based upon current <i>slurm.conf</i> configuration file.
-Use this after updating the configuration file to 
+Use this after updating the configuration file to
 insure that it takes effect.</li>
 
 <li><b>slurm_shutdown</b>&#151;Terminate slurm daemons.</li>
@@ -287,14 +287,14 @@ insure that it takes effect.</li>
 information associated with a given job.</li>
 
 <li><b>slurm_update_node</b>&#151;Update state
-information associated with a given node. NOTE: Most 
+information associated with a given node. NOTE: Most
 of a node's characteristics can not be modified.</li>
 
-<li><b>slurm_init_part_desc_msg</b>&#151;Initialize a 
-partition update descriptor. Used this to initialize 
+<li><b>slurm_init_part_desc_msg</b>&#151;Initialize a
+partition update descriptor. Use this to initialize
 the data structure used in <i>slurm_update_partition</i>.</li>
 
-<li><b>slurm_update_partition</b>&#151;Update state 
+<li><b>slurm_update_partition</b>&#151;Update state
 information associated with a given partition.</li>
 
 <li><b>slurm_delete_partition</b>&#151;Destroy a partition.</li>
@@ -305,9 +305,9 @@ information associated with a given partition.</li>
 
 <h3>SLURM Host List Support</h3>
 <p>SLURM uses a condensed format to express node names.
-For example <i>linux[1-3,6]</i> represents <i>linux1</i>, 
-<i>linux2</i>, <i>linux3</i>, and <i>linux6</i>. These 
-functions permit you to translate the SLURM expression 
+For example <i>linux[1-3,6]</i> represents <i>linux1</i>,
+<i>linux2</i>, <i>linux3</i>, and <i>linux6</i>. These
+functions permit you to translate the SLURM expression
 into a list of individual node names.</p>
 
 <ul>
@@ -316,8 +316,8 @@ into a list of individual node names.</p>
 node name expression into a record used for parsing.
 Use <i>slurm_hostlist_destroy</i> to free the allocated
 storage.</li>
- 
-<li><b>slurm_hostlist_shift</b>&#151;Get the next node 
+
+<li><b>slurm_hostlist_shift</b>&#151;Get the next node
 name.</li>
 
 <li><b>slurm_hostlist_destroy</b>&#151;Release storage
diff --git a/doc/html/arch.gif b/doc/html/arch.gif
index 1cae914e0d1a81f6f990484f1ca172b71e6fc669..85755164174fbd76ada3a9b99f6015eaf2524150 100644
Binary files a/doc/html/arch.gif and b/doc/html/arch.gif differ
diff --git a/doc/html/authplugins.shtml b/doc/html/authplugins.shtml
index 02c221822c5eb5f34c6a76130731acd2c1de80e6..2aa2e37712bc1d4d11649e6c740c0f146c58aa6a 100644
--- a/doc/html/authplugins.shtml
+++ b/doc/html/authplugins.shtml
@@ -3,73 +3,73 @@
 <h1><a name="top">SLURM Authentication Plugin API</a></h1>
 
 <h2> Overview</h2>
-<p> This document describes SLURM authentication plugins and the API that defines 
-them. It is intended as a resource to programmers wishing to write their own SLURM 
+<p> This document describes SLURM authentication plugins and the API that defines
+them. It is intended as a resource to programmers wishing to write their own SLURM
 authentication plugins. This is version 100 of the API.</p>
-<p>SLURM authentication plugins are SLURM plugins that implement the SLURM authentication 
-API described herein. They must conform to the SLURM Plugin API with the following 
+<p>SLURM authentication plugins are SLURM plugins that implement the SLURM authentication
+API described herein. They must conform to the SLURM Plugin API with the following
 specifications:</p>
 <p><span class="commandline">const char plugin_type[]</span><br>
-The major type must be &quot;auth.&quot; The minor type can be any recognizable 
+The major type must be &quot;auth.&quot; The minor type can be any recognizable
 abbreviation for the type of authentication. We recommend, for example:</p>
 <ul>
-<li><b>none</b>&#151;A plugin that implements the API without providing any actual 
-authentication service. This may be used for testing purposes, but is not suitable for 
+<li><b>none</b>&#151;A plugin that implements the API without providing any actual
+authentication service. This may be used for testing purposes, but is not suitable for
 production use due to lack of effective security.</li>
 <li><b>authd</b>&#151;Brett Chun's Linux authd.</li>
 <li><b>munge</b>&#151;LLNL's Munge protocol (recommended plugin for production use).</li>
 </ul>
-<p>The <span class="commandline">plugin_name</span> and <span class="commandline">plugin_version</span> 
-symbols required by the SLURM Plugin API require no specialization for authentication. 
+<p>The <span class="commandline">plugin_name</span> and <span class="commandline">plugin_version</span>
+symbols required by the SLURM Plugin API require no specialization for authentication.
 Note carefully, however, the versioning discussion below.</p>
-<p>The programmer is urged to study <span class="commandline">src/plugins/auth/none/auth_none.c</span> 
+<p>The programmer is urged to study <span class="commandline">src/plugins/auth/none/auth_none.c</span>
 for an example implementation of a SLURM authentication plugin.</p>
 <p class="footer"><a href="#top">top</a></p>
 <h2>Data Objects</h2>
-<p> The implementation must support an opaque class, which it defines, to be used 
-as an authentication &quot;credential.&quot; This class must encapsulate all user-specific 
-information necessary for the operation of the API specification below. The credential 
+<p> The implementation must support an opaque class, which it defines, to be used
+as an authentication &quot;credential.&quot; This class must encapsulate all user-specific
+information necessary for the operation of the API specification below. The credential
 is referred to in SLURM code by an anonymous pointer (void *).</p>
-<p>The implementation must maintain (though not necessarily directly export) an 
-enumerated <b>errno</b> to allow SLURM to discover as practically as possible 
-the reason for any failed API call. The following enumerated integer values (declared 
-in <span class="commandline">src/common/slurm_auth.h</span>) must be used when 
+<p>The implementation must maintain (though not necessarily directly export) an
+enumerated <b>errno</b> to allow SLURM to discover as practically as possible
+the reason for any failed API call. The following enumerated integer values (declared
+in <span class="commandline">src/common/slurm_auth.h</span>) must be used when
 appropriate.</p>
-<p style="margin-left:.2in">SLURM_AUTH_BADARG&#151;an argument to an API function 
+<p style="margin-left:.2in">SLURM_AUTH_BADARG&#151;an argument to an API function
 was invalid or malformed.<br>
-SLURM_AUTH_MEMORY&#151;a request could not be satisfied because memory for it 
+SLURM_AUTH_MEMORY&#151;a request could not be satisfied because memory for it
 could not be allocated.<br>
-SLURM_AUTH_NOUSER&#151;a credential is improper because it refers to an unknown 
+SLURM_AUTH_NOUSER&#151;a credential is improper because it refers to an unknown
 user.<br>
-SLURM_AUTH_INVALID&#151;a credential is improper because the validation of it 
+SLURM_AUTH_INVALID&#151;a credential is improper because the validation of it
 has failed. This is specifically distinct from the expiration of a credential.<br>
-SLURM_AUTH_MISMATCH&#151;a credential could not be properly unpacked because it 
+SLURM_AUTH_MISMATCH&#151;a credential could not be properly unpacked because it
 is of an incompatible type or version.</p>
-<p>These values must not be used as return values in integer-valued functions 
-in the API. The proper error return value from integer-valued functions is SLURM_ERROR. 
-While it is most practical to associate a different errno with each instance of 
-a credential, this is not necessarily enforced by the API. The implementation 
-should endeavor to provide useful and pertinent information by whatever means 
-is practical. In most cases, this means an errno for each credential, since plugins 
-must be re-entrant. If a plugin maintains a global errno in place of or in addition 
-to a per-credential errno, it is not required to enforce mutual exclusion on it. 
-Successful API calls are not required to reset any errno to a known value. However, 
-the initial value of any errno, prior to any error condition arising, should be 
+<p>These values must not be used as return values in integer-valued functions
+in the API. The proper error return value from integer-valued functions is SLURM_ERROR.
+While it is most practical to associate a different errno with each instance of
+a credential, this is not necessarily enforced by the API. The implementation
+should endeavor to provide useful and pertinent information by whatever means
+is practical. In most cases, this means an errno for each credential, since plugins
+must be re-entrant. If a plugin maintains a global errno in place of or in addition
+to a per-credential errno, it is not required to enforce mutual exclusion on it.
+Successful API calls are not required to reset any errno to a known value. However,
+the initial value of any errno, prior to any error condition arising, should be
 SLURM_SUCCESS. </p>
-<p>Plugins may assign implementation-specific values to errno so long as they 
-do not conflict with the values assigned above. This is done programmatically 
-by assigning plugin-specific errno values which are arithmetically greater than 
+<p>Plugins may assign implementation-specific values to errno so long as they
+do not conflict with the values assigned above. This is done programmatically
+by assigning plugin-specific errno values which are arithmetically greater than
 or equal to the symbol SLURM_AUTH_FIRST_LOCAL_ERROR.</p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>API Functions</h2>
-<p>The following functions must appear. Functions which are not implemented should 
+<p>The following functions must appear. Functions which are not implemented should
 be stubbed.</p>
 
 <p class="commandline">void *slurm_auth_create (void **argv, char *auth_info);</p>
-<p style="margin-left:.2in"><b>Description</b>: Allocates from the free store 
-an anonymous credential object and returns a pointer to it. The pointer should 
-be valid until passed to <span class="commandline">slurm_auth_destroy()</span> for 
+<p style="margin-left:.2in"><b>Description</b>: Allocates from the free store
+an anonymous credential object and returns a pointer to it. The pointer should
+be valid until passed to <span class="commandline">slurm_auth_destroy()</span> for
 disposal. SLURM will not pass
 credentials to the API which have not been allocated by this function.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
@@ -77,138 +77,138 @@ credentials to the API which have not been allocated by this function.</p>
 information, timeouts for authd<br>
 <span class="commandline">auth_info</span> &nbsp;&nbsp;(input) plugin specific
 identification of the server.</p>
-<p style="margin-left:.2in"><b>Returns</b>: A pointer to a newly allocated credential 
-if successful. On failure, the plugin should return NULL and set its errno to 
+<p style="margin-left:.2in"><b>Returns</b>: A pointer to a newly allocated credential
+if successful. On failure, the plugin should return NULL and set its errno to
 an appropriate value to indicate the reason for failure.</p>
 <p class="commandline">int slurm_auth_destroy (void *cr);</p>
-<p style="margin-left:.2in"><b>Description</b>: Deallocates a credential that 
-was allocated with <span class="commandline">slurm_auth_alloc()</span> and any 
+<p style="margin-left:.2in"><b>Description</b>: Deallocates a credential that
+was allocated with <span class="commandline">slurm_auth_create()</span> and any
 associated storage that has been allocated for it during its use.</p>
-<p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> cr</span>&nbsp; 
-&nbsp;&nbsp;(input) pointer to the credential that is to be deallocated. Cannot 
+<p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> cr</span>&nbsp;
+&nbsp;&nbsp;(input) pointer to the credential that is to be deallocated. Cannot
 be NULL.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
-the plugin should return SLURM_ERROR and set the errno to an appropriate value 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 <p class="footer"><a href="#top">top</a></p>
 
 <p class="commandline">int slurm_auth_verify (void *cr, char *auth_info );</p>
-<p style="margin-left:.2in"><b>Description</b>: Verifies that a credential is 
-in order and correctly identifies the associated user. It also verifies that the 
-credential has not expired. If verification is successful, the return values of 
-<span class="commandline">slurm_auth_get_uid()</span> and 
-<span class="commandline">slurm_auth_get_gid()</span> 
-in subsequent calls must correspond to the actual verified system UID and GID 
-of the user associated with the credential. Verification must fail if the credential 
-has not previously been activated, even if a credential implementation cannot 
-exist in an unactivated state. A credential's valid term is defined at activation 
-and verification must fail if the credential has expired, even if it would otherwise 
+<p style="margin-left:.2in"><b>Description</b>: Verifies that a credential is
+in order and correctly identifies the associated user. It also verifies that the
+credential has not expired. If verification is successful, the return values of
+<span class="commandline">slurm_auth_get_uid()</span> and
+<span class="commandline">slurm_auth_get_gid()</span>
+in subsequent calls must correspond to the actual verified system UID and GID
+of the user associated with the credential. Verification must fail if the credential
+has not previously been activated, even if a credential implementation cannot
+exist in an unactivated state. A credential's valid term is defined at activation
+and verification must fail if the credential has expired, even if it would otherwise
 be valid.</p>
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
-<span class="commandline">cr</span> &nbsp;&nbsp;(input) pointer to the credential 
+<span class="commandline">cr</span> &nbsp;&nbsp;(input) pointer to the credential
 which is to be verified. Cannot be NULL.<br>
 <span class="commandline">auth_info</span> &nbsp;&nbsp;(input) plugin specific
 identification of the server.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if the credential is 
-verified to be in order and has not expired. If the credential cannot be verified, 
-or if the credential has expired, the function should return SLURM_ERROR and set 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if the credential is
+verified to be in order and has not expired. If the credential cannot be verified,
+or if the credential has expired, the function should return SLURM_ERROR and set
 its errno to an appropriate value to indicate the reason for failure.</p>
 
 <p class="commandline">uid_t slurm_auth_get_uid (void *cr, char *auth_info);<br>
 gid_t slurm_auth_get_gid (void *cr, char *auth_info);</p>
-<p style="margin-left:.2in"><b>Description</b>: Extracts the numerical UID (GID) 
-of the user corresponding to the given credential. SLURM considers this value 
-trustworthy only if the credential has been successfully verified using 
-<span class="commandline">slurm_auth_verify()</span>. 
-An unverified credential does not immediately give rise to an error condition 
-in these functions, since this would require a plugin to distinguish between a 
-verified and an unverified credential, which may be computationally expensive. 
+<p style="margin-left:.2in"><b>Description</b>: Extracts the numerical UID (GID)
+of the user corresponding to the given credential. SLURM considers this value
+trustworthy only if the credential has been successfully verified using
+<span class="commandline">slurm_auth_verify()</span>.
+An unverified credential does not immediately give rise to an error condition
+in these functions, since this would require a plugin to distinguish between a
+verified and an unverified credential, which may be computationally expensive.
 A plugin may consider the lack of verification as an error.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
- <span class="commandline">cr</span> &nbsp;&nbsp; (input) pointer to the credential 
+ <span class="commandline">cr</span> &nbsp;&nbsp; (input) pointer to the credential
 containing the desired identification.  Cannot be NULL.<br>
 <span class="commandline">auth_info</span> &nbsp;&nbsp;(input) plugin specific
 identification of the server.</p>
-<p style="margin-left:.2in"><b>Returns</b>: If successful, the Linux UID (GID) 
-associated with the credential. In case of error, SLURM_AUTH_NOBODY should be 
+<p style="margin-left:.2in"><b>Returns</b>: If successful, the Linux UID (GID)
+associated with the credential. In case of error, SLURM_AUTH_NOBODY should be
 returned and errno set appropriately to indicate the cause of the failure.</p>
 
 <p class="footer"><a href="#top">top</a></p>
 
 <p class="commandline">int slurm_auth_pack (void *cr, Buf buf);</p>
-<p style="margin-left:.2in"><b>Description</b>: Marshals a credential into a buffer 
-for transmission according to the SLURM packing protocol. All authentication plugins 
-must first pack the plugin_type and then the plugin_version data before any plugin-specific 
-data elements are packed. slurm_auth_pack() and slurm_auth_pack() are strictly 
-reciprocal. The esult of a packing followed by an unpacking must be a functionally 
-equivalent credential. A credential is deemed appropriate for marshalling at any 
+<p style="margin-left:.2in"><b>Description</b>: Marshals a credential into a buffer
+for transmission according to the SLURM packing protocol. All authentication plugins
+must first pack the plugin_type and then the plugin_version data before any plugin-specific
+data elements are packed. slurm_auth_pack() and slurm_auth_unpack() are strictly
+reciprocal. The result of a packing followed by an unpacking must be a functionally
+equivalent credential. A credential is deemed appropriate for marshalling at any
 time after its allocation and before its destruction.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline">cr</span>&nbsp; &nbsp;&nbsp;(input) pointer to the credential 
+<span class="commandline">cr</span>&nbsp; &nbsp;&nbsp;(input) pointer to the credential
 to pack.<br>
-<span class="commandline">buf</span>&nbsp;&nbsp;&nbsp; (input/output) the buffer 
+<span class="commandline">buf</span>&nbsp;&nbsp;&nbsp; (input/output) the buffer
 into which the credential should be packed.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure 
-the plugin should return SLURM_ERROR and set the errno to indicate the reason 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure
+the plugin should return SLURM_ERROR and set the errno to indicate the reason
 for the failure.</p>
 <p class="commandline">int slurm_auth_unpack (void *cr, Buf buf);</p>
-<p style="margin-left:.2in"><b>Description</b>: Unmarshals a credential from a 
-buffer according to the SLURM packing protocol into a supplied (and presumed empty) 
-credential object. The unmarshalled credential is not assumed to be activated 
-or verified. The <span class="commandline">plugin_type</span> and <span class="commandline">plugin_version</span> 
-data should first be unpacked from the buffer and verified for applicability. 
-The API does not enforce that they must be equivalent, merely compatible. Compatibility 
+<p style="margin-left:.2in"><b>Description</b>: Unmarshals a credential from a
+buffer according to the SLURM packing protocol into a supplied (and presumed empty)
+credential object. The unmarshalled credential is not assumed to be activated
+or verified. The <span class="commandline">plugin_type</span> and <span class="commandline">plugin_version</span>
+data should first be unpacked from the buffer and verified for applicability.
+The API does not enforce that they must be equivalent, merely compatible. Compatibility
 is implementation-dependent.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline">cr</span> &nbsp;&nbsp;&nbsp;(output) pointer to the 
+<span class="commandline">cr</span> &nbsp;&nbsp;&nbsp;(output) pointer to the
 credential to pack.<br>
-<span class="commandline">buf</span> &nbsp;&nbsp;&nbsp;(input/output) the buffer 
+<span class="commandline">buf</span> &nbsp;&nbsp;&nbsp;(input/output) the buffer
 from which the credential should be unpacked.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if the credential was 
-successfully unpacked. In case of failure, the function should return SLURM_ERROR 
-and set errno appropriately to indicate the cause of the failure. If the function 
-fails, no assumptions are made about the state of the credential except its suitability 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if the credential was
+successfully unpacked. In case of failure, the function should return SLURM_ERROR
+and set errno appropriately to indicate the cause of the failure. If the function
+fails, no assumptions are made about the state of the credential except its suitability
 for destruction via <span class="commandline">slurm_auth_destroy()</span>.</p>
 <p class="commandline">int slurm_auth_print (void *cr, FILE *fp);</p>
-<p style="margin-left:.2in"><b>Description</b>: Writes a human-readable representation 
-of the credential to a standard I/O stream. There are no strict API constraints 
-on the behavior of this function, however it is recommended that the information 
-be as complete and as concise as possible. For example, lengthy digital &quot;signatures&quot; 
-need not be printed bitwise, but may be represented by their checksum. The intent 
+<p style="margin-left:.2in"><b>Description</b>: Writes a human-readable representation
+of the credential to a standard I/O stream. There are no strict API constraints
+on the behavior of this function, however it is recommended that the information
+be as complete and as concise as possible. For example, lengthy digital &quot;signatures&quot;
+need not be printed bitwise, but may be represented by their checksum. The intent
 is to provide a depiction of the credential for debugging purposes.</p>
 <p style="margin-left:.2in"><b>Arguments</b>: None.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure 
-the plugin should return SLURM_ERROR and set the errno appropriately to indicate 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure
+the plugin should return SLURM_ERROR and set the errno appropriately to indicate
 the cause of failure.</p>
 <p class="footer"><a href="#top">top</a></p>
 <p class="commandline">int slurm_auth_errno (void *cr);</p>
-<p style="margin-left:.2in"><b>Description</b>: Returns the current value of errno. 
-Whether the value is associated with the given credential or with the plugin as 
-a whole is implementation-dependent. Because this function can be used to discover 
+<p style="margin-left:.2in"><b>Description</b>: Returns the current value of errno.
+Whether the value is associated with the given credential or with the plugin as
+a whole is implementation-dependent. Because this function can be used to discover
 the reason why a credential allocation has failed, the argument is advisory. </p>
-<p style="margin-left:.2in"><b>Arguments</b>: <span class="commandline">cr</span>&nbsp;&nbsp;&nbsp;&nbsp; 
-(input) pointer to the credential, the status of whose most recently executed 
-API function is to be returned. This value may be NULL, indicating that the most 
+<p style="margin-left:.2in"><b>Arguments</b>: <span class="commandline">cr</span>&nbsp;&nbsp;&nbsp;&nbsp;
+(input) pointer to the credential, the status of whose most recently executed
+API function is to be returned. This value may be NULL, indicating that the most
 recent errno value applicable to the plugin as a whole is to be returned.</p>
-<p style="margin-left:.2in"><b>Returns</b>: The current value of errno or SLURM_SUCCESS 
+<p style="margin-left:.2in"><b>Returns</b>: The current value of errno or SLURM_SUCCESS
 if there is no error to report.</p>
 <p class="commandline">const char *slurm_auth_errstr (int errno);</p>
-<p style="margin-left:.2in"><b>Description</b>: Provides a human-readable string 
-associated with the given errno. The plugin need only supply error strings for 
-the errno values it defines and not for errno values listed above that are required 
+<p style="margin-left:.2in"><b>Description</b>: Provides a human-readable string
+associated with the given errno. The plugin need only supply error strings for
+the errno values it defines and not for errno values listed above that are required
 by the API.</p>
-<p style="margin-left:.2in"><b>Arguments</b>: <span class="commandline">errno</span>&nbsp;&nbsp;&nbsp; 
+<p style="margin-left:.2in"><b>Arguments</b>: <span class="commandline">errno</span>&nbsp;&nbsp;&nbsp;
 (input) the plugin-specific errno for which a corresponding error message is desired.</p>
-<p style="margin-left:.2in"><b>Returns</b>: A pointer to a static error message. 
-This function must always return a pointer to a string, even if the string is 
+<p style="margin-left:.2in"><b>Returns</b>: A pointer to a static error message.
+This function must always return a pointer to a string, even if the string is
 empty or ambiguous such as &quot;unknown error.&quot;</p>
 
 <h2>Versioning</h2>
-<p> This document describes version 0 of the SLURM Authentication API. Future 
-releases of SLURM may revise this API. An authentication plugin conveys its ability 
-to implement a particular API version using the mechanism outlined for SLURM plugins. 
-In addition, the credential is transmitted along with the version number of the 
-plugin that transmitted it. It is at the discretion of the plugin author whether 
+<p> This document describes version 0 of the SLURM Authentication API. Future
+releases of SLURM may revise this API. An authentication plugin conveys its ability
+to implement a particular API version using the mechanism outlined for SLURM plugins.
+In addition, the credential is transmitted along with the version number of the
+plugin that transmitted it. It is at the discretion of the plugin author whether
 to maintain data format compatibility across different versions of the plugin.</p>
 <p class="footer"><a href="#top">top</a></p>
 
diff --git a/doc/html/big_sys.shtml b/doc/html/big_sys.shtml
index 23f11c18ad7dc3ef77bc0eb8b84dc654bd21882c..f5394eadceaa77e2d65a5b0691026bda56462865 100644
--- a/doc/html/big_sys.shtml
+++ b/doc/html/big_sys.shtml
@@ -2,21 +2,21 @@
 
 <h1>Large Cluster Administration Guide</h1>
 
-<p>This document contains SLURM administrator information specifically 
-for clusters containing 1,024 nodes or more. 
-Virtually all SLURM components have been validated (through emulation) 
-for clusters containing up to 65,536 compute nodes. 
-Getting optimal performance at that scale does require some tuning and 
+<p>This document contains SLURM administrator information specifically
+for clusters containing 1,024 nodes or more.
+Virtually all SLURM components have been validated (through emulation)
+for clusters containing up to 65,536 compute nodes.
+Getting optimal performance at that scale does require some tuning and
 this document should help you off to a good start.
-A working knowledge of SLURM should be considered a prerequisite 
+A working knowledge of SLURM should be considered a prerequisite
 for this material.</p>
 
 <h2>Performance Results</h2>
 
-<p>SLURM has been used on clusters containing up to 4,184 nodes. 
-At that scale, the total time to execute a simple program (resource 
-allocation, task launch, I/O processing, and cleanup, e.g. 
-"time srun -N4184 -n8368 uname") at 8,368 tasks 
+<p>SLURM has been used on clusters containing up to 4,184 nodes.
+At that scale, the total time to execute a simple program (resource
+allocation, task launch, I/O processing, and cleanup, e.g.
+"time srun -N4184 -n8368 uname") at 8,368 tasks
 across the 4,184 nodes was under 57 seconds. The table below shows
 total execution times for several large clusters with different architectures.</p>
 <table border>
@@ -43,81 +43,81 @@ total execution times for several large clusters with different architectures.</
 
 <h2>Node Selection Plugin (SelectType)</h2>
 
-<p>While allocating individual processors within a node is great 
-for smaller clusters, the overhead of keeping track of the individual 
-processors and memory within each node adds significant overhead. 
+<p>While allocating individual processors within a node is great
+for smaller clusters, the overhead of keeping track of the individual
+processors and memory within each node adds significant overhead.
 For best scalability, allocate whole nodes using <i>select/linear</i>
 or <i>select/bluegene</i> and avoid <i>select/cons_res</i>.</p>
 
 <h2>Job Accounting Gather Plugin (JobAcctGatherType)</h2>
 
-<p>Job accounting relies upon the <i>slurmstepd</i> daemon on each compute 
+<p>Job accounting relies upon the <i>slurmstepd</i> daemon on each compute
 node periodically sampling data.
 This data collection will take compute cycles away from the application
 inducing what is known as <i>system noise</i>.
-For large parallel applications, this system noise can detract for 
+For large parallel applications, this system noise can detract from
 application scalability.
-For optimal application performance, disabling job accounting 
+For optimal application performance, disabling job accounting
 is best (<i>jobacct_gather/none</i>).
-Consider use of job completion records (<i>JobCompType</i>) for accounting 
+Consider use of job completion records (<i>JobCompType</i>) for accounting
 purposes as this entails far less overhead.
-If job accounting is required, configure the sampling interval 
+If job accounting is required, configure the sampling interval
 to a relatively large size (e.g. <i>JobAcctGatherFrequency=300</i>).
-Some experimentation may also be required to deal with collisions 
+Some experimentation may also be required to deal with collisions
 on data transmission.</p>
 
 <h2>Node Configuration</h2>
 
-<p>While SLURM can track the amount of memory and disk space actually found 
-on each compute node and use it for scheduling purposes, this entails 
-extra overhead. 
-Optimize performance by specifying the expected configuration using 
-the available parameters (<i>RealMemory</i>, <i>Procs</i>, and 
-<i>TmpDisk</i>). 
-If the node is found to contain less resources than configured, 
-it will be marked DOWN and not used. 
+<p>While SLURM can track the amount of memory and disk space actually found
+on each compute node and use it for scheduling purposes, this entails
+extra overhead.
+Optimize performance by specifying the expected configuration using
+the available parameters (<i>RealMemory</i>, <i>Procs</i>, and
+<i>TmpDisk</i>).
+If the node is found to contain fewer resources than configured,
+it will be marked DOWN and not used.
 Also set the <i>FastSchedule</i> parameter.
-While SLURM can easily handle a heterogeneous cluster, configuring 
+While SLURM can easily handle a heterogeneous cluster, configuring
 the nodes using the minimal number of lines in <i>slurm.conf</i>
 will both make for easier administration and better performance.</p>
 
 <h2>Timers</h2>
 
-<p>The configuration parameter <i>SlurmdTimeout</i> determines the interval 
+<p>The configuration parameter <i>SlurmdTimeout</i> determines the interval
 at which <i>slurmctld</i> routinely communicates with <i>slurmd</i>.
 Communications occur at half the <i>SlurmdTimeout</i> value.
-The purpose of this is to determine when a compute node fails 
-and thus should not be allocated work. 
-Longer intervals decrease system noise on compute nodes (we do 
-synchronize these requests across the cluster, but there will 
+The purpose of this is to determine when a compute node fails
+and thus should not be allocated work.
+Longer intervals decrease system noise on compute nodes (we do
+synchronize these requests across the cluster, but there will
 be some impact upon applications).
-For really large clusters, <i>SlurmdTimeout</i> values of 
-120 seconds or more are reasonable.</p> 
+For really large clusters, <i>SlurmdTimeout</i> values of
+120 seconds or more are reasonable.</p>
 
 <p>If MPICH-2 is used, the srun command will manage the key-pairs
-used to bootstrap the application. 
-Depending upon the processor speed and architecture, the communication 
-of key-pair information may require extra time. 
-This can be done by setting an environment variable PMI_TIME before 
-executing srun to launch the tasks. 
-The default value of PMI_TIME is 500 and this is the number of 
-microseconds alloted to transmit each key-pair. 
+used to bootstrap the application.
+Depending upon the processor speed and architecture, the communication
+of key-pair information may require extra time.
+This can be done by setting an environment variable PMI_TIME before
+executing srun to launch the tasks.
+The default value of PMI_TIME is 500 and this is the number of
+microseconds allotted to transmit each key-pair.
 We have executed up to 16,000 tasks with a value of PMI_TIME=4000.</p>
 
 <p>The individual slurmd daemons on compute nodes will initiate messages
-to the slurmctld daemon only when they start up or when the epilog 
-completes for a job. When a job allocated a large number of nodes 
-completes, it can cause a very large number of messages to be sent 
+to the slurmctld daemon only when they start up or when the epilog
+completes for a job. When a job allocated a large number of nodes
+completes, it can cause a very large number of messages to be sent
 by the slurmd daemons on these nodes to the slurmctld daemon all at
 the same time. In order to spread this message traffic out over time
-and avoid message loss, The <i>EpilogMsgTime</i> parameter may be 
-used. Note that even if messages are lost, they will be retransmitted, 
+and avoid message loss, the <i>EpilogMsgTime</i> parameter may be
+used. Note that even if messages are lost, they will be retransmitted,
 but this will result in a delay for reallocating resources to new jobs.</p>
 
 <h2>Other</h2>
 
 <p>SLURM uses hierarchical communications between the slurmd daemons
-in order to increase parallelism and improve performance. The 
+in order to increase parallelism and improve performance. The
 <i>TreeWidth</i> configuration parameter controls the fanout of messages.
 The default value is 50, meaning each slurmd daemon can communicate
 with up to 50 other slurmd daemons and over 2500 nodes can be contacted
@@ -127,8 +127,8 @@ Optimal system performance can typically be achieved if <i>TreeWidth</i>
 is set to the square root of the number of nodes in the cluster for
 systems having no more than 2500 nodes or the cube root for larger
 systems.</p>
- 
-<p>The srun command automatically increases its open file limit to 
+
+<p>The srun command automatically increases its open file limit to
 the hard limit in order to process all of the standard input and output
 connections to the launched tasks. It is recommended that you set the
 open file hard limit to 8192 across the cluster.</p>
diff --git a/doc/html/bluegene.shtml b/doc/html/bluegene.shtml
index d01449dce76230dc98cf42c34644e0f15e677dcf..a0a2a7b5188bec1116ce5b48dd5aec9c55d174f2 100644
--- a/doc/html/bluegene.shtml
+++ b/doc/html/bluegene.shtml
@@ -5,44 +5,44 @@
 <h2>Overview</h2>
 
 <p>This document describes the unique features of SLURM on the
-<a href="http://www.research.ibm.com/bluegene/">IBM BlueGene</a> systems. 
-You should be familiar with the SLURM's mode of operation on Linux clusters 
-before studying the relatively few differences in BlueGene operation 
+<a href="http://www.research.ibm.com/bluegene/">IBM BlueGene</a> systems.
+You should be familiar with the SLURM's mode of operation on Linux clusters
+before studying the relatively few differences in BlueGene operation
 described in this document.</p>
 
-<p>BlueGene systems have several unique features making for a few 
-differences in how SLURM operates there. 
-The BlueGene system consists of one or more <i>base partitions</i> or 
-<i>midplanes</i> connected in a three-dimensional torus. 
-Each <i>base partition</i> consists of 512 <i>c-nodes</i> each containing two processors; 
-one designed primarily for computations and the other primarily for managing communications. 
-The <i>c-nodes</i> can execute only one process and thus are unable to execute both 
+<p>BlueGene systems have several unique features making for a few
+differences in how SLURM operates there.
+The BlueGene system consists of one or more <i>base partitions</i> or
+<i>midplanes</i> connected in a three-dimensional torus.
+Each <i>base partition</i> consists of 512 <i>c-nodes</i> each containing two processors;
+one designed primarily for computations and the other primarily for managing communications.
+The <i>c-nodes</i> can execute only one process and thus are unable to execute both
 the user's jobs and SLURM's <i>slurmd</i> daemon.
-Thus the <i>slurmd</i> daemon executes on one of the BlueGene <i>Front End Nodes</i>. 
-This single <i>slurmd</i> daemon provides (almost) all of the normal SLURM services 
+Thus the <i>slurmd</i> daemon executes on one of the BlueGene <i>Front End Nodes</i>.
+This single <i>slurmd</i> daemon provides (almost) all of the normal SLURM services
 for every <i>base partition</i> on the system. </p>
 
-<p>Internally SLURM treats each <i>base partition</i> as one node with 
+<p>Internally SLURM treats each <i>base partition</i> as one node with
 1024 processors, which keeps the number of entities being managed reasonable.
 Since the current BlueGene software can sub-allocate a <i>base partition</i>
-into blocks of 32 and/or 128 <i>c-nodes</i>, more than one user job can execute 
+into blocks of 32 and/or 128 <i>c-nodes</i>, more than one user job can execute
 on each <i>base partition</i> (subject to system administrator configuration).
-To effectively utilize this environment, SLURM tools present the user with 
-the view that each <i>c-nodes</i> is a separate node, so allocation requests 
-and status information use <i>c-node</i> counts (this is a new feature in 
-SLURM version 1.1). 
-Since the <i>c-node</i> count can be very large, the suffix "k" can be used 
+To effectively utilize this environment, SLURM tools present the user with
+the view that each <i>c-node</i> is a separate node, so allocation requests
+and status information use <i>c-node</i> counts (this is a new feature in
+SLURM version 1.1).
+Since the <i>c-node</i> count can be very large, the suffix "k" can be used
 to represent multiples of 1024 (e.g. "2k" is equivalent to "2048").</p>
 
 <h2>User Tools</h2>
 
-<p>The normal set of SLURM user tools: sbatch, scancel, sinfo, squeue, and scontrol 
-provide all of the expected services except support for job steps. 
-SLURM performs resource allocation for the job, but initiation of tasks is performed 
+<p>The normal set of SLURM user tools: sbatch, scancel, sinfo, squeue, and scontrol
+provide all of the expected services except support for job steps.
+SLURM performs resource allocation for the job, but initiation of tasks is performed
 using the <i>mpirun</i> command. SLURM has no concept of a job step on BlueGene.
-Seven new sbatch options are available: 
+Seven new sbatch options are available:
 <i>--geometry</i> (specify job size in each dimension),
-<i>--no-rotate</i> (disable rotation of geometry), 
+<i>--no-rotate</i> (disable rotation of geometry),
 <i>--conn-type</i> (specify interconnect type between base partitions, mesh or torus).
 <i>--blrts-image</i> (specify alternative blrts image for bluegene --block.  Default if not set, BGL only.)
 <i>--cnload-image</i> (specify alternative c-node image for bluegene block.  Default if not set, BGP only.)
@@ -50,23 +50,23 @@ Seven new sbatch options are available:
 <i>--linux-image</i> (specify alternative linux image for bluegene block.  Default if not set, BGL only.)
 <i>--mloader-image</i> (specify alternative mloader image for bluegene block.  Default if not set).
 <i>--ramdisk-image</i> (specify alternative ramdisk image for bluegene block.  Default if not set, BGL only.)
-The <i>--nodes</i> option with a minimum and (optionally) maximum node count continues 
-to be available.  
+The <i>--nodes</i> option with a minimum and (optionally) maximum node count continues
+to be available.
 
 Note that this is a c-node count.</p>
 
-<p>To reiterate: sbatch is used to submit a job script, 
-but mpirun is used to launch the parallel tasks. 
-Note that a SLURM batch job's default stdout and stderr file names are generated 
-using the SLURM job ID. 
-When the SLURM control daemon is restarted, SLURM job ID values can be repeated, 
-therefore it is recommended that batch jobs explicitly specify unique names for 
+<p>To reiterate: sbatch is used to submit a job script,
+but mpirun is used to launch the parallel tasks.
+Note that a SLURM batch job's default stdout and stderr file names are generated
+using the SLURM job ID.
+When the SLURM control daemon is restarted, SLURM job ID values can be repeated,
+therefore it is recommended that batch jobs explicitly specify unique names for
 stdout and stderr files using the srun options <i>--output</i> and <i>--error</i>
 respectively.
-While the salloc command may be used to create an interactive SLURM job, 
-it will be the responsibility of the user to insure that the <i>bgblock</i> 
-is ready for use before initiating any mpirun commands. 
-SLURM will assume this responsibility for batch jobs. 
+While the salloc command may be used to create an interactive SLURM job,
+it will be the responsibility of the user to ensure that the <i>bgblock</i>
+is ready for use before initiating any mpirun commands.
+SLURM will assume this responsibility for batch jobs.
 The script that you submit to SLURM can contain multiple invocations of mpirun as
 well as any desired commands for pre- and post-processing.
 The mpirun command will get its <i>bgblock</i> information from the
@@ -79,13 +79,13 @@ date
 mpirun -exec /home/user/prog -cwd /home/user -args 123
 mpirun -exec /home/user/prog -cwd /home/user -args 124
 # post-processing
-date 
+date
 </pre></p>
- 
+
 <h3><a name="naming">Naming Convensions</a></h3>
-<p>The naming of base partitions includes a three-digit suffix representing the its 
-coordinates in the X, Y and Z dimensions with a zero origin.  
-For example, "bg012" represents the base partition whose coordinate is at X=0, Y=1 and Z=2.  In a system 
+<p>The naming of base partitions includes a three-digit suffix representing its
+coordinates in the X, Y and Z dimensions with a zero origin.
+For example, "bg012" represents the base partition whose coordinate is at X=0, Y=1 and Z=2.  In a system
 configured with <i>small blocks</i> (any block less than a full base partition) there will be divisions
 into the base partition notation.  For example, if there were 64 psets in the
 configuration, bg012[0-15] represents
@@ -93,20 +93,20 @@ the first quarter or first 16 ionodes of a midplane.  In BlueGene/L
 this would be 128 c-node block.  To represent the first nodecard in the
 second quarter or ionodes 16-19 the notation would be bg012[16-19], or
 a 32 c-node block.
-Since jobs must allocate consecutive base partitions in all three dimensions, we have developed 
-an abbreviated format for describing the base partitions in one of these three-dimensional blocks. 
-The base partition has a prefix determined from the system which is followed by the end-points 
+Since jobs must allocate consecutive base partitions in all three dimensions, we have developed
+an abbreviated format for describing the base partitions in one of these three-dimensional blocks.
+The base partition has a prefix determined from the system which is followed by the end-points
 of the block enclosed in square-brackets and separated by an "x".
-For example, "bg[620x731]" is used to represent the eight base partitions enclosed in a block 
-with end-points and bg620 and bg731 (bg620, bg621, bg630, bg631, bg720, bg721, 
+For example, "bg[620x731]" is used to represent the eight base partitions enclosed in a block
+with end-points of bg620 and bg731 (bg620, bg621, bg630, bg631, bg720, bg721,
 bg730 and bg731).</p></a>
 
 <p>
-<b>IMPORTANT:</b> SLURM version 1.2 or higher can handle a bluegene system of 
-sizes up to 36x36x36.  To try to keep with the 'three-digit suffix  
+<b>IMPORTANT:</b> SLURM version 1.2 or higher can handle a bluegene system of
+sizes up to 36x36x36.  To try to keep with the 'three-digit suffix
 representing the its coordinates in the X, Y and Z dimensions with a
 zero origin', we now support A-Z as valid numbers.  This makes it so
-the prefix <b>must always be lower case</b>, and any letters in the 
+the prefix <b>must always be lower case</b>, and any letters in the
 three-digit suffix <b> must always be upper case</b>.  This schema
 should be used in your slurm.conf file and in your bluegene.conf file
 if you put a prefix there even though it is not necessary there.  This
@@ -119,18 +119,18 @@ valid: bgl[000xC44] bgl000 bglZZZ
 invalid: BGL[000xC44] BglC00 bglb00 Bglzzz
 </p>
 
-<p>One new tool provided is <i>smap</i>. 
+<p>One new tool provided is <i>smap</i>.
 As of SLURM version 1.2, <i>sview</i> is
 another new tool offering even more viewing and configuring options.
-Smap is aware of system topography and provides a map of what base partitions 
-are allocated to jobs, partitions, etc. 
+Smap is aware of system topography and provides a map of what base partitions
+are allocated to jobs, partitions, etc.
 See the smap man page for details.
-A sample of smap output is provided below showing the location of five jobs. 
+A sample of smap output is provided below showing the location of five jobs.
 Note the format of the list of base partitions allocated to each job.
 Also note that idle (unassigned) base partitions are indicated by a period.
-Down and drained base partitions (those not available for use) are 
+Down and drained base partitions (those not available for use) are
 indicated by a number sign (bg703 in the display below).
-The legend is for illustrative purposes only. 
+The legend is for illustrative purposes only.
 The origin (zero in every dimension) is shown at the rear left corner of the bottom plane.
 Each set of four consecutive lines represents a plane in the Y dimension.
 Values in the X dimension increase to the right.
@@ -158,97 +158,97 @@ a a a a . . e e               |
 a a a a . . . #            Z
 </pre>
 
-<p>Note that jobs enter the SLURM state RUNNING as soon as the have been 
-allocated a bgblock. 
-If the bgblock is in a READY state, the job will begin execution almost 
-immediately. 
-Otherwise the execution of the job will not actually begin until the 
-bgblock is in a READY state, which can require booting the block and 
+<p>Note that jobs enter the SLURM state RUNNING as soon as they have been
+allocated a bgblock.
+If the bgblock is in a READY state, the job will begin execution almost
+immediately.
+Otherwise the execution of the job will not actually begin until the
+bgblock is in a READY state, which can require booting the block and
 a delay of minutes to do so.
 You can identify the bgblock associated with your job using the command
-<i>smap -Dj -c</i> and the state of the bgblock with the command 
+<i>smap -Dj -c</i> and the state of the bgblock with the command
 <i>smap -Db -c</i>.
-The time to boot a bgblock is related to its size, but should range from 
-from a few minutes to about 15 minutes for a bgblock containing 128 
-base partitions. 
-Only after the bgblock is READY will your job's output file be created 
-and the script execution begin. 
-If the bgblock boot fails, SLURM will attempt to reboot several times 
+The time to boot a bgblock is related to its size, but should range
+from a few minutes to about 15 minutes for a bgblock containing 128
+base partitions.
+Only after the bgblock is READY will your job's output file be created
+and the script execution begin.
+If the bgblock boot fails, SLURM will attempt to reboot several times
 before draining the associated base partitions and aborting the job.</p>
 
-<p>The job will continue to be in a RUNNING state until the bgjob has 
-completed and the bgblock ownership is changed. 
-The time for completing a bgjob has frequently been on the order of 
+<p>The job will continue to be in a RUNNING state until the bgjob has
+completed and the bgblock ownership is changed.
+The time for completing a bgjob has frequently been on the order of
 five minutes.
-In summary, your job may appear in SLURM as RUNNING for 15 minutes 
+In summary, your job may appear in SLURM as RUNNING for 15 minutes
 before the script actually begins to 5 minutes after it completes.
-These delays are the result of the BlueGene infrastructure issues and are 
+These delays are the result of the BlueGene infrastructure issues and are
 not due to anything in SLURM.</p>
 
-<p>When using smap in default output  mode you can scroll through 
-the different windows using the arrow keys.  
-The <b>up</b> and <b>down</b> arrow keys scroll 
-the window containing the grid, and the <b>left</b> and <b>right</b> arrow 
+<p>When using smap in default output mode you can scroll through
+the different windows using the arrow keys.
+The <b>up</b> and <b>down</b> arrow keys scroll
+the window containing the grid, and the <b>left</b> and <b>right</b> arrow
 keys scroll the window containing the text information.</p>
- 
+
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>System Administration</h2>
 
-<p>Building a BlueGene compatible system is dependent upon the 
-<i>configure</i> program locating some expected files. 
+<p>Building a BlueGene compatible system is dependent upon the
+<i>configure</i> program locating some expected files.
 In particular for a BlueGene/L system, the configure script searches
 for <i>libdb2.so</i> in the directories <i>/home/bgdb2cli/sqllib</i>
 and <i>/u/bgdb2cli/sqllib</i>.  If your DB2 library file is in a
 different location, use the configure
-option <i>--with-db2-dir=PATH</i> to specify the parent directory. 
-If you have the same version of the operating system on both the 
-Service Node (SN) and the Front End Nodes (FEN) then you can configure 
+option <i>--with-db2-dir=PATH</i> to specify the parent directory.
+If you have the same version of the operating system on both the
+Service Node (SN) and the Front End Nodes (FEN) then you can configure
 and build one set of files on the SN and install them on both the SN and FEN.
-Note that all smap functionality will be provided on the FEN 
-except for the ability to map SLURM node names to and from 
-row/rack/midplane data, which requires direct use of the Bridge API 
-calls only available on the SN.</p> 
+Note that all smap functionality will be provided on the FEN
+except for the ability to map SLURM node names to and from
+row/rack/midplane data, which requires direct use of the Bridge API
+calls only available on the SN.</p>
 
 <p>If you have different versions of the operating system on the SN and FEN
-(as was the case for some early system installations), then you will need 
-to configure and build two sets of files for installation. 
-One set will be for the Service Node (SN), which has direct access to the 
-Bridge APIs. 
-The second set will be for the Front End Nodes (FEN), which lack access to the 
-Bridge APIs and interact with using Remote Procedure Calls to the slurmctld 
+(as was the case for some early system installations), then you will need
+to configure and build two sets of files for installation.
+One set will be for the Service Node (SN), which has direct access to the
+Bridge APIs.
+The second set will be for the Front End Nodes (FEN), which lack access to the
+Bridge APIs and interact using Remote Procedure Calls to the slurmctld
 daemon.
-You should see "#define HAVE_BG 1" and "#define HAVE_FRONT_END 1" in the "config.h" 
-file for both the SN and FEN builds. 
-You should also see "#define HAVE_BG_FILES 1" in config.h on the SN before 
+You should see "#define HAVE_BG 1" and "#define HAVE_FRONT_END 1" in the "config.h"
+file for both the SN and FEN builds.
+You should also see "#define HAVE_BG_FILES 1" in config.h on the SN before
 building SLURM. </p>
 
 <p>The slurmctld daemon should execute on the system's service node.
-If an optional backup daemon is used, it must be in some location where 
+If an optional backup daemon is used, it must be in some location where
 it is capable of executing Bridge APIs.
-One slurmd daemon should be configured to execute on one of the front end nodes. 
-That one slurmd daemon represents communications channel for every base partition. 
-You can use the scontrol command to drain individual nodes as desired and 
+One slurmd daemon should be configured to execute on one of the front end nodes.
+That one slurmd daemon represents the communications channel for every base partition.
+You can use the scontrol command to drain individual nodes as desired and
 return them to service. </p>
 
 <p>The <i>slurm.conf</i> (configuration) file needs to have the value of <i>InactiveLimit</i>
-set to zero or not specified (it defaults to a value of zero). 
+set to zero or not specified (it defaults to a value of zero).
 This is because there are no job steps and we don't want to purge jobs prematurely.
-The value of <i>SelectType</i> must be set to "select/bluegene" in order to have 
-node selection performed using a system aware of the system's topography 
-and interfaces. 
-The value of <i>Prolog</i> should be set to the full pathname of a program that 
-will delay execution until the bgblock identified by the MPIRUN_PARTITION 
-environment variable is ready for use. It is recommended that you construct a script 
+The value of <i>SelectType</i> must be set to "select/bluegene" in order to have
+node selection performed using a system aware of the system's topography
+and interfaces.
+The value of <i>Prolog</i> should be set to the full pathname of a program that
+will delay execution until the bgblock identified by the MPIRUN_PARTITION
+environment variable is ready for use. It is recommended that you construct a script
 that serves this function and calls the supplied program <i>sbin/slurm_prolog</i>.
-The value of <i>Epilog</i> should be set to the full pathname of a program that 
+The value of <i>Epilog</i> should be set to the full pathname of a program that
 will wait until the bgblock identified by the MPIRUN_PARTITION environment
 variable is no longer usable by this job. It is recommended that you construct a script
 that serves this function and calls the supplied program <i>sbin/slurm_epilog</i>.
-The prolog and epilog programs are used to insure proper synchronization 
+The prolog and epilog programs are used to insure proper synchronization
 between the slurmctld daemon, the user job, and MMCS.
-A multitude of other functions may also be placed into the prolog and 
-epilog as desired (e.g. enabling/disabling user logins, puring file systmes, 
+A multitude of other functions may also be placed into the prolog and
+epilog as desired (e.g. enabling/disabling user logins, purging file systems,
 etc.).  Sample prolog and epilog scripts follow. </p>
 
 <pre>
@@ -270,162 +270,162 @@ etc.).  Sample prolog and epilog scripts follow. </p>
 /usr/sbin/slurm_epilog
 </pre>
 
-<p>Since jobs with different geometries or other characteristics might not 
-interfere with each other, scheduling is somewhat different on a BlueGene 
+<p>Since jobs with different geometries or other characteristics might not
+interfere with each other, scheduling is somewhat different on a BlueGene
 system than typical clusters.
-SLURM's builtin scheduler on BlueGene will sort pending jobs and then attempt 
-to schedule <b>all</b> of them in priority order. 
+SLURM's builtin scheduler on BlueGene will sort pending jobs and then attempt
+to schedule <b>all</b> of them in priority order.
 This essentially functions as if there is a separate queue for each job size.
 SLURM's backfill scheduler on BlueGene will enforce FIFO (first-in first-out)
-scheduling with backfill (lower priority jobs will start early if doing so 
-will not impact the expected initiation time of a higher priority job). 
+scheduling with backfill (lower priority jobs will start early if doing so
+will not impact the expected initiation time of a higher priority job).
 As on other systems, effective backfill relies upon users setting reasonable
 job time limits.
-Note that SLURM does support different partitions with an assortment of 
+Note that SLURM does support different partitions with an assortment of
 different scheduling parameters.
-For example, SLURM can have defined a partition for full system jobs that 
-is enabled to execute jobs only at certain times; while a default partition 
-could be configured to execute jobs at other times. 
-Jobs could still be queued in a partition that is configured in a DOWN 
-state and scheduled to execute when changed to an UP state. 
-Base partitions can also be moved between slurm partitions either by changing 
-the <i>slurm.conf</i> file and restarting the slurmctld daemon or by using 
+For example, SLURM can have defined a partition for full system jobs that
+is enabled to execute jobs only at certain times; while a default partition
+could be configured to execute jobs at other times.
+Jobs could still be queued in a partition that is configured in a DOWN
+state and scheduled to execute when changed to an UP state.
+Base partitions can also be moved between slurm partitions either by changing
+the <i>slurm.conf</i> file and restarting the slurmctld daemon or by using
 the scontrol reconfig command. </p>
 
-<p>SLURM node and partition descriptions should make use of the 
+<p>SLURM node and partition descriptions should make use of the
 <a href="#naming">naming</a> conventions described above. For example,
 "NodeName=bg[000x733] NodeAddr=frontend0 NodeHostname=frontend0 Procs=1024"
 is used in <i>slurm.conf</i> to define a BlueGene system with 128 midplanes
 in an 8 by 4 by 4 matrix.
-The node name prefix of "bg" defined by NodeName can be anything you want, 
+The node name prefix of "bg" defined by NodeName can be anything you want,
 but needs to be consistent throughout the <i>slurm.conf</i> file.
-Note that the values of both NodeAddr and NodeHostname for all 
-128 base partitions is the name of the front-end node executing 
-the slurmd daemon. 
+Note that the values of both NodeAddr and NodeHostname for all
+128 base partitions is the name of the front-end node executing
+the slurmd daemon.
 No computer is actually expected to a hostname of "bg000" and no
 attempt will be made to route message traffic to this address. </p>
 
-<p>While users are unable to initiate SLURM job steps on BlueGene systems, 
-this restriction does not apply to user root or <i>SlurmUser</i>. 
-Be advised that the one slurmd supporting all nodes is unable to manage a 
-large number of job steps, so this ability should be used only to verify normal 
-SLURM operation. 
-If large numbers of job steps are initiated by slurmd, expect the daemon to 
-fail due to lack of memory or other resources. 
+<p>While users are unable to initiate SLURM job steps on BlueGene systems,
+this restriction does not apply to user root or <i>SlurmUser</i>.
+Be advised that the one slurmd supporting all nodes is unable to manage a
+large number of job steps, so this ability should be used only to verify normal
+SLURM operation.
+If large numbers of job steps are initiated by slurmd, expect the daemon to
+fail due to lack of memory or other resources.
 It is best to minimize other work on the front-end node executing slurmd
 so as to maximize its performance and minimize other risk factors.</p>
 
 <a name="bluegene-conf"><h2>Bluegene.conf File Creation</h2></a>
-<p>In addition to the normal <i>slurm.conf</i> file, a new 
-<i>bluegene.conf</i> configuration file is required with information pertinent 
+<p>In addition to the normal <i>slurm.conf</i> file, a new
+<i>bluegene.conf</i> configuration file is required with information pertinent
 to the sytem.
 Put <i>bluegene.conf</i> into the SLURM configuration directory with
 <i>slurm.conf</i>.
-A sample file is installed in <i>bluegene.conf.example</i>. 
-System administrators should use the <i>smap</i> tool to build appropriate 
-configuration file for static partitioning. 
-Note that <i>smap -Dc</i> can be run without the SLURM daemons 
+A sample file is installed in <i>bluegene.conf.example</i>.
+System administrators should use the <i>smap</i> tool to build appropriate
+configuration file for static partitioning.
+Note that <i>smap -Dc</i> can be run without the SLURM daemons
 active to establish the initial configuration.
-Note that the defined bgblocks may not overlap (except for the 
+Note that the defined bgblocks may not overlap (except for the
 full-system bgblock, which is implicitly created).
 See the smap man page for more information.</p>
 
-<p>There are 3 different modes which the system administrator can define  
-BlueGene partitions (or bgblocks) available to execute jobs: static, 
-overlap, and dynamic. 
+<p>There are 3 different modes in which the system administrator can define
+BlueGene partitions (or bgblocks) available to execute jobs: static,
+overlap, and dynamic.
 Jobs must then execute in one of the created bgblocks.
 (<b>NOTE:</b> bgblocks are unrelated to SLURM partitions.)</p>
 
 <p>The default mode of partitioning is <i>static</i>.
-In this mode, the system administrator must explicitly define each 
-of the bgblocks in the <i>bluegene.conf</i> file.  
-Each of these bgblocks are explicitly configured with either a 
+In this mode, the system administrator must explicitly define each
+of the bgblocks in the <i>bluegene.conf</i> file.
+Each of these bgblocks are explicitly configured with either a
 mesh or torus interconnect.
-They must also not overlap, except for the implicitly defined full-system 
-bgblock. 
-Note that bgblocks are not rebooted between jobs in the mode 
-except when going to/from full-system jobs. 
-Eliminating bgblock booting can significantly improve system 
+They must also not overlap, except for the implicitly defined full-system
+bgblock.
+Note that bgblocks are not rebooted between jobs in the mode
+except when going to/from full-system jobs.
+Eliminating bgblock booting can significantly improve system
 utilization (eliminating boot time) and reliability.</p>
 
-<p>The second mode is <i>overlap</i> partitioning.  
-Overlap partitioning is very similar to static partitioning in that 
-each bgblocks must be explicitly defined in the <i>bluegene.conf</i> 
-file, but these partitions can overlap each other.  
-In this mode <b>it is highly recommended that none of the bgblocks 
+<p>The second mode is <i>overlap</i> partitioning.
+Overlap partitioning is very similar to static partitioning in that
+each bgblocks must be explicitly defined in the <i>bluegene.conf</i>
+file, but these partitions can overlap each other.
+In this mode <b>it is highly recommended that none of the bgblocks
 have any passthroughs in the X-dimension associated to them</b>.
-Usually this is only an issue on larger BlueGene systems.  
+Usually this is only an issue on larger BlueGene systems.
 <b>It is advisable to use this mode with extreme caution.</b>
-Make sure you know what you doing to assure the bgblocks will 
-boot without dependency on the state of any base partition 
-not included the bgblock.</p>  
+Make sure you know what you are doing to assure the bgblocks will
+boot without dependency on the state of any base partition
+not included in the bgblock.</p>
 
-<p>In the two previous modes you must insure that the base 
-partitions defined in <i>bluegene.conf</i> are consistent with 
+<p>In the two previous modes you must insure that the base
+partitions defined in <i>bluegene.conf</i> are consistent with
 those defined in <i>slurm.conf</i>.
-Note the <i>bluegene.conf</i> file contains only the numeric 
-coordinates of base partitions while <i>slurm.conf</i> contains 
+Note the <i>bluegene.conf</i> file contains only the numeric
+coordinates of base partitions while <i>slurm.conf</i> contains
 the name prefix in addition to the numeric coordinates.</p>
 
-<p>The final mode is <i>dynamic</i> partitioning.  
-Dynamic partitioning was developed primarily for smaller BlueGene systems, 
+<p>The final mode is <i>dynamic</i> partitioning.
+Dynamic partitioning was developed primarily for smaller BlueGene systems,
 but can be used on larger systems.
 Dynamic partitioning may introduce fragmentation of resources.
-This fragementaiton may be severe since SLURM will run a job anywhere 
-resources are available with little thought of the future.  
-As with overlap partitioning, <b>use dynamic partitioning with 
-caution!</b>  
-This mode can result in job starvation since smaller jobs will run 
+This fragmentation may be severe since SLURM will run a job anywhere
+resources are available with little thought of the future.
+As with overlap partitioning, <b>use dynamic partitioning with
+caution!</b>
+This mode can result in job starvation since smaller jobs will run
 if resources are available and prevent larger jobs from running.
-Bgblocks need not be assigned in the <i>bluegene.conf</i> file 
+Bgblocks need not be assigned in the <i>bluegene.conf</i> file
 for this mode.</p>
 
 <p>Blocks can be freed or set in an error state with scontrol,
 (i.e. "<i>scontrol update BlockName=RMP0 state=error</i>").
 This will end any job on the block and set the state of the block to ERROR
-making it so no job will run on the block.  To set it back to a useable 
-state set the state to free (i.e. 
-"<i>scontrol update BlockName=RMP0 state=free</i>"). 
+making it so no job will run on the block.  To set it back to a usable
+state set the state to free (i.e.
+"<i>scontrol update BlockName=RMP0 state=free</i>").
 
 <p>Alternatively, if only part of a base partition needs to be put
 into an error state which isn't already in a block of the size you
 need, you can set a set of ionodes into an error state with scontrol,
 (i.e. "<i>scontrol update subbpname=bg000[0-3] state=error</i>").
-This will end any job on the nodes listed, create a block there, and set 
+This will end any job on the nodes listed, create a block there, and set
 the state of the block to ERROR making it so no job will run on the
-block.  To set it back to a useable state set the state to free (i.e. 
+block.  To set it back to a usable state set the state to free (i.e.
 "<i>scontrol update BlockName=RMP0 state=free</i>" or
  "<i>scontrol update subbpname=bg000[0-3] state=free</i>"). This is
  helpful to allow other jobs to run on the unaffected nodes in
  the base partition.
 
 
-<p>One of these modes must be defined in the <i>bluegene.conf</i> file 
+<p>One of these modes must be defined in the <i>bluegene.conf</i> file
 with the option <i>LayoutMode=MODE</i> (where MODE=STATIC, DYNAMIC or OVERLAP).</p>
 
-<p>The number of c-nodes in a base partition and in a node card must 
-be defined.  
-This is done using the keywords <i>BasePartitionNodeCnt=NODE_COUNT</i> 
+<p>The number of c-nodes in a base partition and in a node card must
+be defined.
+This is done using the keywords <i>BasePartitionNodeCnt=NODE_COUNT</i>
 and <i>NodeCardNodeCnt=NODE_COUNT</i> respectively in the <i>bluegene.conf</i>
 file (i.e. <i>BasePartitionNodeCnt=512</i> and <i>NodeCardNodeCnt=32</i>).</p>
 
-<p>Note that the <i>Numpsets</i> values defined in 
+<p>Note that the <i>Numpsets</i> values defined in
 <i>bluegene.conf</i> is used only when SLURM creates bgblocks this
 determines if the system is IO rich or not.  For most bluegene/L
 systems this value is either 8 (for IO poor systems) or 64 (for IO rich
 systems).
 <p>The <i>Images</i> can change during job start based on input from
 the user.
-If you change the bgblock layout, then slurmctld and slurmd should 
+If you change the bgblock layout, then slurmctld and slurmd should
 both be cold-started (e.g. <b>/etc/init.d/slurm startclean</b>).
-If you wish to modify the <i>Numpsets</i> values 
+If you wish to modify the <i>Numpsets</i> values
 for existing bgblocks, either modify them manually or destroy the bgblocks
-and let SLURM recreate them. 
-Note that in addition to the bgblocks defined in <i>bluegene.conf</i>, an 
-additional bgblock is created containing all resources defined 
-all of the other defined bgblocks. 
-Make use of the SLURM partition mechanism to control access to these 
-bgblocks. 
+and let SLURM recreate them.
+Note that in addition to the bgblocks defined in <i>bluegene.conf</i>, an
+additional bgblock is created containing all resources defined
+all of the other defined bgblocks.
+Make use of the SLURM partition mechanism to control access to these
+bgblocks.
 A sample <i>bluegene.conf</i> file is shown below.
 <pre>
 ###############################################################################
@@ -436,9 +436,9 @@ A sample <i>bluegene.conf</i> file is shown below.
 # MloaderImage:         MloaderImage used for creation of all bgblocks.
 # RamDiskImage:         RamDiskImage used for creation of all bgblocks.
 #
-# You may add extra images which a user can specify from the srun 
-# command line (see man srun).  When adding these images you may also add 
-# a Groups= at the end of the image path to specify which groups can 
+# You may add extra images which a user can specify from the srun
+# command line (see man srun).  When adding these images you may also add
+# a Groups= at the end of the image path to specify which groups can
 # use the image.
 #
 # AltBlrtsImage:           Alternative BlrtsImage(s).
@@ -450,13 +450,13 @@ A sample <i>bluegene.conf</i> file is shown below.
 #                       STATIC:  Use defined non-overlapping bgblocks
 #                       OVERLAP: Use defined bgblocks, which may overlap
 #                       DYNAMIC: Create bgblocks as needed for each job
-# BasePartitionNodeCnt: Number of c-nodes per base partition  
+# BasePartitionNodeCnt: Number of c-nodes per base partition
 # NodeCardNodeCnt:      Number of c-nodes per node card.
-# Numpsets:             The Numpsets used for creation of all bgblocks 
-#                       equals this value multiplied by the number of 
+# Numpsets:             The Numpsets used for creation of all bgblocks
+#                       equals this value multiplied by the number of
 #                       base partitions in the bgblock.
 #
-# BridgeAPILogFile:  Pathname of file in which to write the 
+# BridgeAPILogFile:  Pathname of file in which to write the
 #                    Bridge API logs.
 # BridgeAPIVerbose:  How verbose the BG Bridge API logs should be
 #                    0: Log only error and warning messages
@@ -467,10 +467,10 @@ A sample <i>bluegene.conf</i> file is shown below.
 # DenyPassthrough:   Prevents use of passthrough ports in specific
 #                    dimensions, X, Y, and/or Z, plus ALL
 #
-# NOTE: The bgl_serial value is set at configuration time using the 
+# NOTE: The bgl_serial value is set at configuration time using the
 #       "--with-bgl-serial=" option. Its default value is "BGL".
 ###############################################################################
-# These are the default images with are used if the user doesn't specify 
+# These are the default images which are used if the user doesn't specify
 # which image they want
 BlrtsImage=/bgl/BlueLight/ppcfloor/bglsys/bin/rts_hw.rts
 LinuxImage=/bgl/BlueLight/ppcfloor/bglsys/bin/zImage.elf
@@ -509,9 +509,9 @@ BridgeAPIVerbose=0
 # Define the static/overlap partitions (bgblocks)
 #
 # BPs: The base partitions (midplanes) in the bgblock using XYZ coordinates
-# Type:  Connection type "MESH" or "TORUS" or "SMALL", default is "TORUS" 
+# Type:  Connection type "MESH" or "TORUS" or "SMALL", default is "TORUS"
 #        Type SMALL will divide a midplane into multiple bgblocks
-#        based off options NodeCards and Quarters to determine type of 
+#        based off options NodeCards and Quarters to determine type of
 #        small blocks.
 #
 # IMPORTANT NOTES:
@@ -530,123 +530,123 @@ BridgeAPIVerbose=0
 ###############################################################################
 # volume = 1x1x1 = 1
 BPs=[000x000] Type=TORUS                            # 1x1x1 =  1 midplane
-BPs=[001x001] Type=SMALL 32CNBlocks=4 128CNBlocks=3 # 1x1x1 = 4-Nodecard sized 
-                                                    # cnode blocks 3-Base 
-                                                    # Partition Quarter sized 
+BPs=[001x001] Type=SMALL 32CNBlocks=4 128CNBlocks=3 # 1x1x1 = 4-Nodecard sized
+                                                    # cnode blocks 3-Base
+                                                    # Partition Quarter sized
                                                     # c-node blocks
 
 </pre></p>
 
-<p>The above <i>bluegene.conf</i> file defines multiple bgblocks to be 
-created in a single midplane (see the "SMALL" option). 
+<p>The above <i>bluegene.conf</i> file defines multiple bgblocks to be
+created in a single midplane (see the "SMALL" option).
 Using this mechanism, up to 32 independent jobs each consisting of 1
-  32 cnodes can be executed 
+  32 cnodes can be executed
 simultaneously on a one-rack BlueGene system.
-If defining bgblocks of <i>Type=SMALL</i>, the SLURM partition 
-containing them as defined in <i>slurm.conf</i> must have the 
-parameter <i>Shared=force</i> to enable scheduling of multiple 
-jobs on what SLURM considers a single node. 
-SLURM partitions that do not contain bgblocks of <i>Type=SMALL</i> 
-may have the parameter <i>Shared=no</i> for a slight improvement in 
-scheduler performance. 
-As in all SLURM configuration files, parameters and values 
+If defining bgblocks of <i>Type=SMALL</i>, the SLURM partition
+containing them as defined in <i>slurm.conf</i> must have the
+parameter <i>Shared=force</i> to enable scheduling of multiple
+jobs on what SLURM considers a single node.
+SLURM partitions that do not contain bgblocks of <i>Type=SMALL</i>
+may have the parameter <i>Shared=no</i> for a slight improvement in
+scheduler performance.
+As in all SLURM configuration files, parameters and values
 are case insensitive.</p>
 
 <p> With a BlueGene/P system the image names are different.  The
   correct image names are CnloadImage, MloaderImage, and IoloadImage.
   You can also use alternate images just the same as discribed above.
 
-<p>One more thing is required to support SLURM interactions with 
+<p>One more thing is required to support SLURM interactions with
 the DB2 database (at least as of the time this was written).
 DB2 database access is required by the slurmctld daemon only.
-All other SLURM daemons and commands interact with DB2 using 
+All other SLURM daemons and commands interact with DB2 using
 remote procedure calls, which are processed by slurmctld.
 DB2 access is dependent upon the environment variable
-<i>BRIDGE_CONFIG_FILE</i>. 
-Make sure this is set appropriate before initiating the 
-slurmctld daemon. 
-If desired, this environment variable and any other logic 
-can be executed through the script <i>/etc/sysconfig/slurm</i>, 
-which is automatically executed by <i>/etc/init.d/slurm</i> 
+<i>BRIDGE_CONFIG_FILE</i>.
+Make sure this is set appropriately before initiating the
+slurmctld daemon.
+If desired, this environment variable and any other logic
+can be executed through the script <i>/etc/sysconfig/slurm</i>,
+which is automatically executed by <i>/etc/init.d/slurm</i>
 prior to initiating the SLURM daemons.</p>
 
-<p>When slurmctld is initially started on an idle system, the bgblocks 
-already defined in MMCS are read using the Bridge APIs. 
-If these bgblocks do not correspond to those defined in the <i>bluegene.conf</i> 
-file, the old bgblocks with a prefix of "RMP" are destroyed and new ones 
-created. 
-When a job is scheduled, the appropriate bgblock is identified, 
-its user set, and it is booted. 
+<p>When slurmctld is initially started on an idle system, the bgblocks
+already defined in MMCS are read using the Bridge APIs.
+If these bgblocks do not correspond to those defined in the <i>bluegene.conf</i>
+file, the old bgblocks with a prefix of "RMP" are destroyed and new ones
+created.
+When a job is scheduled, the appropriate bgblock is identified,
+its user set, and it is booted.
 Node use (virtual or coprocessor) is set from the mpirun command line now,
 SLURM has nothing to do with setting the node use.
-Subsequent jobs use this same bgblock without rebooting by changing 
+Subsequent jobs use this same bgblock without rebooting by changing
 the associated user field.
 The only time bgblocks should be freed and rebooted, in normal operation,
-is when going to or from full-system 
-jobs (two or more bgblocks sharing base partitions can not be in a 
+is when going to or from full-system
+jobs (two or more bgblocks sharing base partitions can not be in a
 ready state at the same time).
-When this logic became available at LLNL, approximately 85 percent of 
+When this logic became available at LLNL, approximately 85 percent of
 bgblock boots were eliminated and the overhead of job startup went
 from about 24% to about 6% of total job time.
-Note that bgblocks will remain in a ready (booted) state when 
-the SLURM daemons are stopped. 
-This permits SLURM daemon restarts without loss of running jobs 
+Note that bgblocks will remain in a ready (booted) state when
+the SLURM daemons are stopped.
+This permits SLURM daemon restarts without loss of running jobs
 or rebooting of bgblocks.  </p>
 
-<p>Be aware that SLURM will issue multiple bgblock boot requests as 
-needed (e.g. when the boot fails). 
-If the bgblock boot requests repeatedly fail, SLURM will configure 
-the failing base partitions to a DRAINED state so as to avoid continuing 
-repeated reboots and the likely failure of user jobs. 
-A system administrator should address the problem before returning 
+<p>Be aware that SLURM will issue multiple bgblock boot requests as
+needed (e.g. when the boot fails).
+If the bgblock boot requests repeatedly fail, SLURM will configure
+the failing base partitions to a DRAINED state so as to avoid continuing
+repeated reboots and the likely failure of user jobs.
+A system administrator should address the problem before returning
 the base partitions to service.</p>
 
-<p>If you cold-start slurmctld (<b>/etc/init.d/slurm startclean</b> 
-or <b>slurmctld -c</b>) it is recommended that you also cold-start 
-the slurmd at the same time. 
-Failure to do so may result in errors being reported by both slurmd 
+<p>If you cold-start slurmctld (<b>/etc/init.d/slurm startclean</b>
+or <b>slurmctld -c</b>) it is recommended that you also cold-start
+the slurmd at the same time.
+Failure to do so may result in errors being reported by both slurmd
 and slurmctld due to bgblocks that previously existed being deleted.</p>
 
-<p>A new tool <i>sfree</i> has also been added to help system 
-administrators free a  bgblock on request (i.e. 
-"<i>sfree --bgblock=&lt;blockname&gt;</i>"). 
+<p>A new tool <i>sfree</i> has also been added to help system
+administrators free a bgblock on request (i.e.
+"<i>sfree --bgblock=&lt;blockname&gt;</i>").
 Run <i>sfree --help</i> for more information.</p>
 
 <h4>Debugging</h4>
 
-<p>All of the testing and debugging guidance provided in 
+<p>All of the testing and debugging guidance provided in
 <a href="quickstart_admin.html"> Quick Start Administrator Guide</a>
 apply to BlueGene systems.
-One can start the <i>slurmctld</i> and <i>slurmd</i> in the foreground 
-with extensive debugging to establish basic functionality. 
-Once running in production, the configured <i>SlurmctldLog</i> and 
+One can start the <i>slurmctld</i> and <i>slurmd</i> in the foreground
+with extensive debugging to establish basic functionality.
+Once running in production, the configured <i>SlurmctldLog</i> and
 <i>SlurmdLog</i> files will provide historical system information.
-On BlueGene systems, there is also a <i>BridgeAPILogFile</i> defined 
-in <i>bluegene.conf</i> which can be configured to contain detailed 
+On BlueGene systems, there is also a <i>BridgeAPILogFile</i> defined
+in <i>bluegene.conf</i> which can be configured to contain detailed
 information about every Bridge API call issued.</p>
 
-<p>Note that slurmcltld log messages of the sort 
-<i>Nodes bg[000x133] not responding</i> are indicative of the slurmd 
-daemon serving as a front-end to those base partitions is not responding (on 
-non-BlueGene systems, the slurmd actually does run on the compute 
+<p>Note that slurmctld log messages of the sort
+<i>Nodes bg[000x133] not responding</i> are indicative of the slurmd
+daemon serving as a front-end to those base partitions is not responding (on
+non-BlueGene systems, the slurmd actually does run on the compute
 nodes, so the message is more meaningful there). </p>
 
-<p>Note that you can emulate a BlueGene/L system on stand-alone Linux 
-system. 
+<p>Note that you can emulate a BlueGene/L system on stand-alone Linux
+system.
 Run <b>configure</b> with the <b>--enable-bgl-emulation</b> option.
 This will define "HAVE_BG", "HAVE_BGL", and "HAVE_FRONT_END" in the
-config.h file. 
+config.h file.
 You can also emulate a BlueGene/P system with
-  the <b>--enable-bgp-emulation</b> option. 
+  the <b>--enable-bgp-emulation</b> option.
 This will define "HAVE_BG", "HAVE_BGP", and "HAVE_FRONT_END" in the
-config.h file. 
-Then execute <b>make</b> normally. 
-These variables will build the code as if it were running 
+config.h file.
+Then execute <b>make</b> normally.
+These variables will build the code as if it were running
 on an actual BlueGene computer, but avoid making calls to the
-Bridge libary (that is controlled by the variable "HAVE_BG_FILES", 
-which is left undefined). You can use this to test configurations, 
+Bridge library (that is controlled by the variable "HAVE_BG_FILES",
+which is left undefined). You can use this to test configurations,
 scheduling logic, etc. </p>
- 
+
 <p class="footer"><a href="#top">top</a></p>
 
 <p style="text-align:center;">Last modified 17 March 2009</p>
diff --git a/doc/html/checkpoint_blcr.shtml b/doc/html/checkpoint_blcr.shtml
index 917a4fa5ab34208b752544b614c58b6d3adcb236..f61ad5c977f69f15d42fb2d4fff54495d07367ba 100644
--- a/doc/html/checkpoint_blcr.shtml
+++ b/doc/html/checkpoint_blcr.shtml
@@ -3,7 +3,7 @@
 <h1><a name="top">SLURM Checkpoint/Restart with BLCR</a></h1>
 
 <h2>Overview</h2>
-<p>SLURM version 2.0 has been integrated with 
+<p>SLURM version 2.0 has been integrated with
 <a href="https://ftg.lbl.gov/CheckpointRestart/CheckpointRestart.shtml">
 Berkeley Lab Checkpoint/Restart (BLCR)</a> in order to provide automatic
 job checkpoint/restart support.
@@ -12,7 +12,7 @@ Functionality provided includes:
 <li>Checkpoint of whole batch jobs in addition to job steps</li>
 <li>Periodic checkpoint of batch jobs and job steps</li>
 <li>Restart execution of batch jobs and job steps from checkpoint files</li>
-<li>Automatically  requeue and restart the execution of batch jobs upon 
+<li>Automatically  requeue and restart the execution of batch jobs upon
 node failure</li>
 </ol></p>
 
@@ -44,7 +44,7 @@ files of a job step will be read from</li>
 <li><b>SLURM_RESTART_DIR</b> is equivalent to <b>--restart-dir</b></li>
 </li>
 </ul>
-<p>The environment variable <b>SLURM_SRUN_CR_SOCKET</b> is used for job step 
+<p>The environment variable <b>SLURM_SRUN_CR_SOCKET</b> is used for job step
 logic to interact with the <b>srun_cr</b> command.</p>
 
 <h3>srun_cr</h3>
@@ -53,25 +53,25 @@ logic to interact with the <b>srun_cr</b> command.</p>
 plugin to checkpoint/restart tasks launched by srun.
 The design of <b>srun_cr</b> is inspired by <b>mpiexec_cr</b> from MVAPICH2 and
 <b>cr_restart</b> form BLCR.
-It is a wrapper around the <b>srun</b> command to enable batch job 
+It is a wrapper around the <b>srun</b> command to enable batch job
 checkpoint/restart support when used with SLURM's <b>checkpoint/blcr</b> plugin.
 
-<p>The <b>srun_cr</b> execute line options are identical to those of the 
+<p>The <b>srun_cr</b> execute line options are identical to those of the
 <b>srun</b> command.
 See "man srun" for details.</p>
 
 <p>After initialization, <b>srun_cr</b> registers a thread context callback
 function.
 Then it forks a process and executes "cr_run --omit srun" with its arguments.
-<b>cr_run</b> is employed to exclude the <b>srun</b> process from being dumped 
+<b>cr_run</b> is employed to exclude the <b>srun</b> process from being dumped
 upon checkpoint.
-All catchable signals except SIGCHLD sent to <b>srun_cr</b> will be forwarded 
+All catchable signals except SIGCHLD sent to <b>srun_cr</b> will be forwarded
 to the child <b>srun</b> process.
 SIGCHLD will be captured to mimic the exit status of <b>srun</b> when it exits.
-Then <b>srun_cr</b> loops waiting for termination of tasks being launched 
+Then <b>srun_cr</b> loops waiting for termination of tasks being launched
 from <b>srun</b>.</p>
 
-<p>The step launch logic of SLURM is augmented to check if <b>srun</b> is 
+<p>The step launch logic of SLURM is augmented to check if <b>srun</b> is
 running under <b>srun_cr</b>.
 If true, the environment variable <b>SURN_SRUN_CR_SOCKET</b> should be present,
 the value of which is the address of a Unix domain socket created and listened
@@ -80,16 +80,16 @@ After launching the tasks, <b>srun</b> tires to connect to the socket and sends
 the job ID, step ID and the nodes allocated to the step to <b>srun_cr</b>.</p>
 
 <p>Upon checkpoint, </b>srun_cr</b> checks to see if the tasks have been launched.
-If not </b>srun_cr</b> first forwards the checkpoint request to the tasks by 
+If not <b>srun_cr</b> first forwards the checkpoint request to the tasks by
 calling the SLURM API <b>slurm_checkpoint_tasks()</b> before dumping its process
 context.</p>
 
-<p>Upon restart, <b>srun_cr</b> checks to see if the tasks have been previously 
-launched and checkpointed. 
-If true, the environment variable </b>SLURM_RESTART_DIR</b> is set to the 
+<p>Upon restart, <b>srun_cr</b> checks to see if the tasks have been previously
+launched and checkpointed.
+If true, the environment variable <b>SLURM_RESTART_DIR</b> is set to the
 directory of the checkpoint image files of the tasks.
-Then <b>srun</b> is forked and executed again. 
-The environment variable will be used by the <b>srun</b> command to restart 
+Then <b>srun</b> is forked and executed again.
+The environment variable will be used by the <b>srun</b> command to restart
 execution of the tasks from the previous checkpoint.</p>
 
 <h3>sbatch</h3>
@@ -117,34 +117,34 @@ and <i>"&lt;job_id&gt;.&lt;step_id&gt;.ckpt"</i> for job steps.</li>
 
 <p><b>scontrol</b> is used to initiate checkpoint/restart requests.</p>
 <ul>
-<li><b>scontrol checkpoint create <i>jobid</i> [ImageDir=<i>dir</i>] 
+<li><b>scontrol checkpoint create <i>jobid</i> [ImageDir=<i>dir</i>]
 [MaxWait=<i>seconds</i>]</b><br>
 Requests a checkpoint on a specific job.
 For backward compatibility, if a job id is specified, all job steps of
-it are checkpointed. 
+it are checkpointed.
 If a batch job id is specified, the entire job is checkpointed including
 the batch shell and all running tasks of all job steps.
-Upon checkpoint, the task launch command must forward the requests to 
+Upon checkpoint, the task launch command must forward the requests to
 tasks it launched.
 <ul>
-<li><b>ImageDir</b> specifies the directory in which to save the checkpoint 
+<li><b>ImageDir</b> specifies the directory in which to save the checkpoint
 image files. If specified, this takes precedence over any <b>--checkpoint-dir</b>
 option specified when the job or job step were submitted.</li>
-<li><b>MaxWait</b> specifies the maximum time permitted for a checkpoint 
-request to complete. The request will be considered failed if not 
+<li><b>MaxWait</b> specifies the maximum time permitted for a checkpoint
+request to complete. The request will be considered failed if not
 completed in this time period.</li>
 </li>
 </ul>
 
-<li><b>scontrol checkpoint create <i>jobid.stepid</i> [ImageDir=<i>dir</i>] 
+<li><b>scontrol checkpoint create <i>jobid.stepid</i> [ImageDir=<i>dir</i>]
 [MaxWait=<i>seconds</i>]</b><br>
 Requests a checkpoint on a specific job step.</li>
 
-<li><b>scontrol checkpoint restart <i>jobid</i> [ImageDir=<i>dir</i>] 
+<li><b>scontrol checkpoint restart <i>jobid</i> [ImageDir=<i>dir</i>]
 [StickToNodes]</b><br>
 Restart a previously checkpointed batch job.
 <ul>
-<li><b>ImageDir</b> specifies the directory from which to read the checkpoint 
+<li><b>ImageDir</b> specifies the directory from which to read the checkpoint
 image files.</li>
 <li><b>StickToNodes</b> specifies that the job should be restarted on the
 same set of nodes from which it was previously checkpointed.</li>
@@ -155,7 +155,7 @@ same set of nodes from which it was previously checkpointed.</li>
 
 <p>The following SLURM configuration parameter has been added:</p>
 <ul>
-<li><b>JobCheckpointDir</b> specified the default directory for storing 
+<li><b>JobCheckpointDir</b> specifies the default directory for storing
 or reading job checkpoint files</li>
 </ul>
 
diff --git a/doc/html/checkpoint_plugins.shtml b/doc/html/checkpoint_plugins.shtml
index d458732d43dd1cdb78c1a23c49be4a6ee5928615..6275148d2ac56b11ba2372f9f44c424fac0f1998 100644
--- a/doc/html/checkpoint_plugins.shtml
+++ b/doc/html/checkpoint_plugins.shtml
@@ -3,17 +3,17 @@
 <h1><a name="top">SLURM Job Checkpoint Plugin Programmer Guide</a></h1>
 
 <h2> Overview</h2>
-<p> This document describes SLURM job checkpoint plugins and the API that defines 
-them. It is intended as a resource to programmers wishing to write their own SLURM 
+<p> This document describes SLURM job checkpoint plugins and the API that defines
+them. It is intended as a resource to programmers wishing to write their own SLURM
 job checkpoint plugins. This is version 0 of the API.</p>
 
-<p>SLURM job checkpoint plugins are SLURM plugins that implement the SLURM 
-API for checkpointing and restarting jobs. 
+<p>SLURM job checkpoint plugins are SLURM plugins that implement the SLURM
+API for checkpointing and restarting jobs.
 The plugins must conform to the SLURM Plugin API with the following specifications:</p>
 
 <p><span class="commandline">const char plugin_type[]</span><br>
-The major type must be &quot;checkpoint.&quot; The minor type can be any recognizable 
-abbreviation for the type of checkpoint mechanism. 
+The major type must be &quot;checkpoint.&quot; The minor type can be any recognizable
+abbreviation for the type of checkpoint mechanism.
 We recommend, for example:</p>
 <ul>
 <li><b>aix</b>&#151;AIX system checkpoint.</li>
@@ -25,37 +25,37 @@ Berkeley Lab Checkpoint/Restart (BLCR)</a></li>
 <li><b>xlch</b>&#151;XLCH</li>
 </ul></p>
 
-<p>The <span class="commandline">plugin_name</span> and 
-<span class="commandline">plugin_version</span> 
-symbols required by the SLURM Plugin API require no specialization for 
-job checkpoint support. 
+<p>The <span class="commandline">plugin_name</span> and
+<span class="commandline">plugin_version</span>
+symbols required by the SLURM Plugin API require no specialization for
+job checkpoint support.
 Note carefully, however, the versioning discussion below.</p>
 
-<p>The programmer is urged to study 
+<p>The programmer is urged to study
 <span class="commandline">src/plugins/checkpoint/checkpoint_aix.c</span>
 for a sample implementation of a SLURM job checkpoint plugin.</p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Data Objects</h2>
-<p>The implementation must maintain (though not necessarily directly export) an 
-enumerated <span class="commandline">errno</span> to allow SLURM to discover 
-as practically as possible the reason for any failed API call. Plugin-specific enumerated 
+<p>The implementation must maintain (though not necessarily directly export) an
+enumerated <span class="commandline">errno</span> to allow SLURM to discover
+as practically as possible the reason for any failed API call. Plugin-specific enumerated
 integer values may be used when appropriate.
 
-<p>These values must not be used as return values in integer-valued functions 
-in the API. The proper error return value from integer-valued functions is SLURM_ERROR. 
-The implementation should endeavor to provide useful and pertinent information by 
-whatever means is practical. 
-Successful API calls are not required to reset any errno to a known value. However, 
-the initial value of any errno, prior to any error condition arising, should be 
+<p>These values must not be used as return values in integer-valued functions
+in the API. The proper error return value from integer-valued functions is SLURM_ERROR.
+The implementation should endeavor to provide useful and pertinent information by
+whatever means is practical.
+Successful API calls are not required to reset any errno to a known value. However,
+the initial value of any errno, prior to any error condition arising, should be
 SLURM_SUCCESS. </p>
 
-<p>There is also a checkpoint-specific error code and message that may be associated 
+<p>There is also a checkpoint-specific error code and message that may be associated
 with each job step.</p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>API Functions</h2>
-<p>The following functions must appear. Functions which are not implemented should 
+<p>The following functions must appear. Functions which are not implemented should
 be stubbed.</p>
 
 <p class="commandline">int slurm_ckpt_alloc_job (check_jobinfo_t *jobinfo);</p>
@@ -63,21 +63,21 @@ be stubbed.</p>
 checkpoint data.</p>
 <p style="margin-left:.2in"><b>Argument</b>:
 <b>jobinfo</b> (output) returns pointer to the allocated storage.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
-the plugin should return SLURM_ERROR and set the errno to an appropriate value 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">int slurm_ckpt_free_job (check_jobinfo_t jobinfo);</p>
 <p style="margin-left:.2in"><b>Description</b>: Release storage for job-step specific
 checkpoint data that was previously allocated by slurm_ckpt_alloc_job.</p>
 <p style="margin-left:.2in"><b>Argument</b>:
-<b>jobinfo</b> (input) pointer to the previously allocated storage.</p> 
+<b>jobinfo</b> (input) pointer to the previously allocated storage.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">int slurm_ckpt_pack_job (check_jobinfo_t jobinfo, Buf buffer);</p>
-<p style="margin-left:.2in"><b>Description</b>: Store job-step specific checkpoint data 
+<p style="margin-left:.2in"><b>Description</b>: Store job-step specific checkpoint data
 into a buffer.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
 <b>jobinfo</b> (input) pointer to the previously allocated storage.<br>
@@ -85,7 +85,7 @@ into a buffer.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
-                                                                                                                       
+
 <p class="commandline">int slurm_ckpt_unpack_job (check_jobinfo_t jobinfo, Buf buffer);</p>
 <p style="margin-left:.2in"><b>Description</b>: Retrieve job-step specific checkpoint data
 from a buffer.</p>
@@ -98,21 +98,21 @@ to indicate the reason for failure.</p>
 
 <p class="commandline">int slurm_ckpt_op ( uint32_t job_id, uint32_t step_id,
 struct step_record *step_ptr, uint16_t op, uint16_t data,
-char *image_dir, time_t *event_time, 
+char *image_dir, time_t *event_time,
 uint32_t *error_code, char **error_msg );</p>
-<p style="margin-left:.2in"><b>Description</b>: Perform some checkpoint operation on a 
+<p style="margin-left:.2in"><b>Description</b>: Perform some checkpoint operation on a
 specific job step.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
 <b>job_id</b> (input) identifies the job to be operated upon.
-May be SLURM_BATCH_SCRIPT for a batch job or NO_VAL for all steps of the 
+May be SLURM_BATCH_SCRIPT for a batch job or NO_VAL for all steps of the
 specified job.</br>
 <b>step_id</b> (input) identifies the job step to be operated upon.<br>
 <b>step_ptr</b> (input) pointer to the job step to be operated upon.
 Used by checkpoint/aix only.<br>
-<b>op</b> (input) specifies the operation to be performed. 
-Currently supported operations include 
+<b>op</b> (input) specifies the operation to be performed.
+Currently supported operations include
 CHECK_ABLE (is job step currently able to be checkpointed),
-CHECK_DISABLE (disable checkpoints for this job step), 
+CHECK_DISABLE (disable checkpoints for this job step),
 CHECK_ENABLE (enable checkpoints for this job step),
 CHECK_CREATE (create a checkpoint for this job step and continue its execution),
 CHECK_VACATE (create a checkpoint for this job step and terminate it),
@@ -120,15 +120,15 @@ CHECK_RESTART (restart this previously checkpointed job step), and
 CHECK_ERROR (return checkpoint-specific error information for this job step).<br>
 <b>data</b> (input) operation-specific data.<br>
 <b>image_dir</b> (input) directory to be used to save or restore state.<br>
-<b>event_time</b> (output) identifies the time of a checkpoint or restart 
+<b>event_time</b> (output) identifies the time of a checkpoint or restart
 operation.</br>
-<b>error_code</b> (output) returns checkpoint-specific error code 
+<b>error_code</b> (output) returns checkpoint-specific error code
 associated with an operation.</br>
-<b>error_msg</b> (output) identifies checkpoint-specific error message 
+<b>error_msg</b> (output) identifies checkpoint-specific error message
 associated with an operation.</p>
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 SLURM_SUCCESS if successful. On failure,
-the plugin should return SLURM_ERROR and set the error_code and error_msg 
+the plugin should return SLURM_ERROR and set the error_code and error_msg
 to an appropriate value to indicate the reason for failure.</p>
 
 <p class="commandline">int slurm_ckpt_comp ( struct step_record * step_ptr, time_t event_time,
@@ -136,24 +136,24 @@ uint32_t error_code, char *error_msg );</p>
 <p style="margin-left:.2in"><b>Description</b>: Note the completion of a checkpoint operation.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
 <b>step_ptr</b> (input/output) identifies the job step to be operated upon.</br>
-<b>event_time</b> (input) identifies the time that the checkpoint operation 
+<b>event_time</b> (input) identifies the time that the checkpoint operation
 began.</br>
-<b>error_code</b> (input) checkpoint-specific error code associated 
+<b>error_code</b> (input) checkpoint-specific error code associated
 with an operation.</br>
-<b>error_msg</b> (input) checkpoint-specific error message associated 
+<b>error_msg</b> (input) checkpoint-specific error message associated
 with an operation.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR and set the error_code and error_msg to an
 appropriate value to indicate the reason for failure.</p>
-                                                                                                                       
+
 <p class="commandline">int slurm_ckpt_stepd_prefork ( void *slurmd_job );</p>
 <p style="margin-left:.2in"><b>Description</b>: Do preparation work for
 the checkpoint/restart support. This function is called by <b>slurmstepd</b>
 before forking the user tasks.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
 <b>slurmd_job</b> (input) pointer to job structure internal to slurmstepd.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. 
-On failure, the plugin should return SLURM_ERROR and set the error_code 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful.
+On failure, the plugin should return SLURM_ERROR and set the error_code
 and error_msg to an appropriate value to indicate the reason for failure.</p>
 
 <p class="commandline">int slurm_ckpt_signal_tasks ( void *slurmd_job,
@@ -163,8 +163,8 @@ request to tasks managed by <b>slurmstepd</b>.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
 <b>slurmd_job</b> (input) pointer to job structure internal to slurmstepd.</br>
 <b>image_dir</b> (input) directory to be used to save or restore state.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. 
-On failure, the plugin should return SLURM_ERROR and set the error_code 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful.
+On failure, the plugin should return SLURM_ERROR and set the error_code
 and error_msg to an appropriate value to indicate the reason for failure.</p>
 
 <p class="commandline">int slurm_ckpt_restart_task ( void *slurmd_job,
@@ -175,15 +175,15 @@ of a tasks from a checkpoint image, called by <b>slurmstepd</b>.</p>
 <b>slurmd_job</b> (input) pointer to job structure internal to slurmstepd.<br>
 <b>image_dir</b> (input) directory to be used to save or restore state.<br>
 <b>gtid</b> (input) global task ID to be restarted</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. 
-On failure, the plugin should return SLURM_ERROR and set the error_code 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful.
+On failure, the plugin should return SLURM_ERROR and set the error_code
 and error_msg to an appropriate value to indicate the reason for failure.</p>
 
 
 <h2>Versioning</h2>
-<p> This document describes version 100 of the SLURM checkpoint API. 
-Future releases of SLURM may revise this API. 
-A checkpoint plugin conveys its ability to implement a particular API 
+<p> This document describes version 100 of the SLURM checkpoint API.
+Future releases of SLURM may revise this API.
+A checkpoint plugin conveys its ability to implement a particular API
 version using the mechanism outlined for SLURM plugins.</p>
 <p class="footer"><a href="#top">top</a></p>
 
diff --git a/doc/html/coding_style.pdf b/doc/html/coding_style.pdf
index d7f5fdf2bce8d60e50983a7d1d4081aedd739ecf..640336faed5ab35166c60416f972c51772f81bd6 100644
Binary files a/doc/html/coding_style.pdf and b/doc/html/coding_style.pdf differ
diff --git a/doc/html/configurator.html.in b/doc/html/configurator.html.in
index 1c6b74537a5f513ef0b028bd50cc6f804e435550..c844b7d87ba6f8f5939c73a49edbd40414d3c39c 100644
--- a/doc/html/configurator.html.in
+++ b/doc/html/configurator.html.in
@@ -45,11 +45,11 @@ function get_accounting_storage_type_field(gather, form_storage)
   {
     if (form_storage[i].checked)
     {
-      if(form_storage[i].value == "none" && !(gather == "none")) 
+      if(form_storage[i].value == "none" && !(gather == "none"))
       {
 	return "filetxt"
       }
-      else if(!(form_storage[i].value == "none") && gather == "none")   
+      else if(!(form_storage[i].value == "none") && gather == "none")
       {
 	return "none"
       }
@@ -117,7 +117,7 @@ function get_task_plugin_param()
   for (var i=0; i<document.config.task_plugin.length; i++) {
     if (document.config.task_plugin[i].checked) {
        if (document.config.task_plugin[i].value == "affinity") {
-         return "TaskPluginParam=" + 
+         return "TaskPluginParam=" +
 		get_radio_value(document.config.task_plugin_param)
        }
     }
@@ -129,7 +129,7 @@ function hide_box()
 {
    var popup = document.getElementById('out_box');
    popup.style.visibility = 'hidden';
-    
+
 }
 
 function displayfile()
@@ -289,21 +289,21 @@ function displayfile()
    get_field2(" CoresPerSocket",document.config.cores_per_socket) +
    get_field2(" ThreadsPerCore",document.config.threads_per_core) +
    " State=UNKNOWN <br>" +
-   "PartitionName=" + document.config.partition_name.value + 
-   " Nodes=" + document.config.node_name.value + 
+   "PartitionName=" + document.config.partition_name.value +
+   " Nodes=" + document.config.node_name.value +
    " Default=YES"+
    " MaxTime=" + document.config.max_time.value +
    " State=UP"
 
    //scroll(0,0);
    //var popup = document.getElementById('out_box');
-   
+
    //popup.innerHTML = "<a href='javascript:hide_box();'>close</a><br>";
    //popup.innerHTML += "#BEGIN SLURM.CONF FILE<br><br>";
    //popup.innerHTML += printme;
    //popup.innerHTML += "<br><br>#END SLURM.CONF FILE<br>";
    //popup.innerHTML += "<a href='javascript:hide_box();'>close</a>";
-   
+
    //popup.style.visibility = 'visible';
 
    // OLD CODE
@@ -319,29 +319,29 @@ function displayfile()
 <BODY>
 <FORM name=config>
 <H1>SLURM Version @SLURM_MAJOR@.@SLURM_MINOR@ Configuration Tool</H1>
-<P>This form can be used to create a SLURM configuration file with 
+<P>This form can be used to create a SLURM configuration file with
 you controlling many of the important configuration parameters.</P>
 
 <P><B>This tool supports SLURM version @SLURM_MAJOR@.@SLURM_MINOR@ only.</B>
-Configuration files for other versions of SLURM should be built 
+Configuration files for other versions of SLURM should be built
 using the tool distributed with it in <i>doc/html/configurator.html</i>.
 Some parameters will be set to default values, but you can
 manually edit the resulting <I>slurm.conf</I> as desired
 for greater flexibility. See <I>man slurm.conf</I> for more
 details about the configuration parameters.</P>
 
-<P>Note the while SLURM daemons create log files and other files as needed, 
-it treats the lack of parent directories as a fatal error. 
+<P>Note that while SLURM daemons create log files and other files as needed,
+it treats the lack of parent directories as a fatal error.
 This prevents the daemons from running if critical file systems are
-not mounted and will minimize the risk of cold-starting (starting 
+not mounted and will minimize the risk of cold-starting (starting
 without preserving jobs).</P>
 
-<P>Note that this configuration file must be installed on all nodes 
+<P>Note that this configuration file must be installed on all nodes
 in your cluster.</P>
 
-<P>After you have filled in the fields of interest, use the 
+<P>After you have filled in the fields of interest, use the
 "Submit" button on the bottom of the page to build the <I>slurm.conf</I>
-file. It will appear on your web browser. Save the file in text format 
+file. It will appear on your web browser. Save the file in text format
 as <I>slurm.conf</I> for use by SLURM.
 
 <P>For more information about SLURM, see
@@ -354,31 +354,31 @@ Define the hostname of the computer on which the SLURM controller and
 optional backup controller will execute. You can also specify addresses
 of these computers if desired (defaults to their hostnames).
 The IP addresses can be either numeric IP addresses or names.
-Hostname values should should not be the fully qualified domain 
+Hostname values should not be the fully qualified domain
 name (e.g. use <I>linux</I> rather than <I>linux.llnl.gov</I>).
 <P>
 <input type="text" name="control_machine" value="linux0"> <B>ControlMachine</B>:
 Master Controller Hostname
 <P>
-<input type="text" name="control_addr"> <B>ControlAddr</B>: Master Controller 
+<input type="text" name="control_addr"> <B>ControlAddr</B>: Master Controller
 Address (optional)
 <P>
-<input type="text" name="backup_controller"> <B>BackupController</B>: Backup 
+<input type="text" name="backup_controller"> <B>BackupController</B>: Backup
 Controller Hostname (optional)
 <P>
-<input type="text" name="backup_addr"> <B>BackupAddr</B>: Backup Controller 
+<input type="text" name="backup_addr"> <B>BackupAddr</B>: Backup Controller
 Address (optional)
 <P>
 
 <H2>Compute Machines</H2>
 Define the machines on which user applications can run.
-You can also specify addresses of these computers if desired 
+You can also specify addresses of these computers if desired
 (defaults to their hostnames).
-Only a few of the possible parameters associated with the nodes will 
-be set by this tool, but many others are available. 
-All of the nodes will be placed into a single partition (or queue) 
-with global access. Many options are available to group nodes into 
-partitions with a wide variety of configuration parameters. 
+Only a few of the possible parameters associated with the nodes will
+be set by this tool, but many others are available.
+All of the nodes will be placed into a single partition (or queue)
+with global access. Many options are available to group nodes into
+partitions with a wide variety of configuration parameters.
 Manually edit the <i>slurm.conf</i> produced to exercise these options.
 Node names and addresses may be specified using a numeric range specification.
 
@@ -389,17 +389,17 @@ Compute nodes
 <input type="text" name="node_addr"> <B>NodeAddr</B>: Compute node addresses
 (optional)
 <P>
-<input type="text" name="partition_name" value="debug"> <B>PartitionName</B>: 
+<input type="text" name="partition_name" value="debug"> <B>PartitionName</B>:
 Name of the one partition to be created
 <P>
-<input type="text" name="max_time" value="INFINITE"> <B>MaxTime</B>: 
+<input type="text" name="max_time" value="INFINITE"> <B>MaxTime</B>:
 Maximum time limit of jobs in minutes or INFINITE
 <P>
 The following parameters describe a node's configuration.
 Set a value for <B>Procs</B>.
 The other parameters are optional, but provide more control over scheduled resources:
 <P>
-<input type="text" name="procs" value="1"> <B>Procs</B>: Count of processors 
+<input type="text" name="procs" value="1"> <B>Procs</B>: Count of processors
 on each compute node.
 If Procs is omitted, it will be inferred from:
 Sockets, CoresPerSocket, and ThreadsPerCore.
@@ -420,15 +420,15 @@ the logical number of processors per socket.
 <B>ThreadsPerCore</B>:
 Number of logical threads in a single physical core.
 <P>
-<input type="text" name="memory" value=""> <B>RealMemory</B>: Amount 
-of real memory. This parameter is required when specifying Memory as a 
-consumable resource with the select/cons_res plug-in. See below 
-under Resource Selection. 
+<input type="text" name="memory" value=""> <B>RealMemory</B>: Amount
+of real memory. This parameter is required when specifying Memory as a
+consumable resource with the select/cons_res plug-in. See below
+under Resource Selection.
 <P>
 
 <H2>SLURM User</H2>
-The SLURM controller (slurmctld) can run without elevated privileges, 
-so it is recommended that a user "slurm" be created for it. For testing 
+The SLURM controller (slurmctld) can run without elevated privileges,
+so it is recommended that a user "slurm" be created for it. For testing
 purposes any user name can be used.
 <P>
 <input type="text" name="slurm_user" value="slurm"> <B>SlurmUser</B>
@@ -444,7 +444,7 @@ entries to prevent this from happening by setting
 <B>CacheGroups</B>=1.  Reconfiguring ("scontrol reconfig") with
 <B>CacheGroups</B>=0 will cause slurmd to purge the cache.  Select one
 value for <B>CacheGroups</B>:<BR>
-<input type="radio" name="cache_groups" value="0" checked> 
+<input type="radio" name="cache_groups" value="0" checked>
 <B>0</B>: for normal environment.<BR>
 <input type="radio" name="cache_groups" value="1">
 <B>1</B>: for slow NIS environment.
@@ -455,10 +455,10 @@ after making any changes to system password or group databases.
 <P>
 
 <H2>SLURM Port Numbers</H2>
-The SLURM controller (slurmctld) requires a unique port for communications 
-as do the SLURM compute node daemons (slurmd). If not set, slurm ports 
-are set by checking for an entry in <I>/etc/services</I> and if that 
-fails by using an interval default set at SLURM build time. 
+The SLURM controller (slurmctld) requires a unique port for communications
+as do the SLURM compute node daemons (slurmd). If not set, slurm ports
+are set by checking for an entry in <I>/etc/services</I> and if that
+fails by using an internal default set at SLURM build time.
 <P>
 <input type="text" name="slurmctld_port" value="6817"> <B>SlurmctldPort</B>
 <P>
@@ -468,21 +468,21 @@ fails by using an interval default set at SLURM build time.
 <H2>Authentication and Security</H2>
 Define the method used for authenticating communicating between SLURM components.<BR>
 Select one value for <B>AuthType</B>:<BR>
-<input type="radio" name="auth_type" value="none"> <B>None</B>: No authentication, 
+<input type="radio" name="auth_type" value="none"> <B>None</B>: No authentication,
 not recommended production use<br>
-<input type="radio" name="auth_type" value="authd"> <B>Authd</B>: Brent Chun's 
+<input type="radio" name="auth_type" value="authd"> <B>Authd</B>: Brent Chun's
 <A href="http://www.theether.org/authd/">authd</A><BR>
-<input type="radio" name="auth_type" value="munge" checked> <B>Munge</B>: LLNL's 
+<input type="radio" name="auth_type" value="munge" checked> <B>Munge</B>: LLNL's
 <A href="http://home.gna.org/munge/">Munge</A><BR>
 <P>
 Library used for job step cryptographic signature generation.<BR>
 Select one value for <B>CryptoType</B>:<BR>
-<input type="radio" name="crypto_type" value="munge" checked><B>Munge</B>: LLNL's 
+<input type="radio" name="crypto_type" value="munge" checked><B>Munge</B>: LLNL's
 <A href="http://home.gna.org/munge/">Munge</A> (has Gnu Public License)<BR>
 <input type="radio" name="crypto_type" value="openssl"> <B>OpenSSL</B>:
-<A href="http://www.openssl.org/">OpenSSL</A> 
+<A href="http://www.openssl.org/">OpenSSL</A>
 <P>
-Define the location of public and private keys used by SLURM's 
+Define the location of public and private keys used by SLURM's
 cryptographic signature generation plugin (CryptoType).<br>
 <b>These values are only used if CryptoType=OpenSSL.</b><br>
 These files need to be generated by the SLURM administrator.
@@ -494,8 +494,8 @@ Specify fully qualified pathnames.
 <P>
 
 <H2>State Preservation</H2>
-Define the location of a directory where the slurmctld daemon saves its state. 
-This should be a fully qualified pathname which can be read and written to 
+Define the location of a directory where the slurmctld daemon saves its state.
+This should be a fully qualified pathname which can be read and written to
 by the SLURM user on both the control machine and backup controller (if configured).
 The location of a directory where slurmd saves state should also be defined.
 This must be a unique directory on each compute server (local disk).
@@ -511,37 +511,37 @@ Define when a non-responding (DOWN) node is returned to service.<BR>
 Select one value for <B>ReturnToService</B>:<BR>
 <input type="radio" name="return_to_service" value="0">
 <B>0</B>: When explicitly restored to service by an administrator.<BR>
-<input type="radio" name="return_to_service" value="1" checked> 
+<input type="radio" name="return_to_service" value="1" checked>
 <B>1</B>: Automatically, when slurmd daemon registers with valid configuration<BR>
 <P>
- 
+
 <H2>Scheduling</H2>
 Define the mechanism to be used for controlling job ordering.<BR>
 Select one value for <B>SchedulerType</B>:<BR>
-<input type="radio" name="sched_type" value="builtin">  <B>Builtin</B>: First-In 
+<input type="radio" name="sched_type" value="builtin">  <B>Builtin</B>: First-In
 First-Out (FIFO)<BR>
-<input type="radio" name="sched_type" value="backfill" checked> <B>Backfill</B>: 
+<input type="radio" name="sched_type" value="backfill" checked> <B>Backfill</B>:
 FIFO with backfill<BR>
-<input type="radio" name="sched_type" value="gang">  <B>Gang</B>: Gang scheduling 
+<input type="radio" name="sched_type" value="gang">  <B>Gang</B>: Gang scheduling
 (time-slicing for parallel jobs)<BR>
-<input type="radio" name="sched_type" value="wiki"> <B>Wiki</B>: Wiki interface 
+<input type="radio" name="sched_type" value="wiki"> <B>Wiki</B>: Wiki interface
 to Maui (configuration parameter <B>SchedulerPort</B> must specified)<BR>
-<input type="radio" name="sched_type" value="wiki2"> <B>Wiki2</B>: Wiki interface 
+<input type="radio" name="sched_type" value="wiki2"> <B>Wiki2</B>: Wiki interface
 to Moab (configuration parameter <B>SchedulerPort</B> must specified)<BR>
 <P>
-<input type="text" name="scheduler_port" value="7321"> <B>SchedulerPort</B>: scheduler 
+<input type="text" name="scheduler_port" value="7321"> <B>SchedulerPort</B>: scheduler
 communications port (used by Wiki only)
 <P>
-Define what node configuration (sockets, cores, memory, etc.) should be used. 
+Define what node configuration (sockets, cores, memory, etc.) should be used.
 Using values defined in the configuration file will provide faster scheduling.<BR>
 Select one value for <B>FastSchedule</B>:<BR>
 <input type="radio" name="fast_schedule" value="1" checked>
 <B>1</B>: Use node configuration values defined in configuration file<BR>
 <input type="radio" name="fast_schedule" value="0">
-<B>0</B>: Use node configuration values actually found on each node 
-(if configured with with gang scheduling or allocation of individual 
-processors to jobs rather than only whole node allocations, the processor 
-count on the node should match the configured value to avoid having extra 
+<B>0</B>: Use node configuration values actually found on each node
+(if configured with gang scheduling or allocation of individual
+processors to jobs rather than only whole node allocations, the processor
+count on the node should match the configured value to avoid having extra
 processors left idle)
 <P>
 
@@ -549,43 +549,43 @@ processors left idle)
 Define the node interconnect used.<BR>
 Select one value for <B>SwitchType</B>:<BR>
 <input type="radio" name="switch_type" value="elan"> <B>Elan</B>: Quadrics Elan3 or Elan4<BR>
-<input type="radio" name="switch_type" value="federation"> <B>Federation</B>: IBM 
+<input type="radio" name="switch_type" value="federation"> <B>Federation</B>: IBM
 Federation Switch<BR>
-<input type="radio" name="switch_type" value="none" checked> <B>None</B>: No special 
+<input type="radio" name="switch_type" value="none" checked> <B>None</B>: No special
 handling required (InfiniBand, Myrinet, Ethernet, etc.)<BR>
 <P>
 
 <H2>Default MPI Type</H2>
-Specify the type of MPI to be used by default. SLURM will configure environment 
+Specify the type of MPI to be used by default. SLURM will configure environment
 variables accordingly. Users can over-ride this specification with an srun option.<BR>
 Select one value for <B>MpiDefault</B>:<BR>
 <input type="radio" name="mpi_default" value="mpichgm"> <B>MPICH-GM</B><BR>
 <input type="radio" name="mpi_default" value="mpichmx"> <B>MPICH-MX</B><BR>
 <input type="radio" name="mpi_default" value="mpich1_p4"> <B>MPICH1-P4</B><BR>
-<input type="radio" name="mpi_default" value="mpich1_shmem"> <B>MPICH1-SHMEM</B>: 
+<input type="radio" name="mpi_default" value="mpich1_shmem"> <B>MPICH1-SHMEM</B>:
 This also works for MVAPICH-SHMEM.<BR>
 <input type="radio" name="mpi_default" value="mvapich"> <B>MVAPICH</B><BR>
-<input type="radio" name="mpi_default" value="none" checked> <B>None</B>: 
+<input type="radio" name="mpi_default" value="none" checked> <B>None</B>:
 +This works for most other MPI types including MPICH2, LAM MPI and Open MPI.<BR>
 <P>
 
 <H2>Process Tracking</H2>
 Define the algorithm used to identify which processes are associated with a
-given job. This is used signal, kill, and account for the processes associated 
+given job. This is used to signal, kill, and account for the processes associated
 with a job step.<BR>
 Select one value for <B>ProctrackType</B>:<BR>
-<input type="radio" name="proctrack_type" value="aix"> <B>AIX</B>: Use AIX kernel 
+<input type="radio" name="proctrack_type" value="aix"> <B>AIX</B>: Use AIX kernel
 extension, recommended for AIX systems<BR>
 <input type="radio" name="proctrack_type" value="pgid" checked> <B>Pgid</B>: Use Unix
-Process Group ID, processes changing their process group ID can escape from SLURM 
+Process Group ID, processes changing their process group ID can escape from SLURM
 control<BR>
 <input type="radio" name="proctrack_type" value="linuxproc"> <B>LinuxProc</B>: Use
-parent process ID records, required for MPICH-GM use, processes can escape 
+parent process ID records, required for MPICH-GM use, processes can escape
 from SLURM control<BR>
 <input type="radio" name="proctrack_type" value="rms"> <B>RMS</B>: Use Quadrics
 kernel infrastructure, recommended for systems where this is available<BR>
 <input type="radio" name="proctrack_type" value="sgi_job"> <B>SGI's PAGG
-module</B>: Use <A HREF="http://oss.sgi.com/projects/pagg/">SGI's Process 
+module</B>: Use <A HREF="http://oss.sgi.com/projects/pagg/">SGI's Process
 Aggregates (PAGG) kernel module</A>, recommended where available<BR>
 <P>
 
@@ -597,17 +597,17 @@ Select one value for <B>SelectType</B>:<BR>
 <DL>
 <DL>
 <DT><B>SelectTypeParameters</B> (As used by <I>SelectType=Cons_res</I> only):
-    <DD> Note: The -E extension for sockets, cores, and threads 
-	are ignored within the node allocation mechanism 
-	when CR_CPU or CR_CPU_MEMORY is selected. 
-	They are considered to compute the total number of 
+    <DD> Note: The -E extension for sockets, cores, and threads
+	are ignored within the node allocation mechanism
+	when CR_CPU or CR_CPU_MEMORY is selected.
+	They are considered to compute the total number of
 	tasks when -n is not specified
     <DD> Note: CR_MEMORY assumes MaxShare value of one of higher
 <DT> <input type="radio" name="cons_res_params" value="CR_CPU" checked
 	    onClick="javascript:set_select_type(this, 'cons_res')">
     <B>CR_CPU</B>: (default)
-    CPUs as consumable resources. 
-    <DD> No notion of sockets, cores, or threads. 
+    CPUs as consumable resources.
+    <DD> No notion of sockets, cores, or threads.
     On a multi-core system, cores will be considered CPUs.
     On a multi-core/hyperthread system, threads will be considered CPUs.
     On a single-core systems CPUs are CPUs. ;-)
@@ -616,10 +616,10 @@ Select one value for <B>SelectType</B>:<BR>
     <B>CR_Socket</B>: Sockets as a consumable resource.
 <DT> <input type="radio" name="cons_res_params" value="CR_Core"
 	    onClick="javascript:set_select_type(this)">
-    <B>CR_Core</B>: Cores as a consumable resource. 
+    <B>CR_Core</B>: Cores as a consumable resource.
 <DT> <input type="radio" name="cons_res_params" value="CR_Memory"
 	    onClick="javascript:set_select_type(this)">
-    <B>CR_Memory</B>: Memory as a consumable resource. 
+    <B>CR_Memory</B>: Memory as a consumable resource.
     <DD> Note: CR_Memory assumes MaxShare value of one of higher
 <DT> <input type="radio" name="cons_res_params" value="CR_CPU_Memory"
 	    onClick="javascript:set_select_type(this)">
@@ -636,19 +636,19 @@ Select one value for <B>SelectType</B>:<BR>
 </DL>
 </DL>
 <input type="radio" name="select_type" value="linear" checked>
-<B>Linear</B>: Node-base 
+<B>Linear</B>: Node-base
 resource allocation, does not manage individual processor allocation<BR>
 <input type="radio" name="select_type" value="bluegene">
 <B>BlueGene</B>: For IBM Blue Gene systems only<BR>
 <P>
 
 <H2>Task Launch</H2>
-Define a task launch plugin. This may be used to 
+Define a task launch plugin. This may be used to
 provide resource management within a node (e.g. pinning
 tasks to specific processors).
 Select one value for <B>TaskPlugin</B>:<BR>
 <input type="radio" name="task_plugin" value="none" checked> <B>None</B>: No task launch actions<BR>
-<input type="radio" name="task_plugin" value="affinity"> <B>Affinity</B>: 
+<input type="radio" name="task_plugin" value="affinity"> <B>Affinity</B>:
 CPU affinity support
 (see srun man pages for the --cpu_bind, --mem_bind, and -E options)
 <DL><DL>
@@ -664,7 +664,7 @@ CPU affinity support
 
 <H2>Prolog and Epilog</H2>
 <P>
-<B>Prolog/Epilog</B>: Fully qualified path that will be executed as 
+<B>Prolog/Epilog</B>: Fully qualified path that will be executed as
 root on every node of a user's job before the job's tasks
 will be initiated there and after that job has terminated.
 These parameters are optional.
@@ -674,8 +674,8 @@ These parameters are optional.
 </DL>
 
 <P>
-<B>SrunProlog/Epilog</B>: Fully qualified path to be executed by srun at 
-job step initiation and termination. These parameters may be overridden by 
+<B>SrunProlog/Epilog</B>: Fully qualified path to be executed by srun at
+job step initiation and termination. These parameters may be overridden by
 srun's --prolog and --epilog options
 These parameters are optional.
 <DL>
@@ -684,7 +684,7 @@ These parameters are optional.
 </DL>
 
 <P>
-<B>TaskProlog/Epilog</B>: Fully qualified path to be executed as the user 
+<B>TaskProlog/Epilog</B>: Fully qualified path to be executed as the user
 before each task begins execution and after each task terminates.
 These parameters are optional.
 <DL>
@@ -693,9 +693,9 @@ These parameters are optional.
 </DL>
 
 <H2>Event Logging</H2>
-Slurmctld and slurmd daemons can each be configured with different 
+Slurmctld and slurmd daemons can each be configured with different
 levels of logging verbosity from 0 (quiet) to 7 (extremely verbose).
-Each may also be configured to use debug files. Use fully qualified 
+Each may also be configured to use debug files. Use fully qualified
 pathnames for the files.
 <P>
 <input type="text" name="slurmctld_debug" value="3"> <B>SlurmctldDebug</B> (0 to 7)
@@ -704,47 +704,47 @@ pathnames for the files.
 <P>
 <input type="text" name="slurmd_debug" value="3"> <B>SlurmdDebug</B> (0 to 7)
 <P>
-<input type="text" name="slurmd_logfile" value=""> <B>SlurmdLogFile</B> (default is none, 
+<input type="text" name="slurmd_logfile" value=""> <B>SlurmdLogFile</B> (default is none,
 log goes to syslog, string "%h" in name gets replaced with hostname)
 <P>
 
 <H2>Job Completion Logging</H2>
-Define the job completion logging mechanism to be used.<BR> 
+Define the job completion logging mechanism to be used.<BR>
 Select one value for <B>JobCompType</B>:<BR>
-<input type="radio" name="job_comp_type" value="none" checked> <B>None</B>: 
+<input type="radio" name="job_comp_type" value="none" checked> <B>None</B>:
 No job completion logging<BR>
-<input type="radio" name="job_comp_type" value="filetxt"> <B>FileTxt</B>: 
+<input type="radio" name="job_comp_type" value="filetxt"> <B>FileTxt</B>:
 Write job completion status to a text file<BR>
-<input type="radio" name="job_comp_type" value="script"> <B>Script</B>: 
+<input type="radio" name="job_comp_type" value="script"> <B>Script</B>:
 Use an arbitrary script to log job completion<BR>
-<input type="radio" name="job_comp_type" value="mysql"> <B>MySQL</B>: 
+<input type="radio" name="job_comp_type" value="mysql"> <B>MySQL</B>:
 Write completion status to a MySQL database<BR>
-<input type="radio" name="job_comp_type" value="pgsql"> <B>PGSQL</B>: 
+<input type="radio" name="job_comp_type" value="pgsql"> <B>PGSQL</B>:
 Write completion status to a PostreSQL database<BR>
-<input type="radio" name="job_comp_type" value="slurmdbd"> <B>SlurmDBD</B>: 
-Write completion status to Slurm a database daemon (serving multiple Slurm clusters) 
+<input type="radio" name="job_comp_type" value="slurmdbd"> <B>SlurmDBD</B>:
+Write completion status to a Slurm database daemon (serving multiple Slurm clusters)
 which will write to some database<BR>
 <P>
-<input type="text" name="job_comp_loc" value=""> <B>JobCompLoc</B>: 
+<input type="text" name="job_comp_loc" value=""> <B>JobCompLoc</B>:
 This is the location of the text file to be written to (if JobCompType=filetst)
 or the script to be run (if JobCompType=script) or database name (for other values
 of JobCompType).
 <p><b>Options below are for use with a database to specify where the database is running and how to connect to it</b><br>
-<input type="text" name="job_comp_host" value=""> <B>JobCompHost</B>: 
+<input type="text" name="job_comp_host" value=""> <B>JobCompHost</B>:
 Host the database is running on for Job completion<br>
-<input type="text" name="job_comp_port" value=""> <B>JobCompPort</B>: 
+<input type="text" name="job_comp_port" value=""> <B>JobCompPort</B>:
 Port the database server is listening on for Job completion<br>
-<input type="text" name="job_comp_user" value=""> <B>JobCompUser</B>: 
+<input type="text" name="job_comp_user" value=""> <B>JobCompUser</B>:
 User we are to use to talk to the database for Job completion<br>
-<input type="text" name="job_comp_pass" value=""> <B>JobCompPass</B>: 
+<input type="text" name="job_comp_pass" value=""> <B>JobCompPass</B>:
 Password we are to use to talk to the database for Job completion<br>
 <P>
 
 <H2>Job Accounting Gather</H2>
-SLURM accounts for resource use per job.  System specifics can be polled 
+SLURM accounts for resource use per job.  System specifics can be polled
 determined by system type<BR>
 Select one value for <B>JobAcctGatherType</B>:<BR>
-<input type="radio" name="job_acct_gather_type" value="none" checked> <B>None</B>: No 
+<input type="radio" name="job_acct_gather_type" value="none" checked> <B>None</B>: No
 job accounting<BR>
 <input type="radio" name="job_acct_gather_type" value="aix"> <B>AIX</B>: Specifc
 AIX process table information gathered, use with AIX systems only<BR>
@@ -757,39 +757,39 @@ polling interval in seconds. Zero disables periodic sampling.<BR>
 <H2>Job Accounting Storage</H2>
 Used with the Job Accounting Gather SLURM can store the accounting information in many different fashions.  Fill in your systems choice here<BR>
 Select one value for <B>AccountingStorageType</B>:<BR>
-<input type="radio" name="accounting_storage_type" value="none" checked> <B>None</B>: 
+<input type="radio" name="accounting_storage_type" value="none" checked> <B>None</B>:
 No job accounting storage<BR>
-<input type="radio" name="accounting_storage_type" value="filetxt"> <B>FileTxt</B>: 
+<input type="radio" name="accounting_storage_type" value="filetxt"> <B>FileTxt</B>:
 Write job accounting to a text file<BR>
-<input type="radio" name="accounting_storage_type" value="gold"> <B>Gold</B>: 
-Write completion status to Gold database daemon which can securely 
+<input type="radio" name="accounting_storage_type" value="gold"> <B>Gold</B>:
+Write completion status to Gold database daemon which can securely
 save the data from many Slurm managed clusters into a common database<BR>
-<input type="radio" name="accounting_storage_type" value="mysql"> <B>MySQL</B>: 
+<input type="radio" name="accounting_storage_type" value="mysql"> <B>MySQL</B>:
 Write job accounting to a MySQL database<BR>
-<input type="radio" name="accounting_storage_type" value="pgsql"> <B>PGSQL</B>: 
+<input type="radio" name="accounting_storage_type" value="pgsql"> <B>PGSQL</B>:
 Write job accounting to a PostreSQL database<BR>
-<input type="radio" name="accounting_storage_type" value="slurmdbd"> <B>SlurmDBD</B>: 
-Write job accounting to Slurm DBD (database daemon) which can securely 
+<input type="radio" name="accounting_storage_type" value="slurmdbd"> <B>SlurmDBD</B>:
+Write job accounting to Slurm DBD (database daemon) which can securely
 save the data from many Slurm managed clusters into a common database<BR>
 <input type="text" name="accounting_storage_loc" value=""> <B>AccountingStorageLoc</B>:
 Location specification or database name.
 This is the location of the text file to be written to (used by Log only).
 Use a fully qualified pathname. If using a database it is the name of the database you will use or create for the stored data.<br>
 <p><b>Options below are for use with a database to specify where the database is running and how to connect to it</b><br>
-<input type="text" name="accounting_storage_host" value=""> <B>AccountingStorageHost</B>: 
+<input type="text" name="accounting_storage_host" value=""> <B>AccountingStorageHost</B>:
 Host the database is running on for Job Accounting<br>
-<input type="text" name="accounting_storage_port" value=""> <B>AccountingStoragePort</B>: 
+<input type="text" name="accounting_storage_port" value=""> <B>AccountingStoragePort</B>:
 Port the database server is listening on for Job Accounting<br>
-<input type="text" name="accounting_storage_user" value=""> <B>AccountingStorageUser</B>: 
+<input type="text" name="accounting_storage_user" value=""> <B>AccountingStorageUser</B>:
 User we are to use to talk to the database for Job Accounting<br>
-<input type="text" name="accounting_storage_pass" value=""> <B>AccountingStoragePass</B>: 
-Password we are to use to talk to the database for Job Accounting. 
+<input type="text" name="accounting_storage_pass" value=""> <B>AccountingStoragePass</B>:
+Password we are to use to talk to the database for Job Accounting.
 In the case of SlurmDBD, this will be an alternate socket name for use with a Munge
 daemon providing enterprise-wide authentication (while the default Munge socket
 would provide cluster-wide authentication only).<br>
-<input type="text" name="cluster_name" value="cluster"> <B>ClusterName</B>: 
-Name to be recorded in database for jobs from this cluster. 
-This is important if a single database is used to record information 
+<input type="text" name="cluster_name" value="cluster"> <B>ClusterName</B>:
+Name to be recorded in database for jobs from this cluster.
+This is important if a single database is used to record information
 from multiple Slurm-managed clusters.<br>
 
 <P>
@@ -799,47 +799,47 @@ Define the location into which we can record the daemon's process ID.
 This is used for locate the appropriate daemon for signalling.
 Specify a specify the fully qualified pathname for the file.
 <P>
-<input type="text" name="slurmctld_pid_file" value="/var/run/slurmctld.pid"> 
+<input type="text" name="slurmctld_pid_file" value="/var/run/slurmctld.pid">
 <B>SlurmctldPidFile</B>
 <P>
-<input type="text" name="slurmd_pid_file" value="/var/run/slurmd.pid"> 
+<input type="text" name="slurmd_pid_file" value="/var/run/slurmd.pid">
 <B>SlurmdPidFile</B>
 <P>
 
 <H2>Timers</H2>
-SLURM has a variety of timers to control when to consider a node DOWN, 
+SLURM has a variety of timers to control when to consider a node DOWN,
 when to purge job records, how long to give a job to gracefully terminate, etc.
 <P>
 <input type="text" name="slurmctld_timeout" value="300">
-<B>SlurmctldTimeout</B>: How many seconds the backup controller waits before 
+<B>SlurmctldTimeout</B>: How many seconds the backup controller waits before
 becoming the master controller
 <P>
 <input type="text" name="slurmd_timeout" value="300">
-<B>SlurmdTimeout</B>: How many seconds the SLURM controller waits for the slurmd 
+<B>SlurmdTimeout</B>: How many seconds the SLURM controller waits for the slurmd
 to respond to a request before considering the node DOWN
 <P>
 <input type="text" name="inactive_limit" value="0">
-<B>InactiveLimit</B>: How many seconds the SLURM controller waits for srun 
-commands to respond before considering the job or job step inactive and 
+<B>InactiveLimit</B>: How many seconds the SLURM controller waits for srun
+commands to respond before considering the job or job step inactive and
 terminating it. A value of zero indicates unlimited wait
 <P>
 <input type="text" name="min_job_age" value="300">
 <B>MinJobAge</B>: How many seconds the SLURM controller waits after a
-job terminates before purging its record. A record of the job will 
-persist in job completion and/or accounting records indefinitely, 
-but will no longer be visible with the squeue command after puring 
+job terminates before purging its record. A record of the job will
+persist in job completion and/or accounting records indefinitely,
+but will no longer be visible with the squeue command after purging
 <P>
 <input type="text" name="kill_wait" value="30">
-<B>KillWait</B>: How many seconds a job is given to gracefully terminate 
-after reaching its time limit and being sent SIGTERM before sending 
+<B>KillWait</B>: How many seconds a job is given to gracefully terminate
+after reaching its time limit and being sent SIGTERM before sending
 a SIGKILLL
 <P>
 <input type="text" name="wait_time" value="0">
-<B>WaitTime</B>: How many seconds after a job step's first task terminates 
+<B>WaitTime</B>: How many seconds after a job step's first task terminates
 before terminating all remaining tasks. A value of zero indicates unlimited wait
-<P> 
+<P>
 
-<BR> 
+<BR>
 <BR>
 <input type=button value="Submit" onClick="javascript:displayfile()">
 <input type=reset value="Reset Form">
diff --git a/doc/html/cons_res.shtml b/doc/html/cons_res.shtml
index e9ffcce6d436653b2c762821044b1afad40fe3c2..971f36df0e2399c478f150d27e89e9af5c8a2fca 100644
--- a/doc/html/cons_res.shtml
+++ b/doc/html/cons_res.shtml
@@ -20,15 +20,15 @@ this plug-in is described below.
 <ol start=1 type=1>
  <li><b>SLURM version 1.2 and newer</b></li>
   <ul>
-   <li>Consumable resources has been enhanced with several new resources 
-    --namely CPU (same as in previous version), Socket, Core, Memory 
+   <li>Consumable resources has been enhanced with several new resources
+    --namely CPU (same as in previous version), Socket, Core, Memory
     as well as any combination of the logical processors with Memory:</li>
    <ul>
-     <li><b>CPU</b> (<i>CR_CPU</i>): CPU as a consumable resource. 
+     <li><b>CPU</b> (<i>CR_CPU</i>): CPU as a consumable resource.
        <ul>
        <li>No notion of sockets, cores, or threads.</li>
-       <li>On a multi-core system CPUs will be cores.</li> 
-       <li>On a multi-core/hyperthread system CPUs will be threads.</li>  
+       <li>On a multi-core system CPUs will be cores.</li>
+       <li>On a multi-core/hyperthread system CPUs will be threads.</li>
        <li>On a single-core systems CPUs are CPUs. ;-) </li>
        </ul>
      <li><b>Socket</b> (<i>CR_Socket</i>): Socket as a consumable
@@ -44,9 +44,9 @@ this plug-in is described below.
      <li><b>CPU and Memory</b> (<i>CR_CPU_Memory</i>) CPU and Memory
      as consumable resources.</li>
    </ul>
-  <li>In the cases where Memory is the consumable resource or one of 
-      the two consumable resources the <b>Memory</b> parameter which  
-      defines a node amount of real memory in slurm.conf must be 
+  <li>In the cases where Memory is the consumable resource or one of
+      the two consumable resources the <b>Memory</b> parameter which
+      defines a node amount of real memory in slurm.conf must be
       set when fastschedule=1.
   <li>srun's <i>-E</i> extension for sockets, cores, and threads are
       ignored within the node allocation mechanism when CR_CPU or
@@ -83,41 +83,41 @@ SelectType=select/cons_res
 #    - select/bluegene - this parameter is currently ignored
 #    - select/linear   - this parameter is currently ignored
 #    - select/cons_res - the parameters available are
-#       - CR_CPU  (1)  - CPUs as consumable resources. 
-#                        No notion of sockets, cores, or threads. 
+#       - CR_CPU  (1)  - CPUs as consumable resources.
+#                        No notion of sockets, cores, or threads.
 #                        On a multi-core system CPUs will be cores
-#                        On a multi-core/hyperthread system CPUs 
+#                        On a multi-core/hyperthread system CPUs
 #                                        will be threads
 #                        On a single-core systems CPUs are CPUs.
 #      - CR_Socket (2) - Sockets as a consumable resource.
-#      - CR_Core   (3) - Cores as a consumable resource. 
-#      - CR_Memory (4) - Memory as a consumable resource. 
+#      - CR_Core   (3) - Cores as a consumable resource.
+#      - CR_Memory (4) - Memory as a consumable resource.
 #                        Note! CR_Memory assumes Shared=Yes
-#      - CR_Socket_Memory (5) - Socket and Memory as consumable 
+#      - CR_Socket_Memory (5) - Socket and Memory as consumable
 #                               resources.
-#      - CR_Core_Memory (6)   - Core and Memory as consumable 
+#      - CR_Core_Memory (6)   - Core and Memory as consumable
 #                               resources. (Not yet implemented)
-#      - CR_CPU_Memory (7)    - CPU and Memory as consumable 
+#      - CR_CPU_Memory (7)    - CPU and Memory as consumable
 #                               resources.
 #
 # (#) refer to the output of "scontrol show config"
 #
-# NB!:   The -E extension for sockets, cores, and threads 
-#        are ignored within the node allocation mechanism 
-#        when CR_CPU or CR_CPU_MEMORY is selected. 
-#        They are considered to compute the total number of 
+# NB!:   The -E extension for sockets, cores, and threads
+#        are ignored within the node allocation mechanism
+#        when CR_CPU or CR_CPU_MEMORY is selected.
+#        They are considered to compute the total number of
 #        tasks when -n is not specified
 #
-# NB! All CR_s assume Shared=No or Shared=Force EXCEPT for 
+# NB! All CR_s assume Shared=No or Shared=Force EXCEPT for
 #        CR_MEMORY which assumes Shared=Yes
 #
 #SelectTypeParameters=CR_CPU (default)
 </pre>
-  <li>Using <i>--overcommit</i> or <i>-O</i> is allowed in this new version 
-    of consumable resources. When the process to logical processor pinning is 
-    enabled (task/affinity plug-in) the extra processes will not affect 
+  <li>Using <i>--overcommit</i> or <i>-O</i> is allowed in this new version
+    of consumable resources. When the process to logical processor pinning is
+    enabled (task/affinity plug-in) the extra processes will not affect
     co-scheduled jobs other than other jobs started with the -O flag.
-    We are currently investigating alternative approaches of handling the 
+    We are currently investigating alternative approaches of handling the
     pinning of jobs started with <i>--overcommit</i></li>
   <li><i>-c</i> or <i>--cpus-per-task</i> works in this version of
     consumable resources</li>
@@ -125,33 +125,33 @@ SelectType=select/cons_res
  <li><b>General comments</b></li>
  <ul>
   <li>SLURM's default <b>select/linear</b> plugin is using a best fit algorithm based on
-    number of consecutive nodes. The same node allocation approach is used in 
+    number of consecutive nodes. The same node allocation approach is used in
     <b>select/cons_res</b> for consistency.</li>
   <li>The <b>select/cons_res</b> plugin is enabled or disabled cluster-wide.</li>
-  <li>In the case where <b>select/cons_res</b> is not enabled, the normal SLURM behaviors 
-    are not disrupted. The only changes, users see when using the <b>select/cons_res</b> 
-    plug-in, are that jobs can be co-scheduled on nodes when resources permits it. 
-    The rest of SLURM such as srun and switches (except srun -s ...), etc. are not 
-    affected by this plugin. SLURM is, from a user point of view, working the same 
+  <li>In the case where <b>select/cons_res</b> is not enabled, the normal SLURM behaviors
+    are not disrupted. The only changes, users see when using the <b>select/cons_res</b>
+    plug-in, are that jobs can be co-scheduled on nodes when resources permits it.
+    The rest of SLURM such as srun and switches (except srun -s ...), etc. are not
+    affected by this plugin. SLURM is, from a user point of view, working the same
     way as when using the default node selection scheme.</li>
-  <li>The <i>--exclusive</i> srun switch allows users to request nodes in 
-    exclusive mode even when consumable resources is enabled. see "man srun" 
-    for details. </li>        
-  <li>srun's <i>-s</i> or <i>--share</i> is incompatible with the consumable resource 
-    environment and will therefore not be honored. Since in this environment nodes 
+  <li>The <i>--exclusive</i> srun switch allows users to request nodes in
+exclusive mode even when consumable resources is enabled. See "man srun"
+    for details. </li>
+  <li>srun's <i>-s</i> or <i>--share</i> is incompatible with the consumable resource
+    environment and will therefore not be honored. Since in this environment nodes
     are shared by default, <i>--exclusive</i> allows users to obtain dedicated nodes.</li>
  </ul>
 </ol>
 
 <p class="footer"><a href="#top">top</a></p>
 
-<h2>Limitation and future work</h2> 
+<h2>Limitation and future work</h2>
 
 <p>We are aware of several limitations with the current consumable
 resource plug-in and plan to make enhancement the plug-in as we get
 time as well as request from users to help us prioritize the features.
 
-Please send comments and requests about the consumable resources to 
+Please send comments and requests about the consumable resources to
 <a href="mailto:slurm-dev@lists.llnl.gov">slurm-dev@lists.llnl.gov</a>.
 
 <ol start=1 type=1>
@@ -159,7 +159,7 @@ Please send comments and requests about the consumable resources to
     <ul>
       <li><b>Problem:</b> The example below was achieve when using CR_CPU
       (default mode). The systems are all "dual socket, dual core,
-      single threaded systems (= 4 cpus per system)".</li> 
+      single threaded systems (= 4 cpus per system)".</li>
       <li>The first 3 serial jobs are being allocated to node hydra12
       which means that one CPU is still available on hydra12.</li>
       <li>The 4th job "srun -N 2-2 -E 2:2 sleep 100" requires 8 CPUs
@@ -211,19 +211,19 @@ JOBID PARTITION   NAME     USER  ST   TIME  NODES NODELIST(REASON)
 
 <p class="footer"><a href="#top">top</a></p>
 
-<h2>Examples of CR_Memory, CR_Socket_Memory, and CR_CPU_Memory type consumable resources</h2> 
+<h2>Examples of CR_Memory, CR_Socket_Memory, and CR_CPU_Memory type consumable resources</h2>
 
 <pre>
 # sinfo -lNe
-NODELIST     NODES PARTITION  STATE  CPUS  S:C:T MEMORY 
-hydra[12-16]     5 allNodes*  ...       4  2:2:1   2007 
+NODELIST     NODES PARTITION  STATE  CPUS  S:C:T MEMORY
+hydra[12-16]     5 allNodes*  ...       4  2:2:1   2007
 </pre>
 
 <p>Using select/cons_res plug-in with CR_Memory</p>
 <pre>
 Example:
 # srun -N 5 -n 20 --job-mem=1000 sleep 100 &  <-- running
-# srun -N 5 -n 20 --job-mem=10 sleep 100 &    <-- running 
+# srun -N 5 -n 20 --job-mem=10 sleep 100 &    <-- running
 # srun -N 5 -n 10 --job-mem=1000 sleep 100 &  <-- queued and waiting for resources
 
 # squeue
@@ -245,7 +245,7 @@ JOBID PARTITION   NAME     USER  ST   TIME  NODES NODELIST(REASON)
  1889  allNodes  sleep   sballe   R   0:08      5 hydra[12-16]
 
 Example 2:
-# srun -N 5 -n 10 --job-mem=10 sleep 100 & <-- running 
+# srun -N 5 -n 10 --job-mem=10 sleep 100 & <-- running
 # srun -n 1 --job-mem=10 sleep 100 & <-- queued and waiting for resourcessqueue
 
 # squeue
@@ -257,7 +257,7 @@ JOBID PARTITION   NAME     USER  ST   TIME  NODES NODELIST(REASON)
 <p>Using select/cons_res plug-in with CR_CPU_Memory (4 CPUs/node)</p>
 <pre>
 Example 1:
-# srun -N 5 -n 5 --job-mem=1000 sleep 100 &  <-- running 
+# srun -N 5 -n 5 --job-mem=1000 sleep 100 &  <-- running
 # srun -N 5 -n 5 --job-mem=10 sleep 100 &    <-- running
 # srun -N 5 -n 5 --job-mem=1000 sleep 100 &  <-- queued and waiting for resources
 
@@ -268,7 +268,7 @@ JOBID PARTITION   NAME     USER  ST   TIME  NODES NODELIST(REASON)
  1834  allNodes  sleep   sballe   R   0:07      5 hydra[12-16]
 
 Example 2:
-# srun -N 5 -n 20 --job-mem=10 sleep 100 & <-- running 
+# srun -N 5 -n 20 --job-mem=10 sleep 100 & <-- running
 # srun -n 1 --job-mem=10 sleep 100 &       <-- queued and waiting for resources
 
 # squeue
@@ -356,7 +356,7 @@ JOBID PARTITION   NAME   USER  ST   TIME  NODES NODELIST(REASON)
 <p>Once Job 3 finishes, Job 5 is allocated resources and can run.</p>
 
 <p>The advantage of the exclusive mode scheduling policy is
-that the a job gets all the resources of the assigned nodes for optimal 
+that a job gets all the resources of the assigned nodes for optimal
 parallel performance. The drawback is
 that jobs can tie up large amount of resources that it does not use and which
 cannot be shared with other jobs.</p>
diff --git a/doc/html/cons_res_share.shtml b/doc/html/cons_res_share.shtml
index 84e6db3fa4b143cf1025bc5f088682b3e1c68edf..2142d3541039e2b7de8792c0b8604b184167b481 100644
--- a/doc/html/cons_res_share.shtml
+++ b/doc/html/cons_res_share.shtml
@@ -234,15 +234,15 @@ the job.</TD>
 </TABLE>
 <P>Users can specify their job's memory requirements one of two ways. The
 <CODE>srun --mem=&lt;num&gt;</CODE> option can be used to specify the jobs
-memory requirement on a per allocated node basis. This option is recommended 
-for use with the <CODE>select/linear</CODE> plugin, which allocates 
+memory requirement on a per allocated node basis. This option is recommended
+for use with the <CODE>select/linear</CODE> plugin, which allocates
 whole nodes to jobs. The
 <CODE>srun --mem-per-cpu=&lt;num&gt;</CODE> option can be used to specify the
 jobs memory requirement on a per allocated CPU basis. This is recommended
-for use with the <CODE>select/cons_res</CODE> plugin which can 
+for use with the <CODE>select/cons_res</CODE> plugin which can
 allocate individual CPUs to jobs.</P>
 
-<P>Default and maximum values for memory on a per node or per CPU basis can 
+<P>Default and maximum values for memory on a per node or per CPU basis can
 be configured by the system administrator using the following
 <CODE>slurm.conf</CODE> options: <CODE>DefMemPerCPU</CODE>,
 <CODE>DefMemPerNode</CODE>, <CODE>MaxMemPerCPU</CODE> and
@@ -254,8 +254,8 @@ the maximum value.
 Enforcement of a jobs memory allocation is performed by setting the "maximum
 data segment size" and the "maximum virtual memory size" system limits to the
 appropriate values before launching the tasks. Enforcement is also managed by
-the accounting plugin, which periodically gathers data about running jobs. Set 
-<CODE>JobAcctGather</CODE> and <CODE>JobAcctFrequency</CODE> to 
+the accounting plugin, which periodically gathers data about running jobs. Set
+<CODE>JobAcctGather</CODE> and <CODE>JobAcctFrequency</CODE> to
 values suitable for your system.</P>
 
 <p class="footer"><a href="#top">top</a></p>
diff --git a/doc/html/cray.shtml b/doc/html/cray.shtml
index 0d92a64b4ae903590b6b4c981bd1099e60beab46..7f1a7968660874f4756bed803f740025305183d8 100644
--- a/doc/html/cray.shtml
+++ b/doc/html/cray.shtml
@@ -8,19 +8,19 @@
 
 <p>This document describes the unique features of SLURM on
 Cray computers.
-You should be familiar with the SLURM's mode of operation on Linux clusters 
-before studying the relatively few differences in Cray system 
+You should be familiar with the SLURM's mode of operation on Linux clusters
+before studying the relatively few differences in Cray system
 operation described in this document.</p>
 
 <p>SLURM's primary mode of operation is designed for use on clusters with
-nodes configured in a one-dimensional space. 
-Minor changes were required for the <i>smap</i> and <i>sview</i> tools 
-to map nodes in a three-dimensional space. 
-Some changes are also desirable to optimize job placement in three-dimensional 
+nodes configured in a one-dimensional space.
+Minor changes were required for the <i>smap</i> and <i>sview</i> tools
+to map nodes in a three-dimensional space.
+Some changes are also desirable to optimize job placement in three-dimensional
 space.</p>
 
 <p>SLURM has added an interface to Cray's Application Level Placement Scheduler
-(ALPS). The ALPS <i>aprun</i> command must used for task launch rather than SLURM's 
+(ALPS). The ALPS <i>aprun</i> command must be used for task launch rather than SLURM's
 <i>srun</i> command. You should create a resource reservation using SLURM's
 <i>salloc</i> or <i>sbatch</i> command and execute <i>aprun</i> from within
 that allocation. <//p>
@@ -29,14 +29,14 @@ that allocation. <//p>
 
 <h3>Cray/ALPS configuration</h3>
 
-<p>Node names must have a three-digit suffix describing their 
-zero-origin position in the X-, Y- and Z-dimension respectively (e.g. 
-"tux000" for X=0, Y=0, Z=0; "tux123" for X=1, Y=2, Z=3). 
+<p>Node names must have a three-digit suffix describing their
+zero-origin position in the X-, Y- and Z-dimension respectively (e.g.
+"tux000" for X=0, Y=0, Z=0; "tux123" for X=1, Y=2, Z=3).
 Rectangular prisms of nodes can be specified in SLURM commands and
-configuration files using the system name prefix with the end-points 
-enclosed in square brackets and separated by an "x". 
-For example "tux[620x731]" is used to represent the eight nodes in a 
-block with endpoints at "tux620" and "tux731" (tux620, tux621, tux630, 
+configuration files using the system name prefix with the end-points
+enclosed in square brackets and separated by an "x".
+For example "tux[620x731]" is used to represent the eight nodes in a
+block with endpoints at "tux620" and "tux731" (tux620, tux621, tux630,
 tux631, tux720, tux721, tux730, tux731).
 <b>NOTE:</b> We anticipate that Cray will provide node coordinate
 information via the ALPS interface in the future, which may result
@@ -47,18 +47,18 @@ BATCH.</p>
 
 <h3>SLURM configuration</h3>
 
-<p>Four variables must be defined in the <i>config.h</i> file: 
-<i>APBASIL_LOC</i> (location of the <i>apbasil</i> command), 
+<p>Four variables must be defined in the <i>config.h</i> file:
+<i>APBASIL_LOC</i> (location of the <i>apbasil</i> command),
 <i>HAVE_FRONT_END</i>, <i>HAVE_CRAY_XT</i> and <i>HAVE_3D</i>.
-The <i>apbasil</i> command should automatically be found. 
+The <i>apbasil</i> command should automatically be found.
 If that is not the case, please notify us of its location on your system
 and we will add that to the search paths tested at configure time.
-The other variable definitions can be initiated in several different 
+The other variable definitions can be initiated in several different
 ways depending upon how SLURM is being built.
 <ol>
-<li>Execute the <i>configure</i> command with the option 
+<li>Execute the <i>configure</i> command with the option
 <i>--enable-cray-xt</i> <b>OR</b></li>
-<li>Execute the <i>rpmbuild</i> command with the option 
+<li>Execute the <i>rpmbuild</i> command with the option
 <i>--with cray_xt</i> <b>OR</b></li>
 <li>Add <i>%with_cray_xt 1</i> to your <i>~/.rpmmacros</i> file.</li>
 </ol></p>
@@ -68,39 +68,39 @@ the system. It is from here that users will execute <i>aprun</i>
 commands to launch tasks.
 This is specified in the <i>slurm.conf</i> file by using the
 <i>NodeName</i> field to identify the compute nodes and both the
-<i>NodeAddr</i> and <i>NodeHostname</i> fields to identify the 
+<i>NodeAddr</i> and <i>NodeHostname</i> fields to identify the
 computer when <i>slurmd</i> runs (normally some sort of front-end node)
 as seen in the examples below.</p>
 
-<p>Next you need to select from two options for the resource selection 
+<p>Next you need to select from two options for the resource selection
 plugin (the <i>SelectType</i> option in SLURM's <i>slurm.conf</i> configuration
 file):
 <ol>
-<li><b>select/cons_res</b> - Performs a best-fit algorithm based upon a 
+<li><b>select/cons_res</b> - Performs a best-fit algorithm based upon a
 one-dimensional space to allocate whole nodes, sockets, or cores to jobs
 based upon other configuration parameters.</li>
-<li><b>select/linear</b> - Performs a best-fit algorithm based upon a 
+<li><b>select/linear</b> - Performs a best-fit algorithm based upon a
 one-dimensional space to allocate whole nodes to jobs.</li>
 </ol>
 
-<p>In order for <i>select/cons_res</i> or <i>select/linear</i> to 
-allocate resources physically nearby in three-dimensional space, the 
-nodes be specified in SLURM's <i>slurm.conf</i> configuration file in 
+<p>In order for <i>select/cons_res</i> or <i>select/linear</i> to
+allocate resources physically nearby in three-dimensional space, the
+nodes must be specified in SLURM's <i>slurm.conf</i> configuration file in
 such a fashion that those nearby in <i>slurm.conf</i> (one-dimensional
-space) are also nearby in the physical three-dimensional space. 
-If the definition of the nodes in SLURM's <i>slurm.conf</i> configuration 
+space) are also nearby in the physical three-dimensional space.
+If the definition of the nodes in SLURM's <i>slurm.conf</i> configuration
 file are listed on one line (e.g. <i>NodeName=tux[000x333]</i>),
-SLURM will automatically perform that conversion using a 
+SLURM will automatically perform that conversion using a
 <a href="http://en.wikipedia.org/wiki/Hilbert_curve">Hilbert curve</a>.
 Otherwise you may construct your own node name ordering and list them
 one node per line in <i>slurm.conf</i>.
 Note that each node must be listed exactly once and consecutive
-nodes should be nearby in three-dimensional space. 
-Also note that each node must be defined individually rather than using 
-a hostlist expression in order to preserve the ordering (there is no 
+nodes should be nearby in three-dimensional space.
+Also note that each node must be defined individually rather than using
+a hostlist expression in order to preserve the ordering (there is no
 problem using a hostlist expression in the partition specification after
 the nodes have already been defined).
-The open source code used by SLURM to generate the Hilbert curve is 
+The open source code used by SLURM to generate the Hilbert curve is
 included in the distribution at <i>contribs/skilling.c</i> in the event
 that you wish to experiment with it to generate your own node ordering.
 Two examples of SLURM configuration files are shown below:</p>
@@ -131,10 +131,10 @@ PartitionName=debug Nodes=tux[000x111] Default=Yes State=UP
 </pre>
 
 <p>In both of the examples above, the node names output by the
-<i>scontrol show nodes</i> will be ordered as defined (sequentially 
+<i>scontrol show nodes</i> will be ordered as defined (sequentially
 along the Hilbert curve or per the ordering in the <i>slurm.conf</i> file)
-rather than in numeric order (e.g. "tux001" follows "tux101" rather 
-than "tux000"). 
+rather than in numeric order (e.g. "tux001" follows "tux101" rather
+than "tux000").
 SLURM partitions should contain nodes which are defined sequentially
 by that ordering for optimal performance.</p>
 
diff --git a/doc/html/crypto_plugins.shtml b/doc/html/crypto_plugins.shtml
index 86b04f8ac4377cb0f5c813d1e5187fa89aae4fa3..c7756a8878f9a83673f92c20698c9d55b2b9bec5 100644
--- a/doc/html/crypto_plugins.shtml
+++ b/doc/html/crypto_plugins.shtml
@@ -3,69 +3,69 @@
 <h1><a name="top">SLURM Cryptographic Plugin Programmer Guide</a></h1>
 
 <h2> Overview</h2>
-<p> This document describes SLURM cryptographic plugins and the API that 
-defines them. 
-It is intended as a resource to programmers wishing to write their own 
-SLURM cryptographic plugins. 
+<p> This document describes SLURM cryptographic plugins and the API that
+defines them.
+It is intended as a resource to programmers wishing to write their own
+SLURM cryptographic plugins.
 This is version 0 of the API.</p>
 
-<p>SLURM cryptographic plugins are SLURM plugins that implement 
-a digital signature mechanism. 
-The slurmctld daemon generates a job step credential, signs it, 
-and transmits it to an srun program. 
-The srun program then transmits it to the slurmd daemons directly. 
-The slurmctld daemon does not communicate directly with the slurmd 
-daemons at this time for performance reasons, but the job step 
-credential must be validated by the slurmd daemon as being 
-generated by the slurmctld daemon. 
+<p>SLURM cryptographic plugins are SLURM plugins that implement
+a digital signature mechanism.
+The slurmctld daemon generates a job step credential, signs it,
+and transmits it to an srun program.
+The srun program then transmits it to the slurmd daemons directly.
+The slurmctld daemon does not communicate directly with the slurmd
+daemons at this time for performance reasons, but the job step
+credential must be validated by the slurmd daemon as being
+generated by the slurmctld daemon.
 Digital signatures provide this validation mechanism.
-The plugins must conform to the SLURM Plugin API with the following 
+The plugins must conform to the SLURM Plugin API with the following
 specifications:</p>
 
 <p><span class="commandline">const char plugin_type[]</span><br>
-The major type must be &quot;crypto.&quot; 
-The minor type can be any recognizable abbreviation for the type of 
-cryptographic mechanism. 
+The major type must be &quot;crypto.&quot;
+The minor type can be any recognizable abbreviation for the type of
+cryptographic mechanism.
 We recommend, for example:</p>
 <ul>
 <li><b>munge</b>&#151;LLNL's Munge system.</li>
 <li><b>openssl</b>&#151;Open SSL.</li>
 </ul></p>
 
-<p>The <span class="commandline">plugin_name</span> and 
-<span class="commandline">plugin_version</span> 
-symbols required by the SLURM Plugin API require no specialization for 
-cryptographic support. 
+<p>The <span class="commandline">plugin_name</span> and
+<span class="commandline">plugin_version</span>
+symbols required by the SLURM Plugin API require no specialization for
+cryptographic support.
 Note carefully, however, the versioning discussion below.</p>
 
 <h2>Data Objects</h2>
-<p>The implementation must maintain (though not necessarily directly export) an 
-enumerated <span class="commandline">errno</span> to allow SLURM to discover 
-as practically as possible the reason for any failed API call. 
+<p>The implementation must maintain (though not necessarily directly export) an
+enumerated <span class="commandline">errno</span> to allow SLURM to discover
+as practically as possible the reason for any failed API call.
 Plugin-specific enumerated integer values may be used when appropriate.
 
-<p>These values must not be used as return values in integer-valued 
-functions in the API. 
-The proper error return value from integer-valued functions is SLURM_ERROR. 
-The implementation should endeavor to provide useful and pertinent 
-information by whatever means is practical. 
-Successful API calls are not required to reset any errno to a known value. 
-However, the initial value of any errno, prior to any error condition 
+<p>These values must not be used as return values in integer-valued
+functions in the API.
+The proper error return value from integer-valued functions is SLURM_ERROR.
+The implementation should endeavor to provide useful and pertinent
+information by whatever means is practical.
+Successful API calls are not required to reset any errno to a known value.
+However, the initial value of any errno, prior to any error condition
 arising, should be SLURM_SUCCESS. </p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>API Functions</h2>
-<p>The following functions must appear. 
+<p>The following functions must appear.
 Functions which are not implemented should be stubbed.</p>
 
 
 <p class="commandline">void * crypto_read_private_key (const char *path);</p>
 <p style="margin-left:.2in"><b>Description</b>: Generate a private key
 based upon the contents of the supplied file.</p>
-<p style="margin-left:.2in"><b>Argument</b>:<span class="commandline">path</span>&nbsp; 
+<p style="margin-left:.2in"><b>Argument</b>:<span class="commandline">path</span>&nbsp;
 &nbsp;&nbsp;(input) fully-qualified pathname to the private key
 as specified by the <b>JobCredentialPrivateKey</b> configuration parameter.</p>
-<p style="margin-left:.2in"><b>Returns</b>: The pointer to a key on 
+<p style="margin-left:.2in"><b>Returns</b>: The pointer to a key on
 success or NULL on failure.
 Call crypto_destroy_key() to release memory associated with this key.</p>
 
@@ -73,51 +73,51 @@ Call crypto_destroy_key() to release memory associated with this key.</p>
 <p class="commandline">void * crypto_read_public_key (const char *path);</p>
 <p style="margin-left:.2in"><b>Description</b>: Generate a public key
 based upon the contents of the supplied file.</p>
-<p style="margin-left:.2in"><b>Argument</b>:<span class="commandline">path</span>&nbsp;     
+<p style="margin-left:.2in"><b>Argument</b>:<span class="commandline">path</span>&nbsp;
 &nbsp;&nbsp;(input) fully-qualified pathname to the public key
-as specified by the <b>JobCredentialPublicCertificate</b> configuration 
+as specified by the <b>JobCredentialPublicCertificate</b> configuration
 parameter.</p>
-<p style="margin-left:.2in"><b>Returns</b>: The pointer to a key on 
+<p style="margin-left:.2in"><b>Returns</b>: The pointer to a key on
 success or NULL on failure.
 Call crypto_destroy_key() to release memory associated with this key.</p>
 
 
 <p class="commandline">void crypto_destroy_key (void *key);</p>
-<p style="margin-left:.2in"><b>Description</b>: Release storage for 
+<p style="margin-left:.2in"><b>Description</b>: Release storage for
 a public or private key.</p>
 <p style="margin-left:.2in"><b>Argument</b>:<span class="commandline"> key</span>&nbsp;
-&nbsp;&nbsp;(input/output) pointer to the key previously allocated 
-by crypto_read_private_key() or crypto_read_public_key().</p> 
+&nbsp;&nbsp;(input/output) pointer to the key previously allocated
+by crypto_read_private_key() or crypto_read_public_key().</p>
 
 
 <p class="commandline">char *crypto_str_error(void);</p>
-<p style="margin-left:.2in"><b>Description</b>: Return a string 
+<p style="margin-left:.2in"><b>Description</b>: Return a string
 describing the last error generated by the the cryptographic software.</p>
 <p style="margin-left:.2in"><b>Returns</b>: A pointer to a string.</p>
-                                                                                                                       
+
 <p class="commandline">int crypto_sign (void *key, char *buffer, int buf_size,
 char **sig_pp, unsigned int *sig_size_p);</p>
-<p style="margin-left:.2in"><b>Description</b>: Generate a signature for 
+<p style="margin-left:.2in"><b>Description</b>: Generate a signature for
 the supplied buffer.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:</br>
 <span class="commandline"> key</span>&nbsp;
-&nbsp;&nbsp;(input) pointer to the key previously generated by 
+&nbsp;&nbsp;(input) pointer to the key previously generated by
 crypto_read_private_key() or crypto_read_public_key().<br>
-<span class="commandline"> buffer</span>&nbsp; &nbsp;&nbsp;(input) data to 
+<span class="commandline"> buffer</span>&nbsp; &nbsp;&nbsp;(input) data to
 be signed.<br>
-<span class="commandline"> buf_size</span>&nbsp; &nbsp;&nbsp;(input) 
+<span class="commandline"> buf_size</span>&nbsp; &nbsp;&nbsp;(input)
 size of buffer, in bytes.<br>
 <span class="commandline"> sig_pp</span>&nbsp; &nbsp;&nbsp;(input/output)
-Location in which to store the signature. NOTE: The storage for 
-sig_pp should be allocated using xmalloc() and will be freed by 
+Location in which to store the signature. NOTE: The storage for
+sig_pp should be allocated using xmalloc() and will be freed by
 the caller using xfree().<br>
 <span class="commandline"> sig_size_p</span>&nbsp; &nbsp;&nbsp;(input/output)
 Location in which to store the size of the signature (sig_pp).</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. 
-On failure, the plugin should return SLURM_ERROR and set the errno to an 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful.
+On failure, the plugin should return SLURM_ERROR and set the errno to an
 appropriate value to indicate the reason for failure.</p>
 
-<p class="commandline">int crypto_verify_sign (void *key, char *buffer, 
+<p class="commandline">int crypto_verify_sign (void *key, char *buffer,
 int buf_size, char *signature, unsigned int sig_size);</p>
 <p style="margin-left:.2in"><b>Description</b>: Generate a signature for
 the supplied buffer.</p>
@@ -125,12 +125,12 @@ the supplied buffer.</p>
 <span class="commandline"> key</span>&nbsp;
 &nbsp;&nbsp;(input) pointer to the key previously generated by
 crypto_read_private_key() or crypto_read_public_key().<br>
-<span class="commandline"> buffer</span>&nbsp; &nbsp;&nbsp;(input) data 
+<span class="commandline"> buffer</span>&nbsp; &nbsp;&nbsp;(input) data
 previously signed by crypto_sign().<br>
 <span class="commandline"> buf_size</span>&nbsp; &nbsp;&nbsp;(input)
 size of buffer, in bytes.<br>
 <span class="commandline"> signature</span>&nbsp; &nbsp;&nbsp;(input)
-Signature as returned in sig_pp by the crypto_sign() function and 
+Signature as returned in sig_pp by the crypto_sign() function and
 to be confirmed.</br>
 <span class="commandline"> sig_size</span>&nbsp; &nbsp;&nbsp;(input)
 Size of the signature as returned in sig_size_p by crypto_sign().</p>
@@ -140,9 +140,9 @@ appropriate value to indicate the reason for failure.</p>
 
 
 <h2>Versioning</h2>
-<p> This document describes version 0 of the SLURM cryptographic API. 
-Future releases of SLURM may revise this API. 
-A cryptographic plugin conveys its ability to implement a particular 
+<p> This document describes version 0 of the SLURM cryptographic API.
+Future releases of SLURM may revise this API.
+A cryptographic plugin conveys its ability to implement a particular
 API version using the mechanism outlined for SLURM plugins.</p>
 <p class="footer"><a href="#top">top</a></p>
 
diff --git a/doc/html/dist_plane.shtml b/doc/html/dist_plane.shtml
index c910cb27e6a446842f6e72e7e032ffc9f88281e2..57747d5b57d17d710e4c36ee4c3471f8cfeee607 100644
--- a/doc/html/dist_plane.shtml
+++ b/doc/html/dist_plane.shtml
@@ -2,11 +2,11 @@
 
 <h1><a name="top">Plane distribution: <i>-m plane=plane_size</i></a></h1>
 
-<p>The plane distribution allocates tasks in blocks of size 
+<p>The plane distribution allocates tasks in blocks of size
 plane_size in a round-robin fashion across allocated nodes.
 
 <p>To specify the plane distribution add to the srun command line
-<i>--distribution=plane=plane_size</i> or <i>-m plane=plane_size</i> 
+<i>--distribution=plane=plane_size</i> or <i>-m plane=plane_size</i>
 where <i>plane_size</i> is the requested plane/block size.
 
 <h2>Examples of plane distributions</h2>
@@ -38,7 +38,7 @@ following allocation of the task ids:
 </center>
 
 <p>On <u>four (4)</u> nodes: <i>srun -N 4-4 -n 21 -m plane=2 <...>
-</i>. 
+</i>.
 
 <p>The plane distribution with a plane_size of 2 results in the
 following allocation of the task ids:
@@ -61,7 +61,7 @@ error message:
 <pre>
 if ((n/plane_size < min_nodes) ((N-1)*plane_size >= n))
             generate an error message: Too few processes for the
-	           requested {plane, node} distribution.  
+	           requested {plane, node} distribution.
 
 where min_nodes is the minimum number of nodes requested for the job.
 </pre>
@@ -81,7 +81,7 @@ affinity is available in SLURM when the task affinity plug-in is
 enabled.
 
 <p>On a dual-processor node with quad-core processors (see figure 4)
-the plane distribution results in: 
+the plane distribution results in:
 
 <ul>
 <li>One plane if the plane_size=8. In this case the processors are
@@ -99,7 +99,7 @@ schedule across the node first.</li>
 
 <p>In a multi-core/hyper-threaded environment, two planes would
 provide better locality but potentially more contention for other
-resources. 
+resources.
 
 <p>On the other hand, four planes (scheduling across processors) would
 minimize contention for cache and memory.
@@ -112,7 +112,7 @@ affinity enabled</h3>
 <p>In the examples below we assume we have 21 tasks and that the
 task list is: 0, 1, 2, 3, 4, ..., 19, 20.
 
-<p>On <u>One (1)</u> node: 
+<p>On <u>One (1)</u> node:
 <i>srun -N 1-1 -n 21 -m plane=4 --cpu_bind=core <...></i>.
 Even thought the user specified a plane_size of 4 the final plane
 distribution results in a plane distribution with plane_size=8.
@@ -120,23 +120,23 @@ distribution results in a plane distribution with plane_size=8.
 <p>
 <center>
 <img src="plane_ex5.gif">
-<p>Figure 5: Process layout for 
+<p>Figure 5: Process layout for
 <i>srun -N 1-1 -n 21 -m plane=4 --cpu_bind=core <...></i>.
 </center>
 
-<p>On <u>four (4)</u> nodes: 
-<i>srun -N 4-4 -n 21 -m plane=4 --cpu_bind=core <...></i>. 
+<p>On <u>four (4)</u> nodes:
+<i>srun -N 4-4 -n 21 -m plane=4 --cpu_bind=core <...></i>.
 The plane distribution with a plane_size of 4 results in the
 following allocation of the task ids:
 
 <p>
 <center>
 <img src="plane_ex6.gif" width=600>
-<p>Figure 6: Process layout for 
+<p>Figure 6: Process layout for
 <i>srun -N 4-4 -n 21 -m plane=4 --cpu_bind=core <...></i>.
 </center>
 
-<p>On <u>four (4)</u> nodes: 
+<p>On <u>four (4)</u> nodes:
 <i>srun -N 4-4 -n 21 -m plane=2 --cpu_bind=core <...>
 </i>. The plane distribution with a plane_size of 2 results in the
 following allocation of the task ids:
@@ -144,7 +144,7 @@ following allocation of the task ids:
 <p>
 <center>
 <img src="plane_ex7.gif" width=600>
-<p>Figure 7: Process layout for 
+<p>Figure 7: Process layout for
 <i>srun -N 4-4 -n 21 -m plane=2 --cpu_bind=core <...></i>.
 </center>
 
diff --git a/doc/html/download.shtml b/doc/html/download.shtml
index 1f72a8736e005832293f8dc34443fc485e5cb952..9e63f40529fd3627faefb9e5ebf86c8e5d793656 100644
--- a/doc/html/download.shtml
+++ b/doc/html/download.shtml
@@ -2,24 +2,24 @@
 
 <h1>Download</h1>
 <p>
-SLURM source can be downloaded from 
+SLURM source can be downloaded from
 <a href="http://sourceforge.net/projects/slurm/">
 http://sourceforge.net/projects/slurm/</a><br>
-SLURM has also been packaged for 
-<a href="http://packages.debian.org/src:slurm-llnl">Debian</a> and 
+SLURM has also been packaged for
+<a href="http://packages.debian.org/src:slurm-llnl">Debian</a> and
 <a href="http://packages.ubuntu.com/src:slurm-llnl">Ubuntu</a>
 (both named <i>slurm-llnl</i>).</p>
 
 <p>Related software available from various sources include:
 <ul>
 
-<li><b>Authentication</b> plugins identifies the user originating 
+<li><b>Authentication</b> plugins identify the user originating
 a message.</li>
 <ul>
 <li><a href="http://www.theether.org/authd">authd</a></li>
 <li><b>Munge</b> (recommended)<br>
-In order to compile the "auth/munge" authentication plugin for SLURM, 
-you will need to build and install Munge, available from 
+In order to compile the "auth/munge" authentication plugin for SLURM,
+you will need to build and install Munge, available from
 <a href="http://home.gna.org/munge/">http://home.gna.org/munge/</a> or
 <a href="http://packages.debian.org/src:munge">Debian</a> or
 <a href="http://packages.ubuntu.com/src:munge">Ubuntu</a>.
@@ -38,14 +38,14 @@ See our <a href="accounting.html">Accounting</a> web page for more information.<
 <li><a href="http://www.totalviewtech.com/"><b>TotalView</b></a>
 is a GUI-based source code debugger well suited for parallel applications.<li>
 <li><a href="http://padb.pittman.org.uk/"><b>Padb</b></a>
-is a job inspection tool for examining and debugging parallel programs, primarily it simplifies the process of gathering stack traces but also supports a wide range of other functions. 
+is a job inspection tool for examining and debugging parallel programs, primarily it simplifies the process of gathering stack traces but also supports a wide range of other functions.
 It's an open source, non-interactive, command line, scriptable tool intended for use by programmers and system administrators alike.</li>
 </ul><br>
 
 <li><b>Digital signatures</b> (Cypto plugin) are used to insure message are not altered.</li>
 <ul>
 <li><b>Munge</b> (recommended)<br>
-Munge can be used at an alternative to OpenSSL. 
+Munge can be used as an alternative to OpenSSL.
 Munge is available under the Gnu General Public License.
 See Munge download information above.</li>
 <li><b>OpenSSL</b><br>
@@ -54,7 +54,7 @@ Download it from <a href="http://www.openssl.org/">http://www.openssl.org/</a>.<
 </ul><br>
 
 <li><b>Hostlist</b><br>
-A python program used for manipulation of SLURM hostlists including 
+A python program used for manipulation of SLURM hostlists including
 functions such as intersection and difference. Download the code from:<br>
 <a href="http://www.nsc.liu.se/~kent/python-hostlist">
 http://www.nsc.liu.se/~kent/python-hostlist</a></li><br>
@@ -63,7 +63,7 @@ http://www.nsc.liu.se/~kent/python-hostlist</a></li><br>
 <ul>
 <li><b>QsNet</b><br>
 In order to build the "switch/elan" plugin for SLURM, you will need
-the <b>qsnetlibs</b> development libraries from 
+the <b>qsnetlibs</b> development libraries from
 <a href="http://www.quadrics.com">Quadrics</a>. The Elan
 plugin also requires the <b>libelanhosts</b> library and
 a corresponding /etc/elanhosts configuration file, used to map
@@ -73,10 +73,10 @@ https://sourceforge.net/projects/slurm/</a>.
 </ul><br>
 
 <li><b>I/O Watchdog</b><br>
-A facility for monitoring user applications, most notably parallel jobs, 
-for <i>hangs</i> which typically have a side-effect of ceasing all write 
-activity. This faciltiy attempts to monitor all write activity of an 
-application and trigger a set of user-defined actions when write activity 
+A facility for monitoring user applications, most notably parallel jobs,
+for <i>hangs</i> which typically have a side-effect of ceasing all write
+activity. This facility attempts to monitor all write activity of an
+application and trigger a set of user-defined actions when write activity
 as ceased for a configurable period of time. A SPANK plugin is provided
 for use with SLURM. See the README and man page in the package for more
 details. Download the latest source from:<br>
@@ -99,16 +99,16 @@ http://io-watchdog.googlecode.com/files/io-watchdog-0.6.tar.bz2</a></li><br>
 </ul><br>
 
 <li><b>PAM Modules (pam_slurm)</b><br>
-Pluggable Authentication Module (PAM) for restricting access to compute nodes 
-where SLURM performs resource management. Access to the node is restricted to 
+Pluggable Authentication Module (PAM) for restricting access to compute nodes
+where SLURM performs resource management. Access to the node is restricted to
 user root and users who have been allocated resources on that node.
-NOTE: pam_slurm is included within the SLURM distribution for version 2.1 
+NOTE: pam_slurm is included within the SLURM distribution for version 2.1
 or higher.
-For earlier SLURM versions, pam_slurm is available for download from<br> 
-<a href="https://sourceforge.net/projects/slurm/"> 
-https://sourceforge.net/projects/slurm/</a><br> 
-SLURM's PAM module has also been packaged for 
-<a href="http://packages.debian.org/src:libpam-slurm">Debian</a> and 
+For earlier SLURM versions, pam_slurm is available for download from<br>
+<a href="https://sourceforge.net/projects/slurm/">
+https://sourceforge.net/projects/slurm/</a><br>
+SLURM's PAM module has also been packaged for
+<a href="http://packages.debian.org/src:libpam-slurm">Debian</a> and
 <a href="http://packages.ubuntu.com/src:libpam-slurm">Ubuntu</a>
 (both named <i>libpam-slurm</i>).</li><br>
 
@@ -125,10 +125,10 @@ Moab Cluster Suite</a></li>
 
 <li><b>Scripting interfaces</b>
 <ul>
-<li>A <b>Perl</b> interface is included in the SLURM distribution in the 
+<li>A <b>Perl</b> interface is included in the SLURM distribution in the
 <i>contribs/perlapi</i> directory and packaged in the <i>perapi</i> RPM.</li>
 
-<li><a href="http://www.gingergeeks.co.uk/pyslurm/">PySlurm</a> is a 
+<li><a href="http://www.gingergeeks.co.uk/pyslurm/">PySlurm</a> is a
 Python/Pyrex module to interface with SLURM. There is also a Python module
 in the SLURM distribution to expand and collect hostlist expressions at
 <i>contribs/python/hostlist</i>.</li>
@@ -151,16 +151,16 @@ repository with the following command:<br>
 <i>svn checkout http://slurm-spank-plugins.googlecode.com/svn/trunk/ slurm-plugins</i></li><br>
 
 <li><b>PAM Module (pam_slurm)</b><br>
-Pluggable Authentication Module (PAM) for restricting access to compute nodes 
-where SLURM performs resource management. Access to the node is restricted to 
+Pluggable Authentication Module (PAM) for restricting access to compute nodes
+where SLURM performs resource management. Access to the node is restricted to
 user root and users who have been allocated resources on that node.
-NOTE: pam_slurm is included within the SLURM distribution for version 2.1 
+NOTE: pam_slurm is included within the SLURM distribution for version 2.1
 or higher.
-For earlier SLURM versions, pam_slurm is available for download from<br> 
-<a href="https://sourceforge.net/projects/slurm/"> 
-https://sourceforge.net/projects/slurm/</a><br> 
-SLURM's PAM module has also been packaged for 
-<a href="http://packages.debian.org/src:libpam-slurm">Debian</a> and 
+For earlier SLURM versions, pam_slurm is available for download from<br>
+<a href="https://sourceforge.net/projects/slurm/">
+https://sourceforge.net/projects/slurm/</a><br>
+SLURM's PAM module has also been packaged for
+<a href="http://packages.debian.org/src:libpam-slurm">Debian</a> and
 <a href="http://packages.ubuntu.com/src:libpam-slurm">Ubuntu</a>
 (both named <i>libpam-slurm</i>).</li><br>
 
diff --git a/doc/html/entities.gif b/doc/html/entities.gif
index 803dfa69d7c626f7b0cea45921f119e392a00e2c..f1b57f6f451acfe5eff929e900bcd02cd8a886c2 100644
Binary files a/doc/html/entities.gif and b/doc/html/entities.gif differ
diff --git a/doc/html/faq.shtml b/doc/html/faq.shtml
index 3f4c48a208ed43a411cdedb169aa65558ad7cf81..07386e9d61c7ecec146ddd2de52fcb552eadb7e0 100644
--- a/doc/html/faq.shtml
+++ b/doc/html/faq.shtml
@@ -6,117 +6,117 @@
 <li><a href="#comp">Why is my job/node in COMPLETING state?</a></li>
 <li><a href="#rlimit">Why are my resource limits not propagated?</a></li>
 <li><a href="#pending">Why is my job not running?</a></li>
-<li><a href="#sharing">Why does the srun --overcommit option not permit 
+<li><a href="#sharing">Why does the srun --overcommit option not permit
   multiple jobs to run on nodes?</a></li>
 <li><a href="#purge">Why is my job killed prematurely?</a></li>
 <li><a href="#opts">Why are my srun options ignored?</a></li>
-<li><a href="#backfill">Why is the SLURM backfill scheduler not starting my 
+<li><a href="#backfill">Why is the SLURM backfill scheduler not starting my
   job?</a></li>
-<li><a href="#steps">How can I run multiple jobs from within a single 
+<li><a href="#steps">How can I run multiple jobs from within a single
   script?</a></li>
-<li><a href="#orphan">Why do I have job steps when my job has already 
+<li><a href="#orphan">Why do I have job steps when my job has already
   COMPLETED?</a></li>
-<li><a href="#multi_batch">How can I run a job within an existing job 
+<li><a href="#multi_batch">How can I run a job within an existing job
   allocation?</a></li>
-<li><a href="#user_env">How does SLURM establish the environment for my 
+<li><a href="#user_env">How does SLURM establish the environment for my
   job?</a></li>
 <li><a href="#prompt">How can I get shell prompts in interactive mode?</a></li>
 <li><a href="#batch_out">How can I get the task ID in the output or error file
   name for a batch job?</a></li>
-<li><a href="#parallel_make">Can the <i>make</i> command utilize the resources 
+<li><a href="#parallel_make">Can the <i>make</i> command utilize the resources
   allocated to a SLURM job?</a></li>
 <li><a href="#terminal">Can tasks be launched with a remote terminal?</a></li>
-<li><a href="#force">What does &quot;srun: Force Terminated job&quot; 
+<li><a href="#force">What does &quot;srun: Force Terminated job&quot;
   indicate?</a></li>
-<li><a href="#early_exit">What does this mean: &quot;srun: First task exited 
+<li><a href="#early_exit">What does this mean: &quot;srun: First task exited
   30s ago&quot; followed by &quot;srun Job Failed&quot;?</a></li>
-<li><a href="#memlock">Why is my MPI job  failing due to the locked memory 
+<li><a href="#memlock">Why is my MPI job failing due to the locked memory
   (memlock) limit being too low?</a></li>
-<li><a href="#inactive">Why is my batch job that launches no job steps being 
+<li><a href="#inactive">Why is my batch job that launches no job steps being
   killed?</a></li>
 <li><a href="#arbitrary">How do I run specific tasks on certain nodes
-  in my allocation?</a></li> 
-<li><a href="#hold">How can I temporarily prevent a job from running 
+  in my allocation?</a></li>
+<li><a href="#hold">How can I temporarily prevent a job from running
   (e.g. place it into a <i>hold</i> state)?</a></li>
-<li><a href="#mem_limit">Why are jobs not getting the appropriate 
+<li><a href="#mem_limit">Why are jobs not getting the appropriate
   memory limit?</a></li>
 </ol>
 
 <h2>For Administrators</h2>
 <ol>
 <li><a href="#suspend">How is job suspend/resume useful?</a></li>
-<li><a href="#fast_schedule">How can I configure SLURM to use the resources 
-  actually found on a node rather than what is defined in 
+<li><a href="#fast_schedule">How can I configure SLURM to use the resources
+  actually found on a node rather than what is defined in
   <i>slurm.conf</i>?</a></li>
-<li><a href="#return_to_service">Why is a node shown in state DOWN when the node 
+<li><a href="#return_to_service">Why is a node shown in state DOWN when the node
   has registered for service?</a></li>
 <li><a href="#down_node">What happens when a node crashes?</a></li>
-<li><a href="#multi_job">How can I control the execution of multiple 
+<li><a href="#multi_job">How can I control the execution of multiple
   jobs per node?</a></li>
 <li><a href="#inc_plugin">When the SLURM daemon starts, it prints
   &quot;cannot resolve X plugin operations&quot; and exits. What does this mean?</a></li>
 <li><a href="#sigpipe">Why are user tasks intermittently dying at launch with SIGPIPE
   error messages?</a></li>
-<li><a href="#maint_time">How can I dry up the workload for a maintenance 
+<li><a href="#maint_time">How can I dry up the workload for a maintenance
   period?</a></li>
-<li><a href="#pam">How can PAM be used to control a user's limits on or 
+<li><a href="#pam">How can PAM be used to control a user's limits on or
   access to compute nodes?</a></li>
-<li><a href="#time">Why are jobs allocated nodes and then unable to initiate 
+<li><a href="#time">Why are jobs allocated nodes and then unable to initiate
   programs on some nodes?</a></li>
 <li><a href="#ping"> Why does <i>slurmctld</i> log that some nodes
   are not responding even if they are not in any partition?</a></li>
 <li><a href="#controller"> How should I relocated the primary or backup
   controller?</a></li>
-<li><a href="#multi_slurm">Can multiple SLURM systems be run in 
+<li><a href="#multi_slurm">Can multiple SLURM systems be run in
   parallel for testing purposes?</a></li>
 <li><a href="#multi_slurmd">Can slurm emulate a larger cluster?</a></li>
-<li><a href="#extra_procs">Can SLURM emulate nodes with more 
+<li><a href="#extra_procs">Can SLURM emulate nodes with more
   resources than physically exist on the node?</a></li>
-<li><a href="#credential_replayed">What does a 
-  &quot;credential replayed&quot; error in the <i>SlurmdLogFile</i> 
+<li><a href="#credential_replayed">What does a
+  &quot;credential replayed&quot; error in the <i>SlurmdLogFile</i>
   indicate?</a></li>
-<li><a href="#large_time">What does 
-  &quot;Warning: Note very large processing time&quot; 
+<li><a href="#large_time">What does
+  &quot;Warning: Note very large processing time&quot;
   in the <i>SlurmctldLogFile</i> indicate?</a></li>
 <li><a href="#lightweight_core">How can I add support for lightweight
   core files?</a></li>
 <li><a href="#limit_propagation">Is resource limit propagation
   useful on a homogeneous cluster?</a></li>
-<li<a href="#clock">Do I need to maintain synchronized clocks 
+<li><a href="#clock">Do I need to maintain synchronized clocks
   on the cluster?</a></li>
-<li><a href="#cred_invalid">Why are &quot;Invalid job credential&quot; errors 
+<li><a href="#cred_invalid">Why are &quot;Invalid job credential&quot; errors
   generated?</a></li>
-<li><a href="#cred_replay">Why are 
-  &quot;Task launch failed on node ... Job credential replayed&quot; 
+<li><a href="#cred_replay">Why are
+  &quot;Task launch failed on node ... Job credential replayed&quot;
   errors generated?</a></li>
 <li><a href="#globus">Can SLURM be used with Globus?</li>
 <li><a href="#time_format">Can SLURM time output format include the year?</li>
-<li><a href="#file_limit">What causes the error 
+<li><a href="#file_limit">What causes the error
   &quot;Unable to accept new connection: Too many open files&quot;?</li>
-<li><a href="#slurmd_log">Why does the setting of <i>SlurmdDebug</i> fail 
+<li><a href="#slurmd_log">Why does the setting of <i>SlurmdDebug</i> fail
   to log job step information at the appropriate level?</li>
-<li><a href="#rpm">Why isn't the auth_none.so (or other file) in a 
+<li><a href="#rpm">Why isn't the auth_none.so (or other file) in a
   SLURM RPM?</li>
 <li><a href="#slurmdbd">Why should I use the slurmdbd instead of the
   regular database plugins?</li>
 <li><a href="#debug">How can I build SLURM with debugging symbols?</li>
-<li><a href="#state_preserve">How can I easily preserve drained node 
+<li><a href="#state_preserve">How can I easily preserve drained node
   information between major SLURM updates?</li>
 <li><a href="#health_check">Why doesn't the <i>HealthCheckProgram</i>
   execute on DOWN nodes?</li>
-<li><a href="#batch_lost">What is the meaning of the error 
+<li><a href="#batch_lost">What is the meaning of the error
   &quot;Batch JobId=# missing from master node, killing it&quot;?</a></li>
 <li><a href="#accept_again">What does the messsage
-  &quot;srun: error: Unable to accept connection: Resources temporarily unavailable&quot; 
+  &quot;srun: error: Unable to accept connection: Resources temporarily unavailable&quot;
   indicate?</a></li>
-<li><a href="#task_prolog">How could I automatically print a job's 
+<li><a href="#task_prolog">How could I automatically print a job's
   SLURM job ID to its standard output?</li>
 <li><a href="#moab_start">I run SLURM with the Moab or Maui scheduler.
   How can I start a job under SLURM without the scheduler?</li>
 <li><a href="#orphan_procs">Why are user processes and <i>srun</i>
   running even though the job is supposed to be completed?</li>
 <li><a href="#slurmd_oom">How can I prevent the <i>slurmd</i> and
-  <i>slurmstepd</i> daemons from being killed when a node's memory 
+  <i>slurmstepd</i> daemons from being killed when a node's memory
   is exhausted?</li>
 <li><a href="#ubuntu">I see my host of my calling node as 127.0.1.1
   instead of the correct IP address.  Why is that?</a></li>
@@ -128,55 +128,55 @@
 
 <h2>For Users</h2>
 <p><a name="comp"><b>1. Why is my job/node in COMPLETING state?</b></a><br>
-When a job is terminating, both the job and its nodes enter the COMPLETING state. 
-As the SLURM daemon on each node determines that all processes associated with 
-the job have terminated, that node changes state to IDLE or some other appropriate 
-state for use by other jobs. 
-When every node allocated to a job has determined that all processes associated 
-with it have terminated, the job changes state to COMPLETED or some other 
-appropriate state (e.g. FAILED). 
-Normally, this happens within a second. 
+When a job is terminating, both the job and its nodes enter the COMPLETING state.
+As the SLURM daemon on each node determines that all processes associated with
+the job have terminated, that node changes state to IDLE or some other appropriate
+state for use by other jobs.
+When every node allocated to a job has determined that all processes associated
+with it have terminated, the job changes state to COMPLETED or some other
+appropriate state (e.g. FAILED).
+Normally, this happens within a second.
 However, if the job has processes that cannot be terminated with a SIGKILL
-signal, the job and one or more nodes can remain in the COMPLETING state 
-for an extended period of time. 
-This may be indicative of processes hung waiting for a core file 
-to complete I/O or operating system failure. 
-If this state persists, the system administrator should check for processes 
-associated with the job that cannot be terminated then use the 
-<span class="commandline">scontrol</span> command to change the node's 
-state to DOWN (e.g. &quot;scontrol update NodeName=<i>name</i> State=DOWN Reason=hung_completing&quot;), 
-reboot the node, then reset the node's state to IDLE 
+signal, the job and one or more nodes can remain in the COMPLETING state
+for an extended period of time.
+This may be indicative of processes hung waiting for a core file
+to complete I/O or operating system failure.
+If this state persists, the system administrator should check for processes
+associated with the job that cannot be terminated then use the
+<span class="commandline">scontrol</span> command to change the node's
+state to DOWN (e.g. &quot;scontrol update NodeName=<i>name</i> State=DOWN Reason=hung_completing&quot;),
+reboot the node, then reset the node's state to IDLE
 (e.g. &quot;scontrol update NodeName=<i>name</i> State=RESUME&quot;).
-Note that setting the node DOWN will terminate all running or suspended 
-jobs associated with that node. 
-An alternative is to set the node's state to DRAIN until all jobs 
+Note that setting the node DOWN will terminate all running or suspended
+jobs associated with that node.
+An alternative is to set the node's state to DRAIN until all jobs
 associated with it terminate before setting it DOWN and re-booting.</p>
-<p>Note that SLURM has two configuration parameters that may be used to 
+<p>Note that SLURM has two configuration parameters that may be used to
 automate some of this process.
-<i>UnkillableStepProgram</i> specifies a program to execute when 
+<i>UnkillableStepProgram</i> specifies a program to execute when
 non-killable processes are identified.
 <i>UnkillableStepTimeout</i> specifies how long to wait for processes
-to terminate. 
+to terminate.
 See the "man slurm.conf" for more information about these parameters.</p>
 
 <p><a name="rlimit"><b>2. Why are my resource limits not propagated?</b></a><br>
-When the <span class="commandline">srun</span> command executes, it captures the 
-resource limits in effect at submit time. These limits are propagated to the allocated 
-nodes before initiating the user's job. The SLURM daemon running on that node then 
-tries to establish identical resource limits for the job being initiated. 
-There are several possible reasons for not being able to establish those 
+When the <span class="commandline">srun</span> command executes, it captures the
+resource limits in effect at submit time. These limits are propagated to the allocated
+nodes before initiating the user's job. The SLURM daemon running on that node then
+tries to establish identical resource limits for the job being initiated.
+There are several possible reasons for not being able to establish those
 resource limits.
-<ul> 
-<li>The hard resource limits applied to SLURM's slurmd daemon are lower 
-than the user's soft resources limits on the submit host. Typically 
-the slurmd daemon is initiated by the init daemon with the operating 
-system default limits. This may be addressed either through use of the 
+<ul>
+<li>The hard resource limits applied to SLURM's slurmd daemon are lower
+than the user's soft resources limits on the submit host. Typically
+the slurmd daemon is initiated by the init daemon with the operating
+system default limits. This may be addressed either through use of the
 ulimit command in the /etc/sysconfig/slurm file or enabling
 <a href="#pam">PAM in SLURM</a>.</li>
-<li>The user's hard resource limits on the allocated node are lower than 
-the same user's soft hard resource limits on the node from which the 
-job was submitted. It is recommended that the system administrator 
-establish uniform hard resource limits for users on all nodes 
+<li>The user's hard resource limits on the allocated node are lower than
+the same user's soft hard resource limits on the node from which the
+job was submitted. It is recommended that the system administrator
+establish uniform hard resource limits for users on all nodes
 within a cluster to prevent this from occurring.</li>
 </ul></p>
 <p>NOTE: This may produce the error message &quot;Can't propagate RLIMIT_...&quot;.
@@ -185,98 +185,98 @@ the resource limit should be propagated or the srun command is running
 with verbose logging of actions from the slurmd daemon (e.g. "srun -d6 ...").</p>
 
 <p><a name="pending"><b>3. Why is my job not running?</b></a><br>
-The answer to this question depends upon the scheduler used by SLURM. Executing 
+The answer to this question depends upon the scheduler used by SLURM. Executing
 the command</p>
-<blockquote> 
+<blockquote>
 <p> <span class="commandline">scontrol show config | grep SchedulerType</span></p>
 </blockquote>
-<p> will supply this information. If the scheduler type is <b>builtin</b>, then 
-jobs will be executed in the order of submission for a given partition. Even if 
-resources are available to initiate your job immediately, it will be deferred 
-until no previously submitted job is pending. If the scheduler type is <b>backfill</b>, 
-then jobs will generally be executed in the order of submission for a given partition 
-with one exception: later submitted jobs will be initiated early if doing so does 
-not delay the expected execution time of an earlier submitted job. In order for 
+<p> will supply this information. If the scheduler type is <b>builtin</b>, then
+jobs will be executed in the order of submission for a given partition. Even if
+resources are available to initiate your job immediately, it will be deferred
+until no previously submitted job is pending. If the scheduler type is <b>backfill</b>,
+then jobs will generally be executed in the order of submission for a given partition
+with one exception: later submitted jobs will be initiated early if doing so does
+not delay the expected execution time of an earlier submitted job. In order for
 backfill scheduling to be effective, users' jobs should specify reasonable time
-limits. If jobs do not specify time limits, then all jobs will receive the same 
-time limit (that associated with the partition), and the ability to backfill schedule 
-jobs will be limited. The backfill scheduler does not alter job specifications 
-of required or excluded nodes, so jobs which specify nodes will substantially 
+limits. If jobs do not specify time limits, then all jobs will receive the same
+time limit (that associated with the partition), and the ability to backfill schedule
+jobs will be limited. The backfill scheduler does not alter job specifications
+of required or excluded nodes, so jobs which specify nodes will substantially
 reduce the effectiveness of backfill scheduling. See the <a href="#backfill">
-backfill</a> section for more details. If the scheduler type is <b>wiki</b>, 
-this represents 
+backfill</a> section for more details. If the scheduler type is <b>wiki</b>,
+this represents
 <a href="http://www.clusterresources.com/pages/products/maui-cluster-scheduler.php">
-The Maui Scheduler</a> or 
+The Maui Scheduler</a> or
 <a href="http://www.clusterresources.com/pages/products/moab-cluster-suite.php">
-Moab Cluster Suite</a>. 
-Please refer to its documentation for help. For any scheduler, you can check priorities 
+Moab Cluster Suite</a>.
+Please refer to its documentation for help. For any scheduler, you can check priorities
 of jobs using the command <span class="commandline">scontrol show job</span>.</p>
 
-<p><a name="sharing"><b>4. Why does the srun --overcommit option not permit multiple jobs 
+<p><a name="sharing"><b>4. Why does the srun --overcommit option not permit multiple jobs
 to run on nodes?</b></a><br>
-The <b>--overcommit</b> option is a means of indicating that a job or job step is willing 
-to execute more than one task per processor in the job's allocation. For example, 
-consider a cluster of two processor nodes. The srun execute line may be something 
+The <b>--overcommit</b> option is a means of indicating that a job or job step is willing
+to execute more than one task per processor in the job's allocation. For example,
+consider a cluster of two processor nodes. The srun execute line may be something
 of this sort</p>
 <blockquote>
 <p><span class="commandline">srun --ntasks=4 --nodes=1 a.out</span></p>
 </blockquote>
-<p>This will result in not one, but two nodes being allocated so that each of the four 
-tasks is given its own processor. Note that the srun <b>--nodes</b> option specifies 
+<p>This will result in not one, but two nodes being allocated so that each of the four
+tasks is given its own processor. Note that the srun <b>--nodes</b> option specifies
 a minimum node count and optionally a maximum node count. A command line of</p>
 <blockquote>
 <p><span class="commandline">srun --ntasks=4 --nodes=1-1 a.out</span></p>
 </blockquote>
-<p>would result in the request being rejected. If the <b>--overcommit</b> option 
-is added to either command line, then only one node will be allocated for all 
+<p>would result in the request being rejected. If the <b>--overcommit</b> option
+is added to either command line, then only one node will be allocated for all
 four tasks to use.</p>
-<p>More than one job can execute simultaneously on the same nodes through the use 
-of srun's <b>--shared</b> option in conjunction with the <b>Shared</b> parameter 
-in SLURM's partition configuration. See the man pages for srun and slurm.conf for 
+<p>More than one job can execute simultaneously on the same nodes through the use
+of srun's <b>--shared</b> option in conjunction with the <b>Shared</b> parameter
+in SLURM's partition configuration. See the man pages for srun and slurm.conf for
 more information.</p>
 
 <p><a name="purge"><b>5. Why is my job killed prematurely?</b></a><br>
 SLURM has a job purging mechanism to remove inactive jobs (resource allocations)
 before reaching its time limit, which could be infinite.
-This inactivity time limit is configurable by the system administrator. 
+This inactivity time limit is configurable by the system administrator.
 You can check its value with the command</p>
 <blockquote>
 <p><span class="commandline">scontrol show config | grep InactiveLimit</span></p>
 </blockquote>
-<p>The value of InactiveLimit is in seconds. 
-A zero value indicates that job purging is disabled. 
-A job is considered inactive if it has no active job steps or if the srun 
+<p>The value of InactiveLimit is in seconds.
+A zero value indicates that job purging is disabled.
+A job is considered inactive if it has no active job steps or if the srun
 command creating the job is not responding.
-In the case of a batch job, the srun command terminates after the job script 
-is submitted. 
+In the case of a batch job, the srun command terminates after the job script
+is submitted.
 Therefore batch job pre- and post-processing is limited to the InactiveLimit.
-Contact your system administrator if you believe the InactiveLimit value 
-should be changed. 
+Contact your system administrator if you believe the InactiveLimit value
+should be changed.
 
 <p><a name="opts"><b>6. Why are my srun options ignored?</b></a><br>
-Everything after the command <span class="commandline">srun</span> is 
-examined to determine if it is a valid option for srun. The first 
-token that is not a valid option for srun is considered the command 
-to execute and everything after that is treated as an option to 
+Everything after the command <span class="commandline">srun</span> is
+examined to determine if it is a valid option for srun. The first
+token that is not a valid option for srun is considered the command
+to execute and everything after that is treated as an option to
 the command. For example:</p>
 <blockquote>
 <p><span class="commandline">srun -N2 hostname -pdebug</span></p>
 </blockquote>
-<p>srun processes "-N2" as an option to itself. "hostname" is the 
-command to execute and "-pdebug" is treated as an option to the 
-hostname command. This will change the name of the computer 
-on which SLURM executes the command - Very bad, <b>Don't run 
+<p>srun processes "-N2" as an option to itself. "hostname" is the
+command to execute and "-pdebug" is treated as an option to the
+hostname command. This will change the name of the computer
+on which SLURM executes the command - Very bad, <b>Don't run
 this command as user root!</b></p>
 
 <p><a name="backfill"><b>7. Why is the SLURM backfill scheduler not starting my job?
 </b></a><br>
-There are significant limitations in the current backfill scheduler plugin. 
+There are significant limitations in the current backfill scheduler plugin.
 It was designed to perform backfill node scheduling for a homogeneous cluster.
-It does not manage scheduling on individual processors (or other consumable 
-resources). It does not update the required or excluded node list of 
-individual jobs. It does support job's with constraints/features unless 
-the exclusive OR operator is used in the constraint expression. 
-You can use the scontrol show command to check if these conditions apply.</p> 
+It does not manage scheduling on individual processors (or other consumable
+resources). It does not update the required or excluded node list of
+individual jobs. It does not support jobs with constraints/features unless
+the exclusive OR operator is used in the constraint expression.
+You can use the scontrol show command to check if these conditions apply.</p>
 <ul>
 <li>Partition: State=UP</li>
 <li>Partition: RootOnly=NO</li>
@@ -285,80 +285,80 @@ You can use the scontrol show command to check if these conditions apply.</p>
 <li>Job: ExcNodeList=NULL</li>
 <li>Job: Contiguous=0</li>
 <li>Job: Features=NULL</li>
-<li>Job: MinProcs, MinMemory, and MinTmpDisk satisfied by all nodes in 
+<li>Job: MinProcs, MinMemory, and MinTmpDisk satisfied by all nodes in
 the partition</li>
 <li>Job: MinProcs or MinNodes not to exceed partition's MaxNodes</li>
 </ul>
-<p>If the partitions specifications differ from those listed above, 
-no jobs in that partition will be scheduled by the backfills scheduler. 
+<p>If the partition specifications differ from those listed above,
+no jobs in that partition will be scheduled by the backfill scheduler.
 Their jobs will only be scheduled on a First-In-First-Out (FIFO) basis.</p>
-<p>Jobs failing to satisfy the requirements above (i.e. with specific 
-node requirements) will not be considered candidates for backfill 
-scheduling and other jobs may be scheduled ahead of these jobs. 
-These jobs are subject to starvation, but will not block other 
+<p>Jobs failing to satisfy the requirements above (i.e. with specific
+node requirements) will not be considered candidates for backfill
+scheduling and other jobs may be scheduled ahead of these jobs.
+These jobs are subject to starvation, but will not block other
 jobs from running when sufficient resources are available for them.</p>
 
-<p><a name="steps"><b>8. How can I run multiple jobs from within a 
+<p><a name="steps"><b>8. How can I run multiple jobs from within a
 single script?</b></a><br>
-A SLURM job is just a resource allocation. You can execute many 
-job steps within that allocation, either in parallel or sequentially. 
-Some jobs actually launch thousands of job steps this way. The job 
-steps will be allocated nodes that are not already allocated to 
-other job steps. This essential provides a second level of resource 
+A SLURM job is just a resource allocation. You can execute many
+job steps within that allocation, either in parallel or sequentially.
+Some jobs actually launch thousands of job steps this way. The job
+steps will be allocated nodes that are not already allocated to
+other job steps. This essentially provides a second level of resource
 management within the job for the job steps.</p>
 
-<p><a name="orphan"><b>9. Why do I have job steps when my job has 
+<p><a name="orphan"><b>9. Why do I have job steps when my job has
 already COMPLETED?</b></a><br>
-NOTE: This only applies to systems configured with 
+NOTE: This only applies to systems configured with
 <i>SwitchType=switch/elan</i> or <i>SwitchType=switch/federation</i>.
 All other systems will purge all job steps on job completion.</p>
-<p>SLURM maintains switch (network interconnect) information within 
-the job step for Quadrics Elan and IBM Federation switches. 
-This information must be maintained until we are absolutely certain 
-that the processes associated with the switch have been terminated 
-to avoid the possibility of re-using switch resources for other 
+<p>SLURM maintains switch (network interconnect) information within
+the job step for Quadrics Elan and IBM Federation switches.
+This information must be maintained until we are absolutely certain
+that the processes associated with the switch have been terminated
+to avoid the possibility of re-using switch resources for other
 jobs (even on different nodes).
-SLURM considers jobs COMPLETED when all nodes allocated to the 
+SLURM considers jobs COMPLETED when all nodes allocated to the
 job are either DOWN or confirm termination of all its processes.
-This enables SLURM to purge job information in a timely fashion 
+This enables SLURM to purge job information in a timely fashion
 even when there are many failing nodes.
 Unfortunately the job step information may persist longer.</p>
 
 <p><a name="multi_batch"><b>10. How can I run a job within an existing
 job allocation?</b></a><br>
-There is a srun option <i>--jobid</i> that can be used to specify 
-a job's ID. 
-For a batch job or within an existing resource allocation, the 
-environment variable <i>SLURM_JOB_ID</i> has already been defined, 
-so all job steps will run within that job allocation unless 
+There is a srun option <i>--jobid</i> that can be used to specify
+a job's ID.
+For a batch job or within an existing resource allocation, the
+environment variable <i>SLURM_JOB_ID</i> has already been defined,
+so all job steps will run within that job allocation unless
 otherwise specified.
-The one exception to this is when submitting batch jobs. 
-When a batch job is submitted from within an existing batch job, 
-it is treated as a new job allocation request and will get a 
-new job ID unless explicitly set with the <i>--jobid</i> option. 
-If you specify that a batch job should use an existing allocation, 
-that job allocation will be released upon the termination of 
+The one exception to this is when submitting batch jobs.
+When a batch job is submitted from within an existing batch job,
+it is treated as a new job allocation request and will get a
+new job ID unless explicitly set with the <i>--jobid</i> option.
+If you specify that a batch job should use an existing allocation,
+that job allocation will be released upon the termination of
 that batch job.</p>
 
-<p><a name="user_env"><b>11. How does SLURM establish the environment 
+<p><a name="user_env"><b>11. How does SLURM establish the environment
 for my job?</b></a><br>
-SLURM processes are not run under a shell, but directly exec'ed 
-by the <i>slurmd</i> daemon (assuming <i>srun</i> is used to launch 
+SLURM processes are not run under a shell, but directly exec'ed
+by the <i>slurmd</i> daemon (assuming <i>srun</i> is used to launch
 the processes).
-The environment variables in effect at the time the <i>srun</i> command 
-is executed are propagated to the spawned processes. 
-The <i>~/.profile</i> and <i>~/.bashrc</i> scripts are not executed 
+The environment variables in effect at the time the <i>srun</i> command
+is executed are propagated to the spawned processes.
+The <i>~/.profile</i> and <i>~/.bashrc</i> scripts are not executed
 as part of the process launch.</p>
 
-<p><a name="prompt"><b>12. How can I get shell prompts in interactive 
+<p><a name="prompt"><b>12. How can I get shell prompts in interactive
 mode?</b></a><br>
 <i>srun -u bash -i</i><br>
 Srun's <i>-u</i> option turns off buffering of stdout.
 Bash's <i>-i</i> option tells it to run in interactive mode (with prompts).
 
-<p><a name="batch_out"><b>13. How can I get the task ID in the output 
+<p><a name="batch_out"><b>13. How can I get the task ID in the output
 or error file name for a batch job?</b></a><br>
-<p>If you want separate output by task, you will need to build a script 
+<p>If you want separate output by task, you will need to build a script
 containing this specification. For example:</p>
 <pre>
 $ cat test
@@ -388,9 +388,9 @@ tdev2
 
 <p><a name="parallel_make"><b>14. Can the <i>make</i> command
 utilize the resources allocated to a SLURM job?</b></a><br>
-Yes. There is a patch available for GNU make version 3.81 
-available as part of the SLURM distribution in the file 
-<i>contribs/make.slurm.patch</i>. 
+Yes. There is a patch available for GNU make version 3.81
+available as part of the SLURM distribution in the file
+<i>contribs/make.slurm.patch</i>.
 This patch will use SLURM to launch tasks across a job's current resource
 allocation. Depending upon the size of modules to be compiled, this may
 or may not improve performance. If most modules are thousands of lines
@@ -399,15 +399,15 @@ overhead of SLURM's task launch. Use with make's <i>-j</i> option within an
 existing SLURM allocation. Outside of a SLURM allocation, make's behavior
 will be unchanged.</p>
 
-<p><a name="terminal"><b>15. Can tasks be launched with a remote 
+<p><a name="terminal"><b>15. Can tasks be launched with a remote
 terminal?</b></a><br>
 In SLURM version 1.3 or higher, use srun's <i>--pty</i> option.
-Until then, you can accomplish this by starting an appropriate program 
-or script. In the simplest case (X11 over TCP with the DISPLAY 
-environment already set), executing <i>srun xterm</i> may suffice. 
-In the more general case, the following scripts should work. 
-<b>NOTE: The pathname to the additional scripts are included in the 
-variables BS and IS of the first script. You must change this in the 
+Until then, you can accomplish this by starting an appropriate program
+or script. In the simplest case (X11 over TCP with the DISPLAY
+environment already set), executing <i>srun xterm</i> may suffice.
+In the more general case, the following scripts should work.
+<b>NOTE: The pathnames to the additional scripts are included in the
+variables BS and IS of the first script. You must change this in the
 first script.</b>
 Execute the script with the sbatch options desired.
 For example, <i>interactive -N2 -pdebug</i>.
@@ -417,10 +417,10 @@ For example, <i>interactive -N2 -pdebug</i>.
 # -*- coding: utf-8 -*-
 # Author: P&auml;r Andersson (National Supercomputer Centre, Sweden)
 # Version: 0.3 2007-07-30
-# 
-# This will submit a batch script that starts screen on a node. 
-# Then ssh is used to connect to the node and attach the screen. 
-# The result is very similar to an interactive shell in PBS 
+#
+# This will submit a batch script that starts screen on a node.
+# Then ssh is used to connect to the node and attach the screen.
+# The result is very similar to an interactive shell in PBS
 # (qsub -I)
 
 # Batch Script that starts SCREEN
@@ -441,7 +441,7 @@ while true;do
 
     # Check job status
     STATUS=`squeue -j $JOB -t PD,R -h -o %t`
-    
+
     if [ "$STATUS" = "R" ];then
 	# Job is running, break the while loop
 	break
@@ -450,9 +450,9 @@ while true;do
 	scancel $JOB
 	exit 1
     fi
-    
+
     echo -n "."
-    
+
 done
 
 # Determine the first node in the job:
@@ -464,14 +464,14 @@ ssh -X -t $NODE $IS slurm$JOB
 # The trap will now cancel the job before exiting.
 </pre>
 
-<p>NOTE: The above script executes the script below, 
+<p>NOTE: The above script executes the script below,
 named <i>_interactive</i>.</p>
 <pre>
 #!/bin/sh
 # -*- coding: utf-8 -*-
 # Author: P&auml;r Andersson  (National Supercomputer Centre, Sweden)
 # Version: 0.2 2007-07-30
-# 
+#
 # Simple batch script that starts SCREEN.
 
 exec screen -Dm -S slurm$SLURM_JOB_ID
@@ -499,27 +499,27 @@ fi
 exec screen -S $SCREENSESSION -rd
 </pre>
 
-<p><a name="force"><b>16. What does &quot;srun: Force Terminated job&quot; 
+<p><a name="force"><b>16. What does &quot;srun: Force Terminated job&quot;
 indicate?</b></a><br>
-The srun command normally terminates when the standard output and 
-error I/O from the spawned tasks end. This does not necessarily 
-happen at the same time that a job step is terminated. For example, 
+The srun command normally terminates when the standard output and
+error I/O from the spawned tasks end. This does not necessarily
+happen at the same time that a job step is terminated. For example,
 a file system problem could render a spawned task non-killable
-at the same time that I/O to srun is pending. Alternately a network 
+at the same time that I/O to srun is pending. Alternately a network
 problem could prevent the I/O from being transmitted to srun.
-In any event, the srun command is notified when a job step is 
-terminated, either upon reaching its time limit or being explicitly 
-killed. If the srun has not already terminated, the message 
-&quot;srun: Force Terminated job&quot; is printed. 
+In any event, the srun command is notified when a job step is
+terminated, either upon reaching its time limit or being explicitly
+killed. If the srun has not already terminated, the message
+&quot;srun: Force Terminated job&quot; is printed.
 If the job step's I/O does not terminate in a timely fashion
-thereafter, pending I/O is abandoned and the srun command 
+thereafter, pending I/O is abandoned and the srun command
 exits.</p>
 
-<p><a name="early_exit"><b>17. What does this mean: 
+<p><a name="early_exit"><b>17. What does this mean:
 &quot;srun: First task exited 30s ago&quot;
 followed by &quot;srun Job Failed&quot;?</b></a><br>
-The srun command monitors when tasks exit. By default, 30 seconds 
-after the first task exists, the job is killed. 
+The srun command monitors when tasks exit. By default, 30 seconds
+after the first task exits, the job is killed.
 This typically indicates some type of job failure and continuing
 to execute a parallel job when one of the tasks has exited is
 not normally productive. This behavior can be changed using srun's
@@ -527,30 +527,30 @@ not normally productive. This behavior can be changed using srun's
 period or disable the timeout altogether. See srun's man page
 for details.</p>
 
-<p><a name="memlock"><b>18. Why is my MPI job  failing due to the 
+<p><a name="memlock"><b>18. Why is my MPI job failing due to the
 locked memory (memlock) limit being too low?</b></a><br>
-By default, SLURM propagates all of your resource limits at the 
-time of job submission to the spawned tasks. 
+By default, SLURM propagates all of your resource limits at the
+time of job submission to the spawned tasks.
 This can be disabled by specifically excluding the propagation of
 specific limits in the <i>slurm.conf</i> file. For example
-<i>PropagateResourceLimitsExcept=MEMLOCK</i> might be used to 
-prevent the propagation of a user's locked memory limit from a 
+<i>PropagateResourceLimitsExcept=MEMLOCK</i> might be used to
+prevent the propagation of a user's locked memory limit from a
 login node to a dedicated node used for his parallel job.
-If the user's resource limit is not propagated, the limit in 
+If the user's resource limit is not propagated, the limit in
 effect for the <i>slurmd</i> daemon will be used for the spawned job.
-A simple way to control this is to insure that user <i>root</i> has a 
-sufficiently large resource limit and insuring that <i>slurmd</i> takes 
+A simple way to control this is to ensure that user <i>root</i> has a
+sufficiently large resource limit and ensuring that <i>slurmd</i> takes
 full advantage of this limit. For example, you can set user root's
 locked memory limit ulimit to be unlimited on the compute nodes (see
-<i>"man limits.conf"</i>) and insuring that <i>slurmd</i> takes 
+<i>"man limits.conf"</i>) and ensuring that <i>slurmd</i> takes
 full advantage of this limit (e.g. by adding something like
 <i>"ulimit -l unlimited"</i> to the <i>/etc/init.d/slurm</i>
-script used to initiate <i>slurmd</i>). 
+script used to initiate <i>slurmd</i>).
 Related information about <a href="#pam">PAM</a> is also available.</p>
 
-<p><a name="inactive"><b>19. Why is my batch job that launches no 
+<p><a name="inactive"><b>19. Why is my batch job that launches no
 job steps being killed?</b></a><br>
-SLURM has a configuration parameter <i>InactiveLimit</i> intended 
+SLURM has a configuration parameter <i>InactiveLimit</i> intended
 to kill jobs that do not spawn any job steps for a configurable
 period of time. Your system administrator may modify the <i>InactiveLimit</i>
 to satisfy your needs. Alternately, you can just spawn a job step
@@ -562,7 +562,7 @@ A line of this sort near the beginning of your script should suffice:<br>
 <p><a name="arbitrary"><b>20. How do I run specific tasks on certain nodes
 in my allocation?</b></a><br>
 One of the distribution methods for srun '<b>-m</b>
-or <b>--distribution</b>' is 'arbitrary'.  This means you can tell slurm to  
+or <b>--distribution</b>' is 'arbitrary'.  This means you can tell slurm to
 layout your tasks in any fashion you want.  For instance if I had an
 allocation of 2 nodes and wanted to run 4 tasks on the first node and
 1 task on the second and my nodes allocated from SLURM_NODELIST
@@ -570,7 +570,7 @@ where tux[0-1] my srun line would look like this.<p>
 <i>srun -n5 -m arbitrary -w tux[0,0,0,0,1] hostname</i><p>
 If I wanted something similar but wanted the third task to be on tux 1
 I could run this...<p>
-<i>srun -n5 -m arbitrary -w tux[0,0,1,0,0] hostname</i><p> 
+<i>srun -n5 -m arbitrary -w tux[0,0,1,0,0] hostname</i><p>
 Here is a simple perl script named arbitrary.pl that can be ran to easily lay
 out tasks on nodes as they are in SLURM_NODELIST<p>
 <pre>
@@ -605,7 +605,7 @@ We can now use this script in our srun line in this fashion.<p>
 This will layout 4 tasks on the first node in the allocation and 1
 task on the second node.</p>
 
-<p><a name="hold"><b>21. How can I temporarily prevent a job from running 
+<p><a name="hold"><b>21. How can I temporarily prevent a job from running
 (e.g. place it into a <i>hold</i> state)?</b></a><br>
 The easiest way to do this is to change a job's earliest begin time
 (optionally set at job submit time using the <i>--begin</i> option).
@@ -617,11 +617,11 @@ $ scontrol update JobId=1234 StartTime=now+30days
 $ scontrol update JobId=1234 StartTime=now
 </pre>
 
-<p><a name="mem_limit"><b>22. Why are jobs not getting the appropriate 
+<p><a name="mem_limit"><b>22. Why are jobs not getting the appropriate
 memory limit?</b></a><br>
 This is probably a variation on the <a href="#memlock">locked memory limit</a>
-problem described above. 
-Use the same solution for the AS (Address Space), RSS (Resident Set Size), 
+problem described above.
+Use the same solution for the AS (Address Space), RSS (Resident Set Size),
 or other limits as needed.</p>
 
 <p class="footer"><a href="#top">top</a></p>
@@ -630,85 +630,85 @@ or other limits as needed.</p>
 <h2>For Administrators</h2>
 
 <p><a name="suspend"><b>1. How is job suspend/resume useful?</b></a><br>
-Job suspend/resume is most useful to get particularly large jobs initiated 
+Job suspend/resume is most useful to get particularly large jobs initiated
 in a timely fashion with minimal overhead. Say you want to get a full-system
-job initiated. Normally you would need to either cancel all running jobs 
-or wait for them to terminate. Canceling jobs results in the loss of 
+job initiated. Normally you would need to either cancel all running jobs
+or wait for them to terminate. Canceling jobs results in the loss of
 their work to that point from either their beginning or last checkpoint.
 Waiting for the jobs to terminate can take hours, depending upon your
-system configuration. A more attractive alternative is to suspend the 
-running jobs, run the full-system job, then resume the suspended jobs. 
-This can easily be accomplished by configuring a special queue for 
-full-system jobs and using a script to control the process. 
-The script would stop the other partitions, suspend running jobs in those 
-partitions, and start the full-system partition. 
-The process can be reversed when desired.  
-One can effectively gang schedule (time-slice) multiple jobs 
-using this mechanism, although the algorithms to do so can get quite 
+system configuration. A more attractive alternative is to suspend the
+running jobs, run the full-system job, then resume the suspended jobs.
+This can easily be accomplished by configuring a special queue for
+full-system jobs and using a script to control the process.
+The script would stop the other partitions, suspend running jobs in those
+partitions, and start the full-system partition.
+The process can be reversed when desired.
+One can effectively gang schedule (time-slice) multiple jobs
+using this mechanism, although the algorithms to do so can get quite
 complex.
-Suspending and resuming a job makes use of the SIGSTOP and SIGCONT 
-signals respectively, so swap and disk space should be sufficient to 
+Suspending and resuming a job makes use of the SIGSTOP and SIGCONT
+signals respectively, so swap and disk space should be sufficient to
 accommodate all jobs allocated to a node, either running or suspended.
 
-<p><a name="fast_schedule"><b>2. How can I configure SLURM to use 
-the resources actually found on a node rather than what is defined 
+<p><a name="fast_schedule"><b>2. How can I configure SLURM to use
+the resources actually found on a node rather than what is defined
 in <i>slurm.conf</i>?</b></a><br>
 SLURM can either base its scheduling decisions upon the node
-configuration defined in <i>slurm.conf</i> or what each node 
-actually returns as available resources. 
+configuration defined in <i>slurm.conf</i> or what each node
+actually returns as available resources.
 This is controlled using the configuration parameter <i>FastSchedule</i>.
-Set its value to zero in order to use the resources actually 
+Set its value to zero in order to use the resources actually
 found on each node, but with a higher overhead for scheduling.
-A value of one is the default and results in the node configuration 
+A value of one is the default and results in the node configuration
 defined in <i>slurm.conf</i> being used. See &quot;man slurm.conf&quot;
 for more details.</p>
 
-<p><a name="return_to_service"><b>3. Why is a node shown in state 
+<p><a name="return_to_service"><b>3. Why is a node shown in state
 DOWN when the node has registered for service?</b></a><br>
 The configuration parameter <i>ReturnToService</i> in <i>slurm.conf</i>
-controls how DOWN nodes are handled. 
-Set its value to one in order for DOWN nodes to automatically be 
-returned to service once the <i>slurmd</i> daemon registers 
+controls how DOWN nodes are handled.
+Set its value to one in order for DOWN nodes to automatically be
+returned to service once the <i>slurmd</i> daemon registers
 with a valid node configuration.
-A value of zero is the default and results in a node staying DOWN 
-until an administrator explicitly returns it to service using 
+A value of zero is the default and results in a node staying DOWN
+until an administrator explicitly returns it to service using
 the command &quot;scontrol update NodeName=whatever State=RESUME&quot;.
-See &quot;man slurm.conf&quot; and &quot;man scontrol&quot; for more 
+See &quot;man slurm.conf&quot; and &quot;man scontrol&quot; for more
 details.</p>
 
 <p><a name="down_node"><b>4. What happens when a node crashes?</b></a><br>
-A node is set DOWN when the slurmd daemon on it stops responding 
+A node is set DOWN when the slurmd daemon on it stops responding
 for <i>SlurmdTimeout</i> as defined in <i>slurm.conf</i>.
-The node can also be set DOWN when certain errors occur or the 
+The node can also be set DOWN when certain errors occur or the
 node's configuration is inconsistent with that defined in <i>slurm.conf</i>.
-Any active job on that node will be killed unless it was submitted 
+Any active job on that node will be killed unless it was submitted
 with the srun option <i>--no-kill</i>.
-Any active job step on that node will be killed. 
+Any active job step on that node will be killed.
 See the slurm.conf and srun man pages for more information.</p>
- 
-<p><a name="multi_job"><b>5. How can I control the execution of multiple 
+
+<p><a name="multi_job"><b>5. How can I control the execution of multiple
 jobs per node?</b></a><br>
 There are two mechanisms to control this.
-If you want to allocate individual processors on a node to jobs, 
-configure <i>SelectType=select/cons_res</i>. 
+If you want to allocate individual processors on a node to jobs,
+configure <i>SelectType=select/cons_res</i>.
 See <a href="cons_res.html">Consumable Resources in SLURM</a>
-for details about this configuration.  
+for details about this configuration.
 If you want to allocate whole nodes to jobs, configure
 configure <i>SelectType=select/linear</i>.
 Each partition also has a configuration parameter <i>Shared</i>
-that enables more than one job to execute on each node. 
-See <i>man slurm.conf</i> for more information about these 
+that enables more than one job to execute on each node.
+See <i>man slurm.conf</i> for more information about these
 configuration parameters.</p>
 
-<p><a name="inc_plugin"><b>6. When the SLURM daemon starts, it 
-prints &quot;cannot resolve X plugin operations&quot; and exits. 
+<p><a name="inc_plugin"><b>6. When the SLURM daemon starts, it
+prints &quot;cannot resolve X plugin operations&quot; and exits.
 What does this mean?</b></a><br>
-This means that symbols expected in the plugin were 
-not found by the daemon. This typically happens when the 
-plugin was built or installed improperly or the configuration 
-file is telling the plugin to use an old plugin (say from the 
-previous version of SLURM). Restart the daemon in verbose mode 
-for more information (e.g. &quot;slurmctld -Dvvvvv&quot;). 
+This means that symbols expected in the plugin were
+not found by the daemon. This typically happens when the
+plugin was built or installed improperly or the configuration
+file is telling the plugin to use an old plugin (say from the
+previous version of SLURM). Restart the daemon in verbose mode
+for more information (e.g. &quot;slurmctld -Dvvvvv&quot;).
 
 <p><a name="sigpipe"><b>7. Why are user tasks intermittently dying
 at launch with SIGPIPE error messages?</b></a><br>
@@ -719,14 +719,14 @@ work around this problem by setting <i>CacheGroups=1</i> in your slurm.conf
 file.  However, be aware that you will need to run &quot;scontrol
 reconfigure &quot; any time your groups database is updated.
 
-<p><a name="maint_time"><b>8. How can I dry up the workload for a 
+<p><a name="maint_time"><b>8. How can I dry up the workload for a
 maintenance period?</b></a><br>
-Create a resource reservation as described by SLURM's 
+Create a resource reservation as described by SLURM's
 <a href="reservations.html">Resource Reservation Guide</a>.
 
-<p><a name="pam"><b>9. How can PAM be used to control a user's limits on 
+<p><a name="pam"><b>9. How can PAM be used to control a user's limits on
 or access to compute nodes?</b></a><br>
-First, enable SLURM's use of PAM by setting <i>UsePAM=1</i> in 
+First, enable SLURM's use of PAM by setting <i>UsePAM=1</i> in
 <i>slurm.conf</i>.<br>
 Second, establish a PAM configuration file for slurm in <i>/etc/pam.d/slurm</i>.
 A basic configuration you might use is:</p>
@@ -741,50 +741,50 @@ For example, to set the locked memory limit to unlimited for all users:</p>
 *   hard   memlock   unlimited
 *   soft   memlock   unlimited
 </pre>
-<p>Finally, you need to disable SLURM's forwarding of the limits from the 
-session from which the <i>srun</i> initiating the job ran. By default 
-all resource limits are propagated from that session. For example, adding 
-the following line to <i>slurm.conf</i> will prevent the locked memory 
+<p>Finally, you need to disable SLURM's forwarding of the limits from the
+session from which the <i>srun</i> initiating the job ran. By default
+all resource limits are propagated from that session. For example, adding
+the following line to <i>slurm.conf</i> will prevent the locked memory
 limit from being propagated:<i>PropagateResourceLimitsExcept=MEMLOCK</i>.</p>
 
-<p>We also have a PAM module for SLURM that prevents users from 
-logging into nodes that they have not been allocated (except for user 
+<p>We also have a PAM module for SLURM that prevents users from
+logging into nodes that they have not been allocated (except for user
 root, which can always login. pam_slurm is available for download from
 <a href="https://sourceforge.net/projects/slurm/">
 https://sourceforge.net/projects/slurm/</a> or use the
 <a href="http://www.debian.org/">Debian</a> package
 named <i>libpam-slurm</i>.
-The use of pam_slurm does not require <i>UsePAM</i> being set. The 
+The use of pam_slurm does not require <i>UsePAM</i> being set. The
 two uses of PAM are independent.
 
-<p><a name="time"><b>10. Why are jobs allocated nodes and then unable 
+<p><a name="time"><b>10. Why are jobs allocated nodes and then unable
 to initiate programs on some nodes?</b></a><br>
-This typically indicates that the time on some nodes is not consistent 
-with the node on which the <i>slurmctld</i> daemon executes. In order to 
-initiate a job step (or batch job), the <i>slurmctld</i> daemon generates 
-a credential containing a time stamp. If the <i>slurmd</i> daemon 
-receives a credential containing a time stamp later than the current 
-time or more than a few minutes in the past, it will be rejected. 
-If you check in the <i>SlurmdLog</i> on the nodes of interest, you 
-will likely see messages of this sort: "<i>Invalid job credential from 
-&lt;some IP address&gt;: Job credential expired</i>." Make the times 
+This typically indicates that the time on some nodes is not consistent
+with the node on which the <i>slurmctld</i> daemon executes. In order to
+initiate a job step (or batch job), the <i>slurmctld</i> daemon generates
+a credential containing a time stamp. If the <i>slurmd</i> daemon
+receives a credential containing a time stamp later than the current
+time or more than a few minutes in the past, it will be rejected.
+If you check in the <i>SlurmdLog</i> on the nodes of interest, you
+will likely see messages of this sort: "<i>Invalid job credential from
+&lt;some IP address&gt;: Job credential expired</i>." Make the times
 consistent across all of the nodes and all should be well.
 
-<p><a name="ping"><b>11. Why does <i>slurmctld</i> log that some nodes 
+<p><a name="ping"><b>11. Why does <i>slurmctld</i> log that some nodes
 are not responding even if they are not in any partition?</b></a><br>
-The <i>slurmctld</i> daemon periodically pings the <i>slurmd</i> 
-daemon on every configured node, even if not associated with any 
-partition. You can control the frequency of this ping with the 
+The <i>slurmctld</i> daemon periodically pings the <i>slurmd</i>
+daemon on every configured node, even if not associated with any
+partition. You can control the frequency of this ping with the
 <i>SlurmdTimeout</i> configuration parameter in <i>slurm.conf</i>.
 
-<p><a name="controller"><b>12. How should I relocated the primary or 
+<p><a name="controller"><b>12. How should I relocate the primary or
 backup controller?</b></a><br>
 If the cluster's computers used for the primary or backup controller
 will be out of service for an extended period of time, it may be desirable
 to relocate them. In order to do so, follow this procedure:</p>
 <ol>
 <li>Stop all SLURM daemons</li>
-<li>Modify the <i>ControlMachine</i>, <i>ControlAddr</i>, 
+<li>Modify the <i>ControlMachine</i>, <i>ControlAddr</i>,
 <i>BackupController</i>, and/or <i>BackupAddr</i> in the <i>slurm.conf</i> file</li>
 <li>Distribute the updated <i>slurm.conf</i> file to all nodes</li>
 <li>Restart all SLURM daemons</li>
@@ -792,70 +792,70 @@ to relocate them. In order to do so, follow this procedure:</p>
 <p>There should be no loss of any running or pending jobs. Insure that
 any nodes added to the cluster have a current <i>slurm.conf</i> file
 installed.
-<b>CAUTION:</b> If two nodes are simultaneously configured as the primary 
-controller (two nodes on which <i>ControlMachine</i> specify the local host 
+<b>CAUTION:</b> If two nodes are simultaneously configured as the primary
+controller (two nodes on which <i>ControlMachine</i> specify the local host
 and the <i>slurmctld</i> daemon is executing on each), system behavior will be
 destructive. If a compute node has an incorrect <i>ControlMachine</i> or
 <i>BackupController</i> parameter, that node may be rendered unusable, but no
 other harm will result.
 
-<p><a name="multi_slurm"><b>13. Can multiple SLURM systems be run in 
+<p><a name="multi_slurm"><b>13. Can multiple SLURM systems be run in
 parallel for testing purposes?</b></a><br>
 Yes, this is a great way to test new versions of SLURM.
-Just install the test version in a different location with a different 
-<i>slurm.conf</i>. 
-The test system's <i>slurm.conf</i> should specify different 
+Just install the test version in a different location with a different
+<i>slurm.conf</i>.
+The test system's <i>slurm.conf</i> should specify different
 pathnames and port numbers to avoid conflicts.
-The only problem is if more than one version of SLURM is configured 
+The only problem is if more than one version of SLURM is configured
 with <i>switch/elan</i> or <i>switch/federation</i>.
-In that case, there can be conflicting switch window requests from 
-the different SLURM systems. 
+In that case, there can be conflicting switch window requests from
+the different SLURM systems.
 This can be avoided by configuring the test system with <i>switch/none</i>.
-MPI jobs started on an Elan or Federation switch system without the 
-switch windows configured will not execute properly, but other jobs 
-will run fine. 
-Another option for testing on Elan or Federation systems is to use 
+MPI jobs started on an Elan or Federation switch system without the
+switch windows configured will not execute properly, but other jobs
+will run fine.
+Another option for testing on Elan or Federation systems is to use
 a different set of nodes for the different SLURM systems.
-That will permit both systems to allocate switch windows without 
+That will permit both systems to allocate switch windows without
 conflicts.
 
-<p><a name="multi_slurmd"><b>14. Can slurm emulate a larger 
+<p><a name="multi_slurmd"><b>14. Can slurm emulate a larger
 cluster?</b></a><br>
-Yes, this can be useful for testing purposes. 
+Yes, this can be useful for testing purposes.
 It has also been used to partition "fat" nodes into multiple SLURM nodes.
 There are two ways to do this.
-The best method for most conditions is to run one <i>slurmd</i> 
+The best method for most conditions is to run one <i>slurmd</i>
 daemon per emulated node in the cluster as follows.
 <ol>
-<li>When executing the <i>configure</i> program, use the option 
+<li>When executing the <i>configure</i> program, use the option
 <i>--enable-multiple-slurmd</i> (or add that option to your <i>~/.rpmmacros</i>
 file).</li>
 <li>Build and install SLURM in the usual manner.</li>
-<li>In <i>slurm.conf</i> define the desired node names (arbitrary 
+<li>In <i>slurm.conf</i> define the desired node names (arbitrary
 names used only by SLURM) as <i>NodeName</i> along with the actual
-address of the physical node in <i>NodeHostname</i>. Multiple 
+address of the physical node in <i>NodeHostname</i>. Multiple
 <i>NodeName</i> values can be mapped to a single
 <i>NodeHostname</i>.  Note that each <i>NodeName</i> on a single
 physical node needs to be configured to use a different port number.  You
 will also want to use the "%n" symbol in slurmd related path options in
 slurm.conf. </li>
 <li>When starting the <i>slurmd</i> daemon, include the <i>NodeName</i>
-of the node that it is supposed to serve on the execute line.</li> 
+of the node that it is supposed to serve on the execute line.</li>
 </ol>
-<p>It is strongly recommended that SLURM version 1.2 or higher be used 
+<p>It is strongly recommended that SLURM version 1.2 or higher be used
 for this due to its improved support for multiple slurmd daemons.
 See the
 <a href="programmer_guide.html#multiple_slurmd_support">Programmers Guide</a>
 for more details about configuring multiple slurmd support.</p>
 
-<p>In order to emulate a really large cluster, it can be more 
-convenient to use a single <i>slurmd</i> daemon. 
-That daemon will not be able to launch many tasks, but can 
+<p>In order to emulate a really large cluster, it can be more
+convenient to use a single <i>slurmd</i> daemon.
+That daemon will not be able to launch many tasks, but can
 suffice for developing or testing scheduling software.
 Do not run job steps with more than a couple of tasks each
 or execute more than a few jobs at any given time.
-Doing so may result in the <i>slurmd</i> daemon exhausting its 
-memory and failing. 
+Doing so may result in the <i>slurmd</i> daemon exhausting its
+memory and failing.
 <b>Use this method with caution.</b>
 <ol>
 <li>Execute the <i>configure</i> program with your normal options
@@ -868,8 +868,8 @@ name and address of the <b>one</b> physical node in <i>NodeHostName</i>
 and <i>NodeAddr</i>.
 Up to 64k nodes can be configured in this virtual cluster.</li>
 <li>Start your <i>slurmctld</i> and one <i>slurmd</i> daemon.
-It is advisable to use the "-c" option to start the daemons without 
-trying to preserve any state files from previous executions. 
+It is advisable to use the "-c" option to start the daemons without
+trying to preserve any state files from previous executions.
 Be sure to use the "-c" option when switch from this mode too.</li>
 <li>Create job allocations as desired, but do not run job steps
 with more than a couple of tasks.</li>
@@ -907,22 +907,22 @@ Yes in SLURM version 1.2 or higher.
 In the <i>slurm.conf</i> file, set <i>FastSchedule=2</i> and specify
 any desired node resource specifications (<i>Procs</i>, <i>Sockets</i>,
 <i>CoresPerSocket</i>, <i>ThreadsPerCore</i>, and/or <i>TmpDisk</i>).
-SLURM will use the resource specification for each node that is 
-given in <i>slurm.conf</i> and will not check these specifications 
+SLURM will use the resource specification for each node that is
+given in <i>slurm.conf</i> and will not check these specifications
 against those actually found on the node.
 
-<p><a name="credential_replayed"><b>16. What does a 
-&quot;credential replayed&quot; 
+<p><a name="credential_replayed"><b>16. What does a
+&quot;credential replayed&quot;
 error in the <i>SlurmdLogFile</i> indicate?</b></a><br>
-This error is indicative of the <i>slurmd</i> daemon not being able 
+This error is indicative of the <i>slurmd</i> daemon not being able
 to respond to job initiation requests from the <i>srun</i> command
 in a timely fashion (a few seconds).
 <i>Srun</i> responds by resending the job initiation request.
-When the <i>slurmd</i> daemon finally starts to respond, it 
+When the <i>slurmd</i> daemon finally starts to respond, it
 processes both requests.
 The second request is rejected and the event is logged with
 the "credential replayed" error.
-If you check the <i>SlurmdLogFile</i> and <i>SlurmctldLogFile</i>, 
+If you check the <i>SlurmdLogFile</i> and <i>SlurmctldLogFile</i>,
 you should see signs of the <i>slurmd</i> daemon's non-responsiveness.
 A variety of factors can be responsible for this problem
 including
@@ -931,58 +931,58 @@ including
 <li>Very slow Network Information Service (NIS)</li>
 <li>The <i>Prolog</i> script taking a long time to complete</li>
 </ul>
-<p>In Slurm version 1.2, this can be addressed with the 
+<p>In Slurm version 1.2, this can be addressed with the
 <i>MessageTimeout</i> configuration parameter by setting a
 value higher than the default 5 seconds.
-In earlier versions of Slurm, the <i>--msg-timeout</i> option 
+In earlier versions of Slurm, the <i>--msg-timeout</i> option
 of <i>srun</i> serves a similar purpose.
 
-<p><a name="large_time"><b>17. What does 
-&quot;Warning: Note very large processing time&quot; 
+<p><a name="large_time"><b>17. What does
+&quot;Warning: Note very large processing time&quot;
 in the <i>SlurmctldLogFile</i> indicate?</b></a><br>
 This error is indicative of some operation taking an unexpectedly
 long time to complete, over one second to be specific.
-Setting the value of <i>SlurmctldDebug</i> configuration parameter 
-a value of six or higher should identify which operation(s) are 
+Setting the value of the <i>SlurmctldDebug</i> configuration parameter
+to a value of six or higher should identify which operation(s) are
 experiencing long delays.
 This message typically indicates long delays in file system access
-(writing state information or getting user information). 
-Another possibility is that the node on which the slurmctld 
-daemon executes has exhausted memory and is paging. 
+(writing state information or getting user information).
+Another possibility is that the node on which the slurmctld
+daemon executes has exhausted memory and is paging.
 Try running the program <i>top</i> to check for this possibility.
 
-<p><a name="lightweight_core"><b>18. How can I add support for 
+<p><a name="lightweight_core"><b>18. How can I add support for
 lightweight core files?</b></a><br>
 SLURM supports lightweight core files by setting environment variables
-based upon the <i>srun --core</i> option. Of particular note, it 
+based upon the <i>srun --core</i> option. Of particular note, it
 sets the <i>LD_PRELOAD</i> environment variable to load new functions
-used to process a core dump. 
->First you will need to acquire and install a shared object 
+used to process a core dump.
+First you will need to acquire and install a shared object
 library with the appropriate functions.
-Then edit the SLURM code in <i>src/srun/core-format.c</i> to 
-specify a name for the core file type, 
-add a test for the existence of the library, 
+Then edit the SLURM code in <i>src/srun/core-format.c</i> to
+specify a name for the core file type,
+add a test for the existence of the library,
 and set environment variables appropriately when it is used.
 
 <p><a name="limit_propagation"><b>19. Is resource limit propagation
 useful on a homogeneous cluster?</b></a><br>
 Resource limit propagation permits a user to modify resource limits
 and submit a job with those limits.
-By default, SLURM automatically propagates all resource limits in 
-effect at the time of job submission to the tasks spawned as part 
-of that job. 
+By default, SLURM automatically propagates all resource limits in
+effect at the time of job submission to the tasks spawned as part
+of that job.
 System administrators can utilize the <i>PropagateResourceLimits</i>
-and <i>PropagateResourceLimitsExcept</i> configuration parameters to 
+and <i>PropagateResourceLimitsExcept</i> configuration parameters to
 change this behavior.
-Users can override defaults using the <i>srun --propagate</i> 
-option. 
-See <i>"man slurm.conf"</i> and <i>"man srun"</i> for more information 
+Users can override defaults using the <i>srun --propagate</i>
+option.
+See <i>"man slurm.conf"</i> and <i>"man srun"</i> for more information
 about these options.
 
-<p><a name="clock"><b>20. Do I need to maintain synchronized 
+<p><a name="clock"><b>20. Do I need to maintain synchronized
 clocks on the cluster?</b></a><br>
-In general, yes. Having inconsistent clocks may cause nodes to 
-be unusable. SLURM log files should contain references to 
+In general, yes. Having inconsistent clocks may cause nodes to
+be unusable. SLURM log files should contain references to
 expired credentials. For example:
 <pre>
 error: Munge decode failed: Expired credential
@@ -990,74 +990,74 @@ ENCODED: Wed May 12 12:34:56 2008
 DECODED: Wed May 12 12:01:12 2008
 </pre>
 
-<p><a name="cred_invalid"><b>21. Why are &quot;Invalid job credential&quot; 
+<p><a name="cred_invalid"><b>21. Why are &quot;Invalid job credential&quot;
 errors generated?</b></a><br>
-This error is indicative of SLURM's job credential files being inconsistent across 
-the cluster. All nodes in the cluster must have the matching public and private 
-keys as defined by <b>JobCredPrivateKey</b> and <b>JobCredPublicKey</b> in the 
+This error is indicative of SLURM's job credential files being inconsistent across
+the cluster. All nodes in the cluster must have the matching public and private
+keys as defined by <b>JobCredPrivateKey</b> and <b>JobCredPublicKey</b> in the
 slurm configuration file <b>slurm.conf</b>.
 
-<p><a name="cred_replay"><b>22. Why are 
-&quot;Task launch failed on node ... Job credential replayed&quot; 
+<p><a name="cred_replay"><b>22. Why are
+&quot;Task launch failed on node ... Job credential replayed&quot;
 errors generated?</b></a><br>
-This error indicates that a job credential generated by the slurmctld daemon 
-corresponds to a job that the slurmd daemon has already revoked. 
-The slurmctld daemon selects job ID values based upon the configured 
-value of <b>FirstJobId</b> (the default value is 1) and each job gets 
+This error indicates that a job credential generated by the slurmctld daemon
+corresponds to a job that the slurmd daemon has already revoked.
+The slurmctld daemon selects job ID values based upon the configured
+value of <b>FirstJobId</b> (the default value is 1) and each job gets
 a value one larger than the previous job.
-On job termination, the slurmctld daemon notifies the slurmd on each 
-allocated node that all processes associated with that job should be 
-terminated. 
-The slurmd daemon maintains a list of the jobs which have already been 
-terminated to avoid replay of task launch requests. 
-If the slurmctld daemon is cold-started (with the &quot;-c&quot; option 
-or &quot;/etc/init.d/slurm startclean&quot;), it starts job ID values 
+On job termination, the slurmctld daemon notifies the slurmd on each
+allocated node that all processes associated with that job should be
+terminated.
+The slurmd daemon maintains a list of the jobs which have already been
+terminated to avoid replay of task launch requests.
+If the slurmctld daemon is cold-started (with the &quot;-c&quot; option
+or &quot;/etc/init.d/slurm startclean&quot;), it starts job ID values
 over based upon <b>FirstJobId</b>.
-If the slurmd is not also cold-started, it will reject job launch requests 
-for jobs that it considers terminated. 
+If the slurmd is not also cold-started, it will reject job launch requests
+for jobs that it considers terminated.
 This solution to this problem is to cold-start all slurmd daemons whenever
 the slurmctld daemon is cold-started.
 
 <p><a name="globus"><b>23. Can SLURM be used with Globus?</b></a><br>
-Yes. Build and install SLURM's Torque/PBS command wrappers along with 
-the Perl APIs from SLURM's <i>contribs</i> directory and configure 
+Yes. Build and install SLURM's Torque/PBS command wrappers along with
+the Perl APIs from SLURM's <i>contribs</i> directory and configure
 <a href="http://www-unix.globus.org/">Globus</a> to use those PBS commands.
-Note there are RPMs available for both of these packages, named 
+Note there are RPMs available for both of these packages, named
 <i>torque</i> and <i>perlapi</i> respectively.
 
-<p><a name="time_format"><b>24. Can SLURM time output format include the 
+<p><a name="time_format"><b>24. Can SLURM time output format include the
 year?</b></a><br>
-The default SLURM time format output is <i>MM/DD-HH:MM:SS</i>. 
+The default SLURM time format output is <i>MM/DD-HH:MM:SS</i>.
 Define &quot;ISO8601&quot; at SLURM build time to get the time format
 <i>YYYY-MM-DDTHH:MM:SS</i>.
-Note that this change in format will break anything that parses 
+Note that this change in format will break anything that parses
 SLURM output expecting the old format (e.g. LSF, Maui or Moab).
 
-<p><a name="file_limit"><b>25. What causes the error 
+<p><a name="file_limit"><b>25. What causes the error
 &quot;Unable to accept new connection: Too many open files&quot;?</b></a><br>
-The srun command automatically increases its open file limit to 
+The srun command automatically increases its open file limit to
 the hard limit in order to process all of the standard input and output
 connections to the launched tasks. It is recommended that you set the
 open file hard limit to 8192 across the cluster.
 
-<p><a name="slurmd_log"><b>26. Why does the setting of <i>SlurmdDebug</i> 
+<p><a name="slurmd_log"><b>26. Why does the setting of <i>SlurmdDebug</i>
 fail to log job step information at the appropriate level?</b></a><br>
-There are two programs involved here. One is <b>slurmd</b>, which is 
-a persistent daemon running at the desired debug level. The second 
+There are two programs involved here. One is <b>slurmd</b>, which is
+a persistent daemon running at the desired debug level. The second
 program is <b>slurmstep</b>, which executed the user job and its
-debug level is controlled by the user. Submitting the job with 
-an option of <i>--debug=#</i> will result in the desired level of 
-detail being logged in the <i>SlurmdLogFile</i> plus the output 
+debug level is controlled by the user. Submitting the job with
+an option of <i>--debug=#</i> will result in the desired level of
+detail being logged in the <i>SlurmdLogFile</i> plus the output
 of the program.
 
-<p><a name="rpm"><b>27. Why isn't the auth_none.so (or other file) in a 
+<p><a name="rpm"><b>27. Why isn't the auth_none.so (or other file) in a
 SLURM RPM?</b></a><br>
 The auth_none plugin is in a separate RPM and not built by default.
-Using the auth_none plugin means that SLURM communications are not 
-authenticated, so you probably do not want to run in this mode of operation 
-except for testing purposes. If you want to build the auth_none RPM then 
-add <i>--with auth_none</i> on the rpmbuild command line or add 
-<i>%_with_auth_none</i> to your ~/rpmmacros file. See the file slurm.spec 
+Using the auth_none plugin means that SLURM communications are not
+authenticated, so you probably do not want to run in this mode of operation
+except for testing purposes. If you want to build the auth_none RPM then
+add <i>--with auth_none</i> on the rpmbuild command line or add
+<i>%_with_auth_none</i> to your ~/rpmmacros file. See the file slurm.spec
 in the SLURM distribution for a list of other options.
 
 <p><a name="slurmdbd"><b>28. Why should I use the slurmdbd instead of the
@@ -1079,19 +1079,19 @@ slurmdbd.
    other cluster's nodes.
 
 <p><a name="debug"><b>29. How can I build SLURM with debugging symbols?</b></a></br>
-Set your CFLAGS environment variable before building. 
+Set your CFLAGS environment variable before building.
 You want the "-g" option to produce debugging information and
 "-O0" to set the optimization level to zero (off). For example:<br>
 CFLAGS="-g -O0" ./configure ...
 
-<p><a name="state_preserve"><b>30. How can I easily preserve drained node 
+<p><a name="state_preserve"><b>30. How can I easily preserve drained node
 information between major SLURM updates?</b></a><br>
-Major SLURM updates generally have changes in the state save files and 
-communication protocols, so a cold-start (without state) is generally 
+Major SLURM updates generally have changes in the state save files and
+communication protocols, so a cold-start (without state) is generally
 required. If you have nodes in a DRAIN state and want to preserve that
 information, you can easily build a script to preserve that information
-using the <i>sinfo</i> command. The following command line will report the 
-<i>Reason</i> field for every node in a DRAIN state and write the output 
+using the <i>sinfo</i> command. The following command line will report the
+<i>Reason</i> field for every node in a DRAIN state and write the output
 in a form that can be executed later to restore state.
 <pre>
 sinfo -t drain -h -o "scontrol update nodename='%N' state=drain reason='%E'"
@@ -1100,13 +1100,13 @@ sinfo -t drain -h -o "scontrol update nodename='%N' state=drain reason='%E'"
 <p><a name="health_check"><b>31. Why doesn't the <i>HealthCheckProgram</i>
 execute on DOWN nodes?</a></b><br>
 Hierarchical communications are used for sending this message. If there
-are DOWN nodes in the communications hierarchy, messages will need to 
+are DOWN nodes in the communications hierarchy, messages will need to
 be re-routed. This limits SLURM's ability to tightly synchronize the
 execution of the <i>HealthCheckProgram</i> across the cluster, which
-could adversely impact performance of parallel applications. 
+could adversely impact performance of parallel applications.
 The use of CRON or node startup scripts may be better suited to insure
 that <i>HealthCheckProgram</i> gets executed on nodes that are DOWN
-in SLURM. If you still want to have SLURM try to execute 
+in SLURM. If you still want to have SLURM try to execute
 <i>HealthCheckProgram</i> on DOWN nodes, apply the following patch:
 <pre>
 Index: src/slurmctld/ping_nodes.c
@@ -1125,29 +1125,29 @@ Index: src/slurmctld/ping_nodes.c
                         continue;
 </pre>
 
-<p><a name="batch_lost"><b>32. What is the meaning of the error 
+<p><a name="batch_lost"><b>32. What is the meaning of the error
 &quot;Batch JobId=# missing from master node, killing it&quot;?</b></a><br>
 A shell is launched on node zero of a job's allocation to execute
 the submitted program. The <i>slurmd</i> daemon executing on each compute
 node will periodically report to the <i>slurmctld</i> what programs it
 is executing. If a batch program is expected to be running on some
 node (i.e. node zero of the job's allocation) and is not found, the
-message above will be logged and the job cancelled. This typically is 
-associated with exhausting memory on the node or some other critical 
-failure that cannot be recovered from. The equivalent message in 
-earlier releases of slurm is 
+message above will be logged and the job cancelled. This typically is
+associated with exhausting memory on the node or some other critical
+failure that cannot be recovered from. The equivalent message in
+earlier releases of slurm is
 &quot;Master node lost JobId=#, killing it&quot;.
 
 <p><a name="accept_again"><b>33. What does the messsage
-&quot;srun: error: Unable to accept connection: Resources temporarily unavailable&quot; 
+&quot;srun: error: Unable to accept connection: Resources temporarily unavailable&quot;
 indicate?</b></a><br>
 This has been reported on some larger clusters running SUSE Linux when
 a user's resource limits are reached. You may need to increase limits
 for locked memory and stack size to resolve this problem.
 
-<p><a name="task_prolog"><b>34. How could I automatically print a job's 
+<p><a name="task_prolog"><b>34. How could I automatically print a job's
 SLURM job ID to its standard output?</b></a></br>
-The configured <i>TaskProlog</i> is the only thing that can write to 
+The configured <i>TaskProlog</i> is the only thing that can write to
 the job's standard output or set extra environment variables for a job
 or job step. To write to the job's standard output, precede the message
 with "print ". To export environment variables, output a line of this
@@ -1175,7 +1175,7 @@ How can I start a job under SLURM without the scheduler?</b></a></br>
 When SLURM is configured to use the Moab or Maui scheduler, all submitted
 jobs have their priority initialized to zero, which SLURM treats as a held
 job. The job only begins when Moab or Maui decide where and when to start
-the job, setting the required node list and setting the job priority to 
+the job, setting the required node list and setting the job priority to
 a non-zero value. To circumvent this, submit your job using a SLURM or
 Moab command then manually set its priority to a non-zero value (must be
 done by user root). For example:</p>
@@ -1185,8 +1185,8 @@ $ scontrol update jobid=1234 priority=1000000
 <p>Note that changes in the configured value of <i>SchedulerType</i> only
 take effect when the <i>slurmctld</i> daemon is restarted (reconfiguring
 SLURM will not change this parameter. You will also manually need to
-modify the priority of every pending job. 
-When changing to Moab or Maui scheduling, set every job priority to zero. 
+modify the priority of every pending job.
+When changing to Moab or Maui scheduling, set every job priority to zero.
 When changing from Moab or Maui scheduling, set every job priority to a
 non-zero value (preferably fairly large, say 1000000).</p>
 
@@ -1195,14 +1195,14 @@ running even though the job is supposed to be completed?</b></a></br>
 SLURM relies upon a configurable process tracking plugin to determine
 when all of the processes associated with a job or job step have completed.
 Those plugins relying upon a kernel patch can reliably identify every process.
-Those plugins dependent upon process group IDs or parent process IDs are not 
+Those plugins dependent upon process group IDs or parent process IDs are not
 reliable. See the <i>ProctrackType</i> description in the <i>slurm.conf</i>
 man page for details. We rely upon the sgi_job for most systems.</p>
 
 <p><a name="slurmd_oom"><b>37. How can I prevent the <i>slurmd</i> and
-<i>slurmstepd</i> daemons from being killed when a node's memory 
+<i>slurmstepd</i> daemons from being killed when a node's memory
 is exhausted?</b></a></br>
-You can the value set in the <i>/proc/self/oom_adj</i> for 
+You can set the value in the <i>/proc/self/oom_adj</i> for
 <i>slurmd</i> and <i>slurmstepd</i> by initiating the <i>slurmd</i>
 daemon with the <i>SLURMD_OOM_ADJ</i> and/or <i>SLURMSTEPD_OOM_ADJ</i>
 environment variables set to the desired values.
@@ -1211,7 +1211,7 @@ A value of -17 typically will disable killing.</p>
 <p><a name="ubuntu"><b>38. I see my host of my calling node as 127.0.1.1
     instead of the correct IB address.  Why is that?</b></a></br>
 Some systems by default will put your host in the /etc/hosts file as
-    something like 
+    something like
 <pre>
 127.0.1.1	snowflake.llnl.gov	snowflake
 </pre>
@@ -1221,17 +1221,17 @@ communication doesn't work.  Solution is to either remove this line or
 set a different nodeaddr that is known by your other nodes.</p>
 
 <p><a name="stop_sched"><b>38. How can I stop SLURM from scheduling jobs?</b></a></br>
-You can stop SLURM from scheduling jobs on a per partition basis by setting 
-that partition's state to DOWN. Set its state UP to resume scheduling. 
+You can stop SLURM from scheduling jobs on a per partition basis by setting
+that partition's state to DOWN. Set its state UP to resume scheduling.
 For example:
 <pre>
 $ scontrol update PartitionName=foo State=DOWN
 $ scontrol update PartitionName=bar State=UP
 </pre></p>
 
-<p><a name="scontrol_multi_jobs"><b>39. Can I update multiple jobs with a 
+<p><a name="scontrol_multi_jobs"><b>39. Can I update multiple jobs with a
 single <i>scontrol</i> command?</b></a></br>
-No, but you can probably use <i>squeue</i> to build the script taking 
+No, but you can probably use <i>squeue</i> to build the script taking
 advantage of its filtering and formatting options. For example:
 <pre>
 $ squeue -tpd -h -o "scontrol update jobid=%i priority=1000" >my.script
diff --git a/doc/html/footer.txt b/doc/html/footer.txt
index c333e9dddfcbfd09a8b4822bb8a57ebaa5ea423c..bee6ae12e5c39572bc1747e99960eb04cc77a3fc 100644
--- a/doc/html/footer.txt
+++ b/doc/html/footer.txt
@@ -10,7 +10,7 @@
 <div id="center2"><a href="https://www.llnl.gov/" target="_blank" class="footer">Lawrence Livermore National Laboratory</a><br />
 <span class="smalltextblue">7000 East Avenue &#8226; Livermore, CA 94550</span></div>
 <div id="right2"><span class="smalltextblue">Operated by
-Lawrence Livermore National Security, LLC, for the</span> 
+Lawrence Livermore National Security, LLC, for the</span>
 <a href="http://www.energy.gov/" target="_blank" class="footer">
 Department of Energy's</a><br />
 <a href="http://www.nnsa.doe.gov/" target="_blank" class="footer">
diff --git a/doc/html/gang_scheduling.shtml b/doc/html/gang_scheduling.shtml
index ccbae6165920ea3a7b7afdc7902b28b6b627b8c0..1ab53791ea135c97436d40ca57a1d08ca8d8f391 100644
--- a/doc/html/gang_scheduling.shtml
+++ b/doc/html/gang_scheduling.shtml
@@ -3,64 +3,64 @@
 <H1>Gang Scheduling</H1>
 
 <P>
-SLURM supports timesliced gang scheduling in which two or more jobs are 
-allocated to the same resources and these jobs are alternately suspended to 
-let one job at a time have dedicated access to the resources for a configured 
+SLURM supports timesliced gang scheduling in which two or more jobs are
+allocated to the same resources and these jobs are alternately suspended to
+let one job at a time have dedicated access to the resources for a configured
 period of time.
 SLURM also supports preemptive priority job scheduling in which a a higher
 priority job can preempt a lower priority one until the higher priority job
-completes. 
+completes.
 See the <a href="preempt.html">Preemption</a> document for more information.
 </P>
 <P>
 A resource manager that supports timeslicing can improve responsiveness
-and utilization by allowing more jobs to begin running sooner. 
-Shorter-running jobs no longer have to wait in a queue behind longer-running 
-jobs. 
-Instead they can be run "in parallel" with the longer-running jobs, which will 
-allow them to start and finish quicker. 
-Throughput is also improved because overcommitting the resources provides 
+and utilization by allowing more jobs to begin running sooner.
+Shorter-running jobs no longer have to wait in a queue behind longer-running
+jobs.
+Instead they can be run "in parallel" with the longer-running jobs, which will
+allow them to start and finish quicker.
+Throughput is also improved because overcommitting the resources provides
 opportunities for "local backfilling" to occur (see example below).
 </P>
 <P>
-In SLURM version 2.0 and earlier, the <I>sched/gang</I> plugin provides 
-timeslicing. 
+In SLURM version 2.0 and earlier, the <I>sched/gang</I> plugin provides
+timeslicing.
 In SLURM version 2.1, the gang scheduling logic was moved directly into the
-main code bases to permit use of both gang scheduling plus the backfill 
+main code bases to permit use of both gang scheduling plus the backfill
 scheduler plugin, <i>sched/backfill</I>.
-In either case, the gang scheduling logic monitors each of the partitions in 
-SLURM. 
-If a new job has been allocated to resources in a partition that have already 
-been allocated to an existing job, then the plugin will suspend the new job 
-until the configured <I>SchedulerTimeslice</I> interval has elapsed. 
-Then it will suspend the running job and let the new job make use of the 
-resources for a <I>SchedulerTimeslice</I> interval. 
+In either case, the gang scheduling logic monitors each of the partitions in
+SLURM.
+If a new job has been allocated to resources in a partition that have already
+been allocated to an existing job, then the plugin will suspend the new job
+until the configured <I>SchedulerTimeslice</I> interval has elapsed.
+Then it will suspend the running job and let the new job make use of the
+resources for a <I>SchedulerTimeslice</I> interval.
 This will continue until one of the jobs terminates.
 </P>
 
 <H2>Configuration</H2>
 
 <P>
-There are several important configuration parameters relating to 
+There are several important configuration parameters relating to
 gang scheduling:
 </P>
 <UL>
 <LI>
-<B>SelectType</B>: The SLURM gang scheduler supports nodes 
-allocated by the <I>select/linear</I> plugin and socket/core/CPU resources 
+<B>SelectType</B>: The SLURM gang scheduler supports nodes
+allocated by the <I>select/linear</I> plugin and socket/core/CPU resources
 allocated by the <I>select/cons_res</I> plugin.
 </LI>
 <LI>
-<B>SelectTypeParameter</B>: Since resources will be getting overallocated 
-with jobs, the resource selection plugin should be configured to track the 
-amount of memory used by each job to ensure that memory page swapping does 
-not occur. When <I>select/linear</I> is chosen, we recommend setting 
+<B>SelectTypeParameter</B>: Since resources will be getting overallocated
+with jobs, the resource selection plugin should be configured to track the
+amount of memory used by each job to ensure that memory page swapping does
+not occur. When <I>select/linear</I> is chosen, we recommend setting
 <I>SelectTypeParameter=CR_Memory</I>. When <I>select/cons_res</I> is
 chosen, we recommend including Memory as a resource (ex.
 <I>SelectTypeParameter=CR_Core_Memory</I>).
 </LI>
 <LI>
-<B>DefMemPerCPU</B>: Since job requests may not explicitly specify 
+<B>DefMemPerCPU</B>: Since job requests may not explicitly specify
 a memory requirement, we also recommend configuring
 <I>DefMemPerCPU</I> (default memory per allocated CPU) or
 <I>DefMemPerNode</I> (default memory per allocated node).
@@ -75,7 +75,7 @@ at job submission time to specify their memory requirements.
 If you wish to enforce memory limits, accounting must be enabled
 using the <I>JobAcctGatherType</I> and <I>JobAcctGatherFrequency</I>
 parameters. If accounting is enabled and a job exceeds its configured
-memory limits, it will be canceled in order to prevent it from 
+memory limits, it will be canceled in order to prevent it from
 adversely effecting other jobs sharing the same resources.
 </LI>
 <LI>
@@ -84,31 +84,31 @@ Additional options may be specified to enable job preemption in addition
 to gang scheduling.
 </LI>
 <LI>
-<B>SchedulerTimeSlice</B>: The default timeslice interval is 30 seconds. 
-To change this duration, set <I>SchedulerTimeSlice</I> to the desired interval 
-(in seconds) in <I>slurm.conf</I>. For example, to set the timeslice interval 
-to one minute, set <I>SchedulerTimeSlice=60</I>. Short values can increase 
+<B>SchedulerTimeSlice</B>: The default timeslice interval is 30 seconds.
+To change this duration, set <I>SchedulerTimeSlice</I> to the desired interval
+(in seconds) in <I>slurm.conf</I>. For example, to set the timeslice interval
+to one minute, set <I>SchedulerTimeSlice=60</I>. Short values can increase
 the overhead of gang scheduling.
 </LI>
 <LI>
-<B>Shared</B>: Configure the partition's <I>Shared</I> setting to 
-<I>FORCE</I> for all partitions in which timeslicing is to take place. 
-The <I>FORCE</I> option supports an additional parameter that controls 
-how many jobs can share a resource (FORCE[:max_share]). By default the 
-max_share value is 4. To allow up to 6 jobs from this partition to be 
+<B>Shared</B>: Configure the partition's <I>Shared</I> setting to
+<I>FORCE</I> for all partitions in which timeslicing is to take place.
+The <I>FORCE</I> option supports an additional parameter that controls
+how many jobs can share a resource (FORCE[:max_share]). By default the
+max_share value is 4. To allow up to 6 jobs from this partition to be
 allocated to a common resource, set <I>Shared=FORCE:6</I>. To only let 2 jobs
 timeslice on the same resources, set <I>Shared=FORCE:2</I>.
 </LI>
 </UL>
 <P>
-In order to enable gang scheduling after making the configuration changes 
-described above, restart SLURM if it is already running. Any change to the 
-plugin settings in SLURM requires a full restart of the daemons. If you 
+In order to enable gang scheduling after making the configuration changes
+described above, restart SLURM if it is already running. Any change to the
+plugin settings in SLURM requires a full restart of the daemons. If you
 just change the partition <I>Shared</I> setting, this can be updated with
 <I>scontrol reconfig</I>.
 </P>
 <P>
-For an advanced topic discussion on the potential use of swap space, 
+For an advanced topic discussion on the potential use of swap space,
 see "Making use of swap space" in the "Future Work" section below.
 </P>
 
@@ -120,7 +120,7 @@ allocated to all jobs. For each partition an "active bitmap" is maintained that
 tracks all concurrently running jobs in the SLURM cluster. Each time a new
 job is allocated to resources in a partition, the gang scheduler
 compares these newly allocated resources with the resources already maintained
-in the "active bitmap". 
+in the "active bitmap".
 If these two sets of resources are disjoint then the new job is added to the "active bitmap". If these two sets of resources overlap then
 the new job is suspended. All jobs are tracked in a per-partition job queue
 within the gang scheduler logic.
@@ -136,18 +136,18 @@ run (this will be the first suspended job in the run queue). Each following job
 is then compared with the new "active bitmap", and if the job can be run
 concurrently with the other "active" jobs then the job is added. Once this is
 complete then the <I>timeslicer thread</I> suspends any currently running jobs
-that are no longer part of the "active bitmap", and resumes jobs that are new 
+that are no longer part of the "active bitmap", and resumes jobs that are new
 to the "active bitmap".
 </P>
 <P>
-This <I>timeslicer thread</I> algorithm for rotating jobs is designed to prevent jobs from starving (remaining in the suspended state indefinitely) and 
-to be as fair as possible in the distribution of runtime while still keeping 
+This <I>timeslicer thread</I> algorithm for rotating jobs is designed to prevent jobs from starving (remaining in the suspended state indefinitely) and
+to be as fair as possible in the distribution of runtime while still keeping
 all of the resources as busy as possible.
 </P>
 <P>
 The gang scheduler suspends jobs via the same internal functions that
-support <I>scontrol suspend</I> and <I>scontrol resume</I>. 
-A good way to observe the operation of the timeslicer is by running 
+support <I>scontrol suspend</I> and <I>scontrol resume</I>.
+A good way to observe the operation of the timeslicer is by running
 <I>squeue -i&lt;time&gt;</I> in a terminal window where <I>time</I> is set
 equal to <I>SchedulerTimeSlice</I>.
 </P>
@@ -155,7 +155,7 @@ equal to <I>SchedulerTimeSlice</I>.
 <H2>A Simple Example</H2>
 
 <P>
-The following example is configured with <I>select/linear</I> and <I>Shared=FORCE</I>. 
+The following example is configured with <I>select/linear</I> and <I>Shared=FORCE</I>.
 This example takes place on a small cluster of 5 nodes:
 </P>
 <PRE>
@@ -200,7 +200,7 @@ JOBID PARTITION    NAME  USER ST  TIME NODES NODELIST
     4    active  myload  user  S  0:00     5 n[12-16]
 </PRE>
 <P>
-After 30 seconds the gang scheduler swaps jobs, and now job 4 is the 
+After 30 seconds the gang scheduler swaps jobs, and now job 4 is the
 active one:
 </P>
 <PRE>
@@ -226,7 +226,7 @@ JOBID PARTITION    NAME  USER ST  TIME NODES NODELIST
 
 <P>
 <B>A possible side effect of timeslicing</B>: Note that jobs that are
-immediately suspended may cause their <I>srun</I> commands to produce the 
+immediately suspended may cause their <I>srun</I> commands to produce the
 following output:
 </P>
 <PRE>
@@ -314,7 +314,7 @@ Without timeslicing and without the backfill scheduler enabled, job 14 has to
 wait for job 13 to finish.
 </P>
 <P>
-This is called "local" backfilling because the backfilling only occurs with 
+This is called "local" backfilling because the backfilling only occurs with
 jobs close enough in the queue to get allocated by the scheduler as part of
 oversubscribing the resources. Recall that the number of jobs that can
 overcommit a resource is controlled by the <I>Shared=FORCE:max_share</I> value,
@@ -335,23 +335,23 @@ The following two examples illustrate the primary difference between
 When <I>CR_CPU</I> (or <I>CR_CPU_Memory</I>) is configured then the selector
 treats the CPUs as simple, <I>interchangeable</I> computing resources. However
 when <I>CR_Core</I> (or <I>CR_Core_Memory</I>) is enabled the selector treats
-the CPUs as individual resources that are <U>specifically</U> allocated to 
+the CPUs as individual resources that are <U>specifically</U> allocated to
 jobs.
 This subtle difference is highlighted when timeslicing is enabled.
 </P>
 <P>
 In both examples 6 jobs are submitted. Each job requests 2 CPUs per node, and
-all of the nodes contain two quad-core processors. The timeslicer will 
-initially let the first 4 jobs run and suspend the last 2 jobs. 
-The manner in which these jobs are timesliced depends upon the configured 
+all of the nodes contain two quad-core processors. The timeslicer will
+initially let the first 4 jobs run and suspend the last 2 jobs.
+The manner in which these jobs are timesliced depends upon the configured
 <I>SelectTypeParameter</I>.
 </P>
 <P>
-In the first example <I>CR_Core_Memory</I> is configured. Note that jobs 46 
-and 47 don't <U>ever</U> get suspended. This is because they are not sharing 
-their cores with any other job. 
+In the first example <I>CR_Core_Memory</I> is configured. Note that jobs 46
+and 47 don't <U>ever</U> get suspended. This is because they are not sharing
+their cores with any other job.
 Jobs 48 and 49 were allocated to the same cores as jobs 44 and 45.
-The timeslicer recognizes this and timeslices only those jobs: 
+The timeslicer recognizes this and timeslices only those jobs:
 </P>
 <PRE>
 [user@n16 load]$ <B>sinfo</B>
@@ -493,12 +493,12 @@ Note that the runtime of all 6 jobs is roughly equal. Jobs 51-54 ran first so
 they're slightly ahead, but so far all jobs have run for at least 3 minutes.
 </P>
 <P>
-At the core level this means that SLURM relies on the Linux kernel to move 
-jobs around on the cores to maximize performance. 
-This is different than when <I>CR_Core_Memory</I> was configured and the jobs 
-would effectively remain "pinned" to their specific cores for the duration of 
-the job. 
-Note that <I>CR_Core_Memory</I> supports CPU binding, while 
+At the core level this means that SLURM relies on the Linux kernel to move
+jobs around on the cores to maximize performance.
+This is different than when <I>CR_Core_Memory</I> was configured and the jobs
+would effectively remain "pinned" to their specific cores for the duration of
+the job.
+Note that <I>CR_Core_Memory</I> supports CPU binding, while
 <I>CR_CPU_Memory</I> does not.
 </P>
 
@@ -508,16 +508,16 @@ Note that <I>CR_Core_Memory</I> supports CPU binding, while
 <B>Making use of swap space</B>: (note that this topic is not currently
 scheduled for development, unless someone would like to pursue this) It should
 be noted that timeslicing does provide an interesting mechanism for high
-performance jobs to make use of swap space. 
-The optimal scenario is one in which suspended jobs are "swapped out" and 
-active jobs are "swapped in". 
-The swapping activity would only occur once every  <I>SchedulerTimeslice</I> 
+performance jobs to make use of swap space.
+The optimal scenario is one in which suspended jobs are "swapped out" and
+active jobs are "swapped in".
+The swapping activity would only occur once every  <I>SchedulerTimeslice</I>
 interval.
 </P>
 <P>
 However, SLURM should first be modified to include support for scheduling jobs
 into swap space and to provide controls to prevent overcommitting swap space.
-For now this idea could be experimented with by disabling memory support in 
+For now this idea could be experimented with by disabling memory support in
 the selector and submitting appropriately sized jobs.
 </P>
 
diff --git a/doc/html/header.txt b/doc/html/header.txt
index 20b947f3501f2b9269a28480f9d2bb5e842c184f..64cd41e80c41666ca97f59a0b096f549c7af795a 100644
--- a/doc/html/header.txt
+++ b/doc/html/header.txt
@@ -6,7 +6,7 @@
 <head>
 <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
 <meta http-equiv="Pragma" content="no-cache">
-<meta http-equiv="keywords" content="Simple Linux Utility for Resource Management, SLURM, resource management, 
+<meta http-equiv="keywords" content="Simple Linux Utility for Resource Management, SLURM, resource management,
 Linux clusters, high-performance computing, Livermore Computing">
 <meta name="LLNLRandR" content="LLNL-WEB-411573">
 <meta name="LLNLRandRdate" content="26 March 2009">
diff --git a/doc/html/help.shtml b/doc/html/help.shtml
index e0baef0e76cf4c4efac66e8dd80a38d5497097e7..390840fa6c59850f0ba20654dfd6ad899b0b6211 100644
--- a/doc/html/help.shtml
+++ b/doc/html/help.shtml
@@ -4,15 +4,15 @@
 <p>Try the following steps if you are having problems with SLURM:</p>
 <ol>
 <li>See if the problem is addressed in the <a href="faq.html">SLURM FAQ</a>,
-<a href="troubleshoot.html">SLURM Troubleshooting Guide</a> or the 
+<a href="troubleshoot.html">SLURM Troubleshooting Guide</a> or the
 <a href="http://groups.google.com/group/slurm-devel">slurm-dev mailing list archive</a>.</li>
-<li>For run-time problems, try running the command or daemons in verbose mode 
-(<span class="commandline">-v</span> option), and see if additional information 
+<li>For run-time problems, try running the command or daemons in verbose mode
+(<span class="commandline">-v</span> option), and see if additional information
 helps you resolve the problem.</li>
-<li>Customers of HP, Linux NetworX and others providing commercial support 
+<li>Customers of HP, Linux NetworX and others providing commercial support
 for SLURM should contact their support staff.</li>
 <li>Send a detailed description of the problem, the output from the command
-"scontrol show config", logs, back traces from any core files, etc. to 
+"scontrol show config", logs, back traces from any core files, etc. to
 <a href="mailto:slurm-dev@lists.llnl.gov">slurm-dev@lists.llnl.gov</a>.</li>
 </ol>
 
diff --git a/doc/html/ibm.shtml b/doc/html/ibm.shtml
index 5eb7b6ac1db514154b0fe15ede927fb9aca2a21c..57ec96ca702fd0234f91b2eaf0338141e54cfb60 100644
--- a/doc/html/ibm.shtml
+++ b/doc/html/ibm.shtml
@@ -6,53 +6,53 @@
 
 <p>This document describes the unique features of SLURM on the
 IBM AIX computers with a Federation switch.
-You should be familiar with the SLURM's mode of operation on Linux clusters 
-before studying the relatively few differences in IBM system operation 
+You should be familiar with SLURM's mode of operation on Linux clusters
+before studying the relatively few differences in IBM system operation
 described in this document.</p>
 
 <h2>User Tools</h2>
 
-<p>The normal set of SLURM user tools: srun, scancel, sinfo, smap, squeue and scontrol 
-provide all of the expected services except support for job steps. 
+<p>The normal set of SLURM user tools: srun, scancel, sinfo, smap, squeue and scontrol
+provide all of the expected services except support for job steps.
 While the srun command will launch the tasks of a job step on an IBM
 AIX system, it does not support use of the Federation switch or IBM's MPI.
-Job steps should be launched using IBM's poe command. 
+Job steps should be launched using IBM's poe command.
 This architecture insures proper operation of all IBM tools.</p>
 
 <p>You will use srun to submit a batch script to SLURM.
-This script should contain one or more invocations of poe to launch 
+This script should contain one or more invocations of poe to launch
 the tasks.
-If you want to run a job interactively, just execute poe directly. 
-Poe will recognize that it lacks a SLURM job allocation (the SLURM_JOB_ID 
-environment variable will be missing) and create the SLURM allocation 
+If you want to run a job interactively, just execute poe directly.
+Poe will recognize that it lacks a SLURM job allocation (the SLURM_JOB_ID
+environment variable will be missing) and create the SLURM allocation
 prior to launching tasks.</p>
 
-<p>Each poe invocation (or SLURM job step) can have it's own network 
+<p>Each poe invocation (or SLURM job step) can have its own network
 specification.
 For example one poe may use IP mode communications and the next use
-User Space (US) mode communications. 
-This enhancement to normal poe functionality may be accomplished by 
+User Space (US) mode communications.
+This enhancement to normal poe functionality may be accomplished by
 setting the SLURM_NETWORK environment variable.
-The format of SLURM_NETWORK is "network.[protocol],[type],[usage],[mode]". 
-For example "network.mpi,en0,shared,ip". 
+The format of SLURM_NETWORK is "network.[protocol],[type],[usage],[mode]".
+For example "network.mpi,en0,shared,ip".
 See LoadLeveler documentation for more details.</p>
 
 <h2>Checkpoint</h2>
 
-<p>SLURM supports checkpoint via poe. 
-In order to enable checkpoint, the shell executing the poe command must 
-itself be initiated with the environment variable <b>CHECKPOINT=yes</b>. 
-One file is written for each node on which the job is executing, plus 
+<p>SLURM supports checkpoint via poe.
+In order to enable checkpoint, the shell executing the poe command must
+itself be initiated with the environment variable <b>CHECKPOINT=yes</b>.
+One file is written for each node on which the job is executing, plus
 another for the script executing poe.a
 By default, the checkpoint files will be written to the current working
 directory of the job.
-Names and locations of these files can be controlled using the 
+Names and locations of these files can be controlled using the
 environment variables <b>MP_CKPTFILE</b> and <b>MP_CKPTDIR</b>.
-Use the squeue command to identify the job and job step of interest. 
-To initiate a checkpoint in which the job step will continue execution, 
+Use the squeue command to identify the job and job step of interest.
+To initiate a checkpoint in which the job step will continue execution,
 use the command: <br>
 <b>scontrol check create <i>job_id.step_id</i></b><br>
-To initiate a checkpoint in which the job step will terminate afterwards, 
+To initiate a checkpoint in which the job step will terminate afterwards,
 use the command: <br>
 <b>scontrol check vacate <i>job_id.step_id</i></b></p>
 
@@ -60,27 +60,27 @@ use the command: <br>
 
 <p>Three unique components are required to use SLURM on an IBM system.</p>
 <ol>
-<li>The Federation switch plugin is required.  
+<li>The Federation switch plugin is required.
 This component is packaged with the SLURM distribution.</li>
-<li>There is a process tracking kernel extension required. 
-This is used to insure that all processes associated with a job 
+<li>There is a process tracking kernel extension required.
+This is used to ensure that all processes associated with a job
 are tracked.
 SLURM normally uses session ID and process group ID on Linux systems,
-but these mechanisms can not prevent user processes from establishing 
-their own session or process group and thus "escape" from SLURM 
+but these mechanisms can not prevent user processes from establishing
+their own session or process group and thus "escape" from SLURM
 tracking.
-This kernel extension is not packaged with SLURM, but is available 
+This kernel extension is not packaged with SLURM, but is available
 upon request.</li>
-<li>The final component is a library that accepts poe library calls 
-and performs actions in SLURM to satisfy these requests, such 
-as launching tasks. 
-This library is based upon IBM Confidential information and is 
-not at this time available for distribution. 
-Interested parties are welcome to pursue the possible distribution 
+<li>The final component is a library that accepts poe library calls
+and performs actions in SLURM to satisfy these requests, such
+as launching tasks.
+This library is based upon IBM Confidential information and is
+not at this time available for distribution.
+Interested parties are welcome to pursue the possible distribution
 of this library with IBM and SLURM developers.</li>
 </ol>
-<p>Until this last issue is resolved, use of SLURM on an IBM AIX system 
-should not be viewed as a supported configuration (at least outside 
+<p>Until this last issue is resolved, use of SLURM on an IBM AIX system
+should not be viewed as a supported configuration (at least outside
 of LLNL, which established a contract with IBM for this purpose).</p>
 
 <p class="footer"><a href="#top">top</a></p>
diff --git a/doc/html/jobacct_gatherplugins.shtml b/doc/html/jobacct_gatherplugins.shtml
index ea779aaefdc964cf789132293af3ad977d9e21fe..ef31f4900aa97f57d389751b88be016694c8291b 100644
--- a/doc/html/jobacct_gatherplugins.shtml
+++ b/doc/html/jobacct_gatherplugins.shtml
@@ -31,8 +31,8 @@ information to the standard rusage information also gathered for each job.
 </ul>
 The <b>sacct</b> program can be used to display gathered data from regular
 accounting and from these plugins.
-<p>The programmer is urged to study 
-<span class="commandline">src/plugins/jobacct_gather/linux</span> and 
+<p>The programmer is urged to study
+<span class="commandline">src/plugins/jobacct_gather/linux</span> and
 <span class="commandline">src/common/jobacct_common.c/.h</span>
 for a sample implementation of a SLURM job accounting gather plugin.
 <p class="footer"><a href="#top">top</a>
@@ -42,109 +42,109 @@ for a sample implementation of a SLURM job accounting gather plugin.
 <p>All of the following functions are required. Functions which are not
 implemented must be stubbed.
 
-<p class="commandline">jobacctinfo_t *jobacct_gather_p_create(jobacct_id_t *jobacct_id) 
+<p class="commandline">jobacctinfo_t *jobacct_gather_p_create(jobacct_id_t *jobacct_id)
 <p style="margin-left:.2in"><b>Description</b>:<br>
-jobacct_gather_p_alloc() used to alloc a pointer to and initialize a 
+jobacct_gather_p_alloc() is used to allocate a pointer to and initialize a
 new jobacctinfo structure.<br><br>
 You will need to free the information returned by this function!
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
-<span class="commandline">tid</span> 
+<span class="commandline">tid</span>
 (input) id of the task send in (uint16_t)NO_VAL if no specfic task.
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">jobacctinfo structure pointer</span> on success, or<br>
 <span class="commandline">NULL</span> on failure.
 
-<p class="commandline">void jobacct_gather_p_destroy(jobacctinfo_t *jobacct) 
+<p class="commandline">void jobacct_gather_p_destroy(jobacctinfo_t *jobacct)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 jobacct_gather_p_free() used to free the allocation made by jobacct_gather_p_alloc().
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
-<span class="commandline">jobacct</span> 
+<span class="commandline">jobacct</span>
 (input) structure to be freed.<br>
 <span class="commandline">none</span>
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">none</span>
 
 <p class="commandline">
-int jobacct_gather_p_setinfo(jobacctinfo_t *jobacct, 
-                      enum jobacct_data_type type, void *data) 
+int jobacct_gather_p_setinfo(jobacctinfo_t *jobacct,
+                      enum jobacct_data_type type, void *data)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 jobacct_gather_p_setinfo() is called to set the values of a jobacctinfo_t to
 specific values based on inputs.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
-<span class="commandline">jobacct</span> 
+<span class="commandline">jobacct</span>
 (input/output) structure to be altered.<br>
-<span class="commandline">type</span> 
+<span class="commandline">type</span>
 (input) enum of specific part of jobacct to alter.<br>
-<span class="commandline">data</span> 
+<span class="commandline">data</span>
 (input) corresponding data to set jobacct part to.
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
 
 <p class="commandline">
-int jobacct_gather_p_getinfo(jobacctinfo_t *jobacct, 
-                      enum jobacct_data_type type, void *data) 
+int jobacct_gather_p_getinfo(jobacctinfo_t *jobacct,
+                      enum jobacct_data_type type, void *data)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 jobacct_gather_p_getinfo() is called to get the values of a jobacctinfo_t
 specific values based on inputs.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
-<span class="commandline">jobacct</span> 
+<span class="commandline">jobacct</span>
 (input) structure to be queried.<br>
-<span class="commandline">type</span> 
+<span class="commandline">type</span>
 (input) enum of specific part of jobacct to get.<br>
-<span class="commandline">data</span> 
+<span class="commandline">data</span>
 (output) corresponding data to from jobacct part.
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
 
 <p class="commandline">
-void jobacct_gather_p_pack(jobacctinfo_t *jobacct, Buf buffer) 
+void jobacct_gather_p_pack(jobacctinfo_t *jobacct, Buf buffer)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 jobacct_gather_p_pack() pack jobacctinfo_t in a buffer to send across the network.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
-<span class="commandline">jobacct</span> 
+<span class="commandline">jobacct</span>
 (input) structure to pack.<br>
-<span class="commandline">buffer</span> 
+<span class="commandline">buffer</span>
 (input/output) buffer to pack structure into.
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">none</span>
 
 <p class="commandline">
-void jobacct_gather_p_unpack(jobacctinfo_t *jobacct, Buf buffer) 
+void jobacct_gather_p_unpack(jobacctinfo_t *jobacct, Buf buffer)
 <p style="margin-left:.2in"><b>Description</b>:<br>
-jobacct_gather_p_unpack() unpack jobacctinfo_t from a buffer received from 
+jobacct_gather_p_unpack() unpacks jobacctinfo_t from a buffer received from
 the network.
 You will need to free the jobacctinfo_t returned by this function!
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
-<span class="commandline">jobacct</span> 
+<span class="commandline">jobacct</span>
 (input/output) structure to fill.<br>
-<span class="commandline">buffer</span> 
+<span class="commandline">buffer</span>
 (input) buffer to unpack structure from.<br>
-<p style="margin-left:.2in"><b>Returns</b>: 
+<p style="margin-left:.2in"><b>Returns</b>:
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
 
 <p class="commandline">
-void jobacct_gather_p_aggregate(jobacctinfo_t *dest, jobacctinfo_t *from) 
+void jobacct_gather_p_aggregate(jobacctinfo_t *dest, jobacctinfo_t *from)
 <p style="margin-left:.2in"><b>Description</b>:<br>
-jobacct_gather_p_aggregate() is called to aggregate and get max values from two 
+jobacct_gather_p_aggregate() is called to aggregate and get max values from two
 different jobacctinfo structures.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
-<span class="commandline">dest</span> 
+<span class="commandline">dest</span>
 (input/output) initial structure to be applied to.<br>
-<span class="commandline">from</span> 
+<span class="commandline">from</span>
 (input) new info to apply to dest.
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">none</span>
 
 <p class="footer"><a href="#top">top</a>
 
-<p class="commandline">int jobacct_gather_p_startpoll(int frequency) 
+<p class="commandline">int jobacct_gather_p_startpoll(int frequency)
 <p style="margin-left:.2in"><b>Description</b>:<br>
-jobacct_gather_p_startpoll() is called at the start of the slurmstepd, 
+jobacct_gather_p_startpoll() is called at the start of the slurmstepd,
 this starts a thread that should poll information to be queried at any time
-during throughout the end of the process.  
+throughout the duration of the process.
 Put global initialization here.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">frequency</span> (input) poll frequency for polling
@@ -153,9 +153,9 @@ thread.
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
 
-<p class="commandline">int jobacct_gather_p_endpoll() 
+<p class="commandline">int jobacct_gather_p_endpoll()
 <p style="margin-left:.2in"><b>Description</b>:<br>
-jobacct_gather_p_endpoll() is called when the process is finished to stop the 
+jobacct_gather_p_endpoll() is called when the process is finished to stop the
 polling thread.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">none</span>
@@ -163,7 +163,7 @@ polling thread.
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
 
-<p class="commandline">void jobacct_gather_p_suspend_poll() 
+<p class="commandline">void jobacct_gather_p_suspend_poll()
 <p style="margin-left:.2in"><b>Description</b>:<br>
 jobacct_gather_p_suspend_poll() is called when the process is suspended.
 This causes the polling thread to halt until the process is resumed.
@@ -181,10 +181,10 @@ This causes the polling thread to resume operation.
 <p style="margin-left:.2in"><b>Returns</b>:<br>
 <span class="commandline">none</span>
 
-<p class="commandline">int jobacct_gather_p_set_proctrack_container_id(uint32_t id) 
+<p class="commandline">int jobacct_gather_p_set_proctrack_container_id(uint32_t id)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 jobacct_gather_p_set_proctrack_container_id() is called after the
-proctrack container id is known at the start of the slurmstepd, 
+proctrack container id is known at the start of the slurmstepd,
 if using a proctrack plugin to track processes this will set the head
 of the process tree in the plugin.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
@@ -193,46 +193,46 @@ of the process tree in the plugin.
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
 
-<p class="commandline">int jobacct_gather_p_add_task(pid_t pid, uint16_t tid) 
+<p class="commandline">int jobacct_gather_p_add_task(pid_t pid, uint16_t tid)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 jobacct_gather_p_add_task() used to add a task to the poller.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
-<span class="commandline"> pid</span> (input) Process id <br> 
+<span class="commandline"> pid</span> (input) Process id <br>
 <span class="commandline"> tid</span> (input) slurm global task id
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
 
-<p class="commandline">jobacctinfo_t *jobacct_gather_p_stat_task(pid_t pid) 
+<p class="commandline">jobacctinfo_t *jobacct_gather_p_stat_task(pid_t pid)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 jobacct_gather_p_stat_task() used to get most recent information about task.
 You need to FREE the information returned by this function!
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
-<span class="commandline"> pid</span> (input) Process id  
+<span class="commandline"> pid</span> (input) Process id
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">jobacctinfo structure pointer</span> on success, or<br>
 <span class="commandline">NULL</span> on failure.
 
-<p class="commandline">jobacctinfo_t *jobacct_gather_p_remove_task(pid_t pid) 
+<p class="commandline">jobacctinfo_t *jobacct_gather_p_remove_task(pid_t pid)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 jobacct_gather_p_remove_task() used to remove a task from the poller.
 You need to FREE the information returned by this function!
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
-<span class="commandline"> pid</span> (input) Process id  
+<span class="commandline"> pid</span> (input) Process id
 <p style="margin-left:.2in"><b>Returns</b>: <br>
-<span class="commandline">Pointer to removed jobacctinfo_t structure</span> 
+<span class="commandline">Pointer to removed jobacctinfo_t structure</span>
 on success, or <br>
 <span class="commandline">NULL</span> on failure.
 
 <p class="commandline">
-void jobacct_gather_p_2_sacct(sacct_t *sacct, jobacctinfo_t *jobacct) 
+void jobacct_gather_p_2_sacct(sacct_t *sacct, jobacctinfo_t *jobacct)
 <p style="margin-left:.2in"><b>Description</b>:<br>
-jobacct_gather_p_2_sacct() is called to transfer information from data structure 
+jobacct_gather_p_2_sacct() is called to transfer information from data structure
 jobacct to structure sacct.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
-<span class="commandline">sacct</span> 
+<span class="commandline">sacct</span>
 (input/output) initial structure to be applied to.<br>
-<span class="commandline">jobacct</span> 
+<span class="commandline">jobacct</span>
 (input) jobacctinfo_t structure containing information to apply to sacct.
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">none</span>
@@ -251,7 +251,7 @@ plugin and the frequency at which to gather information about running jobs.
 </dl>
 
 <h2>Versioning</h2>
-<p> This document describes version 1 of the SLURM Job Accounting Gather API. Future 
+<p> This document describes version 1 of the SLURM Job Accounting Gather API. Future
 releases of SLURM may revise this API. A job accounting gather plugin conveys its
 ability to implement a particular API version using the mechanism outlined
 for SLURM plugins.
diff --git a/doc/html/jobcompplugins.shtml b/doc/html/jobcompplugins.shtml
index 7246c74599bb7b2ee913d3ce746f9c57fa419c56..c6721a929ae0a3ad3d9edb2892b7a517341ebd1e 100644
--- a/doc/html/jobcompplugins.shtml
+++ b/doc/html/jobcompplugins.shtml
@@ -3,15 +3,15 @@
 <h1><a name="top">SLURM Job Completion Logging Plugin API</a></h1>
 
 <h2> Overview</h2>
-<p> This document describes SLURM job completion logging plugins and the API that defines 
-them. It is intended as a resource to programmers wishing to write their own SLURM 
+<p> This document describes SLURM job completion logging plugins and the API that defines
+them. It is intended as a resource to programmers wishing to write their own SLURM
 job completion logging plugins. This is version 0 of the API.</p>
-<p>SLURM job completion logging plugins are SLURM plugins that implement the SLURM 
-API for logging job information upon their completion. This may be used to log job information 
-to a text file, database, etc. The plugins must conform to the SLURM Plugin API with the following 
+<p>SLURM job completion logging plugins are SLURM plugins that implement the SLURM
+API for logging job information upon their completion. This may be used to log job information
+to a text file, database, etc. The plugins must conform to the SLURM Plugin API with the following
 specifications:</p>
 <p><span class="commandline">const char plugin_type[]</span><br>
-The major type must be &quot;jobcomp.&quot; The minor type can be any recognizable 
+The major type must be &quot;jobcomp.&quot; The minor type can be any recognizable
 abbreviation for the type of scheduler. We recommend, for example:</p>
 <ul>
 <li><b>none</b>&#151;No job logging.</li>
@@ -22,36 +22,36 @@ abbreviation for the type of scheduler. We recommend, for example:</p>
 </ul>
 The <b>sacct</b> program with option <b>-c</b> can be used to display
 gathered data from database and filetxt plugins.
-<p>The <span class="commandline">plugin_name</span> and 
-<span class="commandline">plugin_version</span> 
-symbols required by the SLURM Plugin API require no specialization for 
-job completion logging support. 
+<p>The <span class="commandline">plugin_name</span> and
+<span class="commandline">plugin_version</span>
+symbols required by the SLURM Plugin API require no specialization for
+job completion logging support.
 Note carefully, however, the versioning discussion below.</p>
-<p>The programmer is urged to study 
+<p>The programmer is urged to study
 <span class="commandline">src/plugins/jobcomp/filetxt/jobcomp_filetxt.c</span> and
-<span class="commandline">src/plugins/jobcomp/none/jobcomp_none.c</span> 
+<span class="commandline">src/plugins/jobcomp/none/jobcomp_none.c</span>
 for sample implementations of a SLURM job completion logging plugin.</p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Data Objects</h2>
-<p>The implementation must maintain (though not necessarily directly export) an 
-enumerated <span class="commandline">errno</span>  to allow SLURM to discover 
-as practically as possible the reason for any failed API call. Plugin-specific enumerated 
-integer values should be used when appropriate. It is desirable that these values 
-be mapped into the range ESLURM_JOBCOMP_MIN and ESLURM_JOBCOMP_MAX 
+<p>The implementation must maintain (though not necessarily directly export) an
+enumerated <span class="commandline">errno</span>  to allow SLURM to discover
+as practically as possible the reason for any failed API call. Plugin-specific enumerated
+integer values should be used when appropriate. It is desirable that these values
+be mapped into the range ESLURM_JOBCOMP_MIN and ESLURM_JOBCOMP_MAX
 as defined in <span class="commandline">slurm/slurm_errno.h</span>.
 The error number should be returned by the function
-<a href="#get_errno"><span class="commandline">slurm_jobcomp_get_errno()</span></a> 
-and this error number can be converted to an appropriate string description using the 
-<a href="#strerror"><span class="commandline">slurm_jobcomp_strerror()</span></a> 
+<a href="#get_errno"><span class="commandline">slurm_jobcomp_get_errno()</span></a>
+and this error number can be converted to an appropriate string description using the
+<a href="#strerror"><span class="commandline">slurm_jobcomp_strerror()</span></a>
 function described below.</p>
 
-<p>These values must not be used as return values in integer-valued functions 
-in the API. The proper error return value from integer-valued functions is SLURM_ERROR. 
-The implementation should endeavor to provide useful and pertinent information by 
-whatever means is practical. 
-Successful API calls are not required to reset any errno to a known value. However, 
-the initial value of any errno, prior to any error condition arising, should be 
+<p>These values must not be used as return values in integer-valued functions
+in the API. The proper error return value from integer-valued functions is SLURM_ERROR.
+The implementation should endeavor to provide useful and pertinent information by
+whatever means is practical.
+Successful API calls are not required to reset any errno to a known value. However,
+the initial value of any errno, prior to any error condition arising, should be
 SLURM_SUCCESS. </p>
 <p class="footer"><a href="#top">top</a></p>
 
@@ -60,22 +60,22 @@ SLURM_SUCCESS. </p>
 
 <p class="commandline">int slurm_jobcomp_set_location (char * location);</p>
 <p style="margin-left:.2in"><b>Description</b>: Specify the location to be used for job logging.</p>
-<p style="margin-left:.2in"><b>Argument</b>:<span class="commandline"> location</span>&nbsp; 
-&nbsp;&nbsp;(input) specification of where logging should be done. The interpretation of 
+<p style="margin-left:.2in"><b>Argument</b>:<span class="commandline"> location</span>&nbsp;
+&nbsp;&nbsp;(input) specification of where logging should be done. The interpretation of
 this string is at the discresion of the plugin implementation.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
-the plugin should return SLURM_ERROR and set the errno to an appropriate value 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 <p class="footer"><a href="#top">top</a></p>
 
 <p class="commandline">int slurm_jobcomp_log_record (struct job_record *job_ptr);</p>
-<p style="margin-left:.2in"><b>Description</b>: Note termin ation of a job 
+<p style="margin-left:.2in"><b>Description</b>: Note termination of a job
 with the specified characteristics.</p>
 <p style="margin-left:.2in"><b>Argument</b>: <br>
 <span class="commandline"> job_ptr</span>&nbsp;&nbsp;&nbsp;(input) Pointer to job record as defined
 in <i>src/slurmctld/slurmctld.h</i></p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
-the plugin should return SLURM_ERROR and set the errno to an appropriate value 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 <p class="footer"><a href="#top">top</a></p>
 
@@ -91,7 +91,7 @@ job completion logger specific error.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
 <span class="commandline"> errnum</span>&nbsp; &nbsp;&nbsp;(input) a job completion logger
 specific error code.</p>
-<p style="margin-left:.2in"><b>Returns</b>: Pointer to string describing the error 
+<p style="margin-left:.2in"><b>Returns</b>: Pointer to string describing the error
 or NULL if no description found in this plugin.</p>
 <p class="footer"><a href="#top">top</a></p>
 
@@ -112,7 +112,7 @@ void slurm_jobcomp_get_jobs(List job_list, List selected_steps, List selected_pa
 <p class="footer"><a href="#top">top</a></p>
 
 <p class="commandline">
-void slurm_jobcomp_archive(List selected_parts, void *params) 
+void slurm_jobcomp_archive(List selected_parts, void *params)
 <p style="margin-left:.2in"><b>Description</b>: used to archive old data.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">List selected_parts </span>
@@ -124,8 +124,8 @@ void slurm_jobcomp_archive(List selected_parts, void *params)
 
 
 <h2>Versioning</h2>
-<p> This document describes version 1 of the SLURM job completion API. Future 
-releases of SLURM may revise this API. A job completion plugin conveys its ability 
+<p> This document describes version 1 of the SLURM job completion API. Future
+releases of SLURM may revise this API. A job completion plugin conveys its ability
 to implement a particular API version using the mechanism outlined for SLURM plugins.</p>
 <p class="footer"><a href="#top">top</a></p>
 
diff --git a/doc/html/lci.7.tutorial.pdf b/doc/html/lci.7.tutorial.pdf
index 4232dab419cffb2a46cdcdda9f870c3e6a11ec0e..4f842e57fe1668f3562d13007ea8930a58f022a8 100644
Binary files a/doc/html/lci.7.tutorial.pdf and b/doc/html/lci.7.tutorial.pdf differ
diff --git a/doc/html/linuxstyles.css b/doc/html/linuxstyles.css
index 5f4b15b8a87b2908569d9ae7584a315e5ebcc406..1b40d0c4731a2193b88d31f8efb773f76614f53e 100644
--- a/doc/html/linuxstyles.css
+++ b/doc/html/linuxstyles.css
@@ -3,7 +3,7 @@ html {
   margin-bottom: 1px;
 }
 
-body { 
+body {
   margin-top:20px;
   padding:0px;
   background-color: #FFFFFF;
@@ -71,7 +71,7 @@ pre {font-size: 90%; font-family:"Courier New", Courier, monospace}
   font-size:90%;
 }
 
-.pnav {	
+.pnav {
   font-size: 95%;
   font-weight: bold;
   color: #002280;
@@ -97,7 +97,7 @@ pre {font-size: 90%; font-family:"Courier New", Courier, monospace}
 }
 
 #footer2 #left2 {
-  clear: both; 
+  clear: both;
   float: left;
   padding-top:3px;
   background-color: #FFFFFF;
@@ -129,11 +129,11 @@ pre {font-size: 90%; font-family:"Courier New", Courier, monospace}
   padding: 0px 5px 0px 10px;
 }
 
-.figcaption {  
+.figcaption {
   font-family: Arial, Verdana, Helvetica, sans-serif;
   text-align: left;
   line-height:100%;
-  font-size: 70%; 
+  font-size: 70%;
   font-weight: bold;
   padding-top: 10px;
   color: #002280
@@ -177,14 +177,14 @@ a:active {
  text-decoration: underline
 }
 
-a.nav:link {	
+a.nav:link {
   font-size: 95%;
   color: #002280;
   font-weight: bold;
   text-decoration: none;
 }
 
-a.nav:visited {	
+a.nav:visited {
   font-size: 95%;
   color: #002280;
   font-weight: bold;
@@ -196,7 +196,7 @@ a.nav:hover {
   font-weight: bold;
   text-decoration: none;
 }
-a.nav:active {	
+a.nav:active {
   font-size: 95%;
   color: #002280;
   font-weight: bold;
@@ -209,7 +209,7 @@ a.footer:link {
   text-decoration: none;
 }
 
-a.footer:visited {	
+a.footer:visited {
   font-size: 70%;
   color: #002280;
   text-decoration: none;
diff --git a/doc/html/mail.shtml b/doc/html/mail.shtml
index c8767129d7f3a4026320b93b5caaea7825c0a4fa..6ea86740fdcb415bbba82f8b63cc204228803bf1 100644
--- a/doc/html/mail.shtml
+++ b/doc/html/mail.shtml
@@ -1,16 +1,16 @@
 <!--#include virtual="header.txt"-->
 
 <h1>Mailing Lists</h1>
-<p>We maintain two SLURM mailing lists:</p> 
+<p>We maintain two SLURM mailing lists:</p>
 <ul>
 <li><b>slurm-announce</b> is designated for communications about SLURM releases
 [low traffic].</li>
 <li><b>slurm-dev</b> is designated for communications to SLURM developers
 [high traffic at times].</li>
 </ul>
-<p>To subscribe to either list, send a message to 
-<a href="mailto:majordomo@lists.llnl.gov">majordomo@lists.llnl.gov</a> with the body of the 
-message containing the word "subscribe" followed by the list name and your e-mail address 
+<p>To subscribe to either list, send a message to
+<a href="mailto:majordomo@lists.llnl.gov">majordomo@lists.llnl.gov</a> with the body of the
+message containing the word "subscribe" followed by the list name and your e-mail address
 (if not the sender). For example: <br>
 <i>subscribe slurm-announce bob@yahoo.com</i></p>
 
diff --git a/doc/html/maui.shtml b/doc/html/maui.shtml
index b83dafd3a8bc441d7fca45764f387338a0d93e3d..317ea834625c745c65f2d80d096c4962a54ebec3 100644
--- a/doc/html/maui.shtml
+++ b/doc/html/maui.shtml
@@ -3,13 +3,13 @@
 <h1>Maui Scheduler Integration Guide</h1>
 <h2>Overview</h2>
 <p>Maui configuration is quite complicated and is really beyond the scope
-of any documents we could supply with SLURM.  
-The best resource for Maui configuration information is the 
-online documents at Cluster Resources Inc.: 
+of any documents we could supply with SLURM.
+The best resource for Maui configuration information is the
+online documents at Cluster Resources Inc.:
 <a href="http://www.clusterresources.com/products/maui/docs/mauiadmin.shtml">
 http://www.clusterresources.com/products/maui/docs/mauiadmin.shtml</a>.
 
-<p>Maui uses SLURM commands and a wiki interface to communicate. See the 
+<p>Maui uses SLURM commands and a wiki interface to communicate. See the
 <a href="http://www.clusterresources.com/products/mwm/docs/wiki/wikiinterface.shtml">
 Wiki Interface Specification</a> and
 <a href="http://www.clusterresources.com/products/mwm/docs/wiki/socket.shtml">
@@ -18,14 +18,14 @@ Wiki Socket Protocol Description</a> for more information.</p>
 <h2>Configuration</h2>
 <p>First, download the Maui scheduler kit from their web site
 <a href="http://www.clusterresources.com/pages/products/maui-cluster-scheduler.php">
-http://www.clusterresources.com/pages/products/maui-cluster-scheduler.php</a>. 
-Note: maui-3.2.6p9 has been validated with SLURM, other versions 
+http://www.clusterresources.com/pages/products/maui-cluster-scheduler.php</a>.
+Note: maui-3.2.6p9 has been validated with SLURM, other versions
 should also work properly.
-We anticipate the Maui Scheduler to be upgraded to utilize a more 
-extensive interface to Slurm in early 2007. 
-The newer Maui Scheduler will be able to utilize a more ful featured 
-interface to Slurm as descripted in the 
-<a href="moab.html">Moab Cluster Suite Integration Guide</a>. 
+We anticipate the Maui Scheduler to be upgraded to utilize a more
+extensive interface to Slurm in early 2007.
+The newer Maui Scheduler will be able to utilize a more full featured
+interface to Slurm as described in the
+<a href="moab.html">Moab Cluster Suite Integration Guide</a>.
 This guide will be upgrade at that time.</p>
 
 <h3>Maui configuration</h3>
@@ -35,14 +35,14 @@ Then build Maui from its source distribution. This is a two step process:</p>
 <li>./configure --with-key=42 --with-wiki
 <li>gmake
 </ol>
-<p>The key of 42 is arbitrary. You can use any value, but it will need to 
-be a number no larger than 4,294,967,295 (2^32) and specify the same 
+<p>The key of 42 is arbitrary. You can use any value, but it will need to
+be a number no larger than 4,294,967,295 (2^32) and specify the same
 value as a SLURM configuration parameter described below.
-Maui developers have assured us the authentication key will eventually be 
+Maui developers have assured us the authentication key will eventually be
 set in a configuration file rather than at build time.</p>
 
 <p>Update the Maui configuration file <i>maui.conf</i> (Copy the file
-maui-3.2.6p9/maui.cfg.dist to maui.conf). Add the following configuration 
+maui-3.2.6p9/maui.cfg.dist to maui.conf). Add the following configuration
 parameters to maui.conf:</p>
 <pre>
 RMCFG[host]       TYPE=WIKI
@@ -50,17 +50,17 @@ RMPORT            7321            # selected port
 RMHOST            host
 RMAUTHTYPE[host]  CHECKSUM
 </pre>
-<p><i>host</i> is the hostname where the SLURM controller is running. 
-This must match the value of <i>ControlMachine</i> configured in 
+<p><i>host</i> is the hostname where the SLURM controller is running.
+This must match the value of <i>ControlMachine</i> configured in
 slurm.conf. Note that <i>localhost</i> doesn't work. If you run Maui
-and SLURM on the same machine, you must specify the actual host name. 
-The above example uses a TCP port number of 7321 for 
-communications between SLURM and Maui, but you can pick any port that 
+and SLURM on the same machine, you must specify the actual host name.
+The above example uses a TCP port number of 7321 for
+communications between SLURM and Maui, but you can pick any port that
 is available and accessible. You can also set a polling interval with</p>
 <pre>
 RMPOLLINTERVAL  00:00:20
 </pre>
-<p>It may be desired to have Maui poll SLURM quite often -- 
+<p>It may be desired to have Maui poll SLURM quite often --
 in this case every 20 seconds.
 Note that a job submitted to an idle cluster will not be initiated until
 the Maui daemon polls SLURM and decides to make it run, so the value of
@@ -68,9 +68,9 @@ RMPOLLINTERVAL should be set to a value appropriate for your site
 considering both the desired system responsiveness and the overhead of
 executing Maui daemons too frequently.</p>
 
-<p>In order for Maui to be able to access your SLURM partition, you will 
-need to define in maui.conf a partition with the same name as the SLURM 
-partition(s). For example if nodes "linux[0-3]" are in SLURM partition 
+<p>In order for Maui to be able to access your SLURM partition, you will
+need to define in maui.conf a partition with the same name as the SLURM
+partition(s). For example if nodes "linux[0-3]" are in SLURM partition
 "PartA", slurm.conf includes a line of this sort:</p>
 <pre>
 PartitionName=PartA Default=yes Nodes=linux[0-3]
@@ -99,29 +99,29 @@ SchedulerType=sched/wiki
 SchedulerPort=7321
 SchedulerAuth=42 (for Slurm version 1.1 and earlier only)
 </pre>
-<p>In this case, "SchedulerAuth" has been set to 42, which was the 
-authentication key specified when Maui was configured above. 
+<p>In this case, "SchedulerAuth" has been set to 42, which was the
+authentication key specified when Maui was configured above.
 Just make sure the numbers match.</p>
 
-<p>For SLURM version 1.2 or higher, the authentication key 
-is stored in a file specific to the wiki-plugin named 
+<p>For SLURM version 1.2 or higher, the authentication key
+is stored in a file specific to the wiki-plugin named
 <i>wiki.conf</i>.
 This file should be protected from reading by users.
 It only needs to be readable by <i>SlurmUser</i> (as configured
 in <i>slurm.conf</i>) and only needs to exist on computers
 where the <i>slurmctld</i> daemon executes.
 More information about wiki.conf is available in
-a man page distributed with SLURM, although that 
-includes a description of keywords presently only 
-supported by the sched/wiki2 plugin for use with the 
+a man page distributed with SLURM, although that
+includes a description of keywords presently only
+supported by the sched/wiki2 plugin for use with the
 Moab Scheduler.</p>
 
 <p>SLURM version 2.0 and higher have internal scheduling capabilities
 that are not compatable with Maui.
 <ol>
-<li>Do not configure SLURM to use the "priority/multifactor" plugin 
+<li>Do not configure SLURM to use the "priority/multifactor" plugin
 as it would set job priorities which conflict with those set by Maui.</li>
-<li>Do not use SLURM's <a href="reservations.html">reservation</a> 
+<li>Do not use SLURM's <a href="reservations.html">reservation</a>
 mechanism, but use that offered by Maui.</li>
 <li>Do not use SLURM's <a href="resource_limits.html">resource limits</a>
 as those may conflict with those managed by Maui.</li>
@@ -130,37 +130,37 @@ as those may conflict with those managed by Maui.</li>
 
 <p>The wiki.conf keywords currently supported by Maui include:</p>
 
-<p><b>AuthKey</b> is a DES based encryption key used to sign 
-communications between SLURM and Maui or Moab. 
-This use of this key is essential to insure that a user 
-not build his own program to cancel other user's jobs in 
+<p><b>AuthKey</b> is a DES based encryption key used to sign
+communications between SLURM and Maui or Moab.
+The use of this key is essential to ensure that a user
+not build his own program to cancel other user's jobs in
 SLURM.
-This should be no more than 32-bit unsigned integer and match 
-the the encryption key in Maui (<i>--with-key</i> on the 
-configure line) or Moab (<i>KEY</i> parameter in the 
-<i>moab-private.cfg</i> file). 
-Note that SLURM's wiki plugin does not include a mechanism 
-to submit new jobs, so even without this key nobody could 
+This should be no more than a 32-bit unsigned integer and match
+the encryption key in Maui (<i>--with-key</i> on the
+configure line) or Moab (<i>KEY</i> parameter in the
+<i>moab-private.cfg</i> file).
+Note that SLURM's wiki plugin does not include a mechanism
+to submit new jobs, so even without this key nobody could
 run jobs as another user.</p>
 
 <p><b>ExcludePartitions</b> is used to identify partitions
-whose jobs are to be scheduled directly by SLURM rather 
-than Maui. 
-These jobs will be scheduled on a First-Come-First-Served 
-basis. 
-This may provide faster response times than Maui scheduling. 
+whose jobs are to be scheduled directly by SLURM rather
+than Maui.
+These jobs will be scheduled on a First-Come-First-Served
+basis.
+This may provide faster response times than Maui scheduling.
 Maui will account for and report the jobs, but their initiation
 will be outside of Maui's control.
-Note that Maui controls for resource reservation, fair share 
+Note that Maui controls for resource reservation, fair share
 scheduling, etc. will not apply to the initiation of these jobs.
 If more than one partition is to be scheduled directly by
 Slurm, use a comma separator between their names.</p>
 
-<p><b>HidePartitionJobs</b> identifies partitions whose jobs are not 
+<p><b>HidePartitionJobs</b> identifies partitions whose jobs are not
 to be reported to Maui.
 These jobs will not be accounted for or otherwise visible to Maui.
 Any partitions listed here must also be listed in <b>ExcludePartitions</b>.
-If more than one partition is to have its jobs hidden, use a comma 
+If more than one partition is to have its jobs hidden, use a comma
 separator between their names.</p>
 
 <p>Here is a sample <i>wiki.conf</i> file</p>
@@ -171,7 +171,7 @@ separator between their names.</p>
 # Matches Maui's --with-key configuration parameter
 AuthKey=42
 #
-# SLURM to directly schedule "debug" partition 
+# SLURM to directly schedule "debug" partition
 # and hide the jobs from Maui
 ExcludePartitions=debug
 HidePartitionJobs=debug
diff --git a/doc/html/mc_support.gif b/doc/html/mc_support.gif
index cc2180292c6a9bdfd494aab235e5aec6bc40536d..0451b4348f0c70a48ef143bc79a417775c41b4ea 100644
Binary files a/doc/html/mc_support.gif and b/doc/html/mc_support.gif differ
diff --git a/doc/html/mc_support.shtml b/doc/html/mc_support.shtml
index 0d1772ab8a2476529ab78289fcfc130d14c7d8c5..5123b9574631f10772e42646bda19681d8589eeb 100644
--- a/doc/html/mc_support.shtml
+++ b/doc/html/mc_support.shtml
@@ -150,7 +150,7 @@ the affinity to be set, the task/affinity plugin must be first enabled in
 slurm.conf:
 
 <PRE>
-TaskPlugin=task/affinity          # enable task affinity   
+TaskPlugin=task/affinity          # enable task affinity
 </PRE>
 
 <p>See the "Task Launch" section if generating slurm.conf via
@@ -361,7 +361,7 @@ each node.  In some cases, it is more convenient to be able to
 request that no more than a specific number of ntasks be invoked
 on each node, socket, or core.  Examples of this include submitting
 a hybrid MPI/OpenMP app where only one MPI "task/rank" should be
-assigned to each node while allowing the OpenMP portion to utilize 
+assigned to each node while allowing the OpenMP portion to utilize
 all of the parallelism present in the node, or submitting a single
 setup/cleanup/monitoring job to each node of a pre-existing
 allocation as one step in a larger job script.
@@ -718,7 +718,7 @@ trivial and that it assumes that users are experts.</p>
 <p>Several extensions have also been made to the other SLURM utilities to
 make working with multi-core/multi-threaded systems easier.</p>
 
-<!- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - --> 
+<!- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
 <h3>sinfo</h3>
 
 <p>The long version (-l) of the sinfo node listing (-N) has been
@@ -727,8 +727,8 @@ node.  For example:
 
 <PRE>
 % sinfo -N
-NODELIST     NODES PARTITION STATE 
-hydra[12-15]     4    parts* idle  
+NODELIST     NODES PARTITION STATE
+hydra[12-15]     4    parts* idle
 
 % sinfo -lN
 Thu Sep 14 17:47:13 2006
@@ -758,13 +758,13 @@ the following identifiers are available:</p>
 
 <PRE>
 % sinfo -o '%9P %4c %8z %8X %8Y %8Z'
-PARTITION CPUS S:C:T    SOCKETS  CORES    THREADS 
-parts*    4    2:2:1    2        2        1       
+PARTITION CPUS S:C:T    SOCKETS  CORES    THREADS
+parts*    4    2:2:1    2        2        1
 </PRE>
 
 <p>See also 'sinfo --help' and 'man sinfo'</p>
 
-<!- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - --> 
+<!- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
 <h3>squeue</h3>
 
 <p>For user specified output formats (-o/--format) and sorting (-S/--sort),
@@ -797,23 +797,23 @@ the following identifiers are available:</p>
 <PRE>
 % squeue -o '%.5i %.2t %.4M %.5D %7X %7Y %7Z %7z %R'
 JOBID ST TIME NODES SOCKETS CORES   THREADS S:C:T   NODELIST(REASON)
-   17 PD 0:00     1 2       2       1       2:2:1   (Resources) 
-   18 PD 0:00     1 2       2       1       2:2:1   (Resources) 
-   19 PD 0:00     1 2       2       1       2:2:1   (Resources) 
-   13  R 1:27     1 2       2       1       2:2:1   hydra12 
-   14  R 1:26     1 2       2       1       2:2:1   hydra13 
-   15  R 1:26     1 2       2       1       2:2:1   hydra14 
-   16  R 1:26     1 2       2       1       2:2:1   hydra15 
+   17 PD 0:00     1 2       2       1       2:2:1   (Resources)
+   18 PD 0:00     1 2       2       1       2:2:1   (Resources)
+   19 PD 0:00     1 2       2       1       2:2:1   (Resources)
+   13  R 1:27     1 2       2       1       2:2:1   hydra12
+   14  R 1:26     1 2       2       1       2:2:1   hydra13
+   15  R 1:26     1 2       2       1       2:2:1   hydra14
+   16  R 1:26     1 2       2       1       2:2:1   hydra15
 
 % squeue -o '%.5i %.2t %.4M %.5D %9c %11H %9I %11J'
 JOBID ST TIME NODES MIN_PROCS MIN_SOCKETS MIN_CORES MIN_THREADS
-   17 PD 0:00     1 1         4           2         1          
-   18 PD 0:00     1 1         4           2         1          
-   19 PD 0:00     1 1         4           2         1          
-   13  R 1:29     1 0         0           0         0          
-   14  R 1:28     1 0         0           0         0          
-   15  R 1:28     1 0         0           0         0          
-   16  R 1:28     1 0         0           0         0          
+   17 PD 0:00     1 1         4           2         1
+   18 PD 0:00     1 1         4           2         1
+   19 PD 0:00     1 1         4           2         1
+   13  R 1:29     1 0         0           0         0
+   14  R 1:28     1 0         0           0         0
+   15  R 1:28     1 0         0           0         0
+   16  R 1:28     1 0         0           0         0
 </PRE>
 
 <p>
@@ -858,7 +858,7 @@ the job starts running).
 
 <p>See also 'squeue --help' and 'man squeue'</p>
 
-<!- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - --> 
+<!- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
 <h3>scontrol</h3>
 
 <p>The following job settings can be adjusted using scontrol:
@@ -884,13 +884,13 @@ Constraints:
 
 % squeue -o '%.5i %.2t %.4M %.5D %9c %11H %9I %11J'
 JOBID ST TIME NODES MIN_PROCS MIN_SOCKETS MIN_CORES MIN_THREADS
-   17 PD 0:00     1 1         4           2         1          
-   18 PD 0:00     1 1         8           4         2          
-   19 PD 0:00     1 1         4           2         1          
-   13  R 1:35     1 0         0           0         0          
-   14  R 1:34     1 0         0           0         0          
-   15  R 1:34     1 0         0           0         0          
-   16  R 1:34     1 0         0           0         0          
+   17 PD 0:00     1 1         4           2         1
+   18 PD 0:00     1 1         8           4         2
+   19 PD 0:00     1 1         4           2         1
+   13  R 1:35     1 0         0           0         0
+   14  R 1:34     1 0         0           0         0
+   15  R 1:34     1 0         0           0         0
+   16  R 1:34     1 0         0           0         0
 </PRE>
 
 <p>The 'scontrol show job' command can be used to display
@@ -934,7 +934,7 @@ via <a href="configurator.html">configurator.html</a>.
 task/affinity plugin must be first enabled in slurm.conf:
 
 <PRE>
-TaskPlugin=task/affinity          # enable task affinity   
+TaskPlugin=task/affinity          # enable task affinity
 </PRE>
 
 <p>This setting is part of the task launch specific parameters:</p>
@@ -942,11 +942,11 @@ TaskPlugin=task/affinity          # enable task affinity
 <PRE>
 # o Define task launch specific parameters
 #
-#    "TaskProlog" : Define a program to be executed as the user before each 
+#    "TaskProlog" : Define a program to be executed as the user before each
 #                   task begins execution.
-#    "TaskEpilog" : Define a program to be executed as the user after each 
+#    "TaskEpilog" : Define a program to be executed as the user after each
 #                   task terminates.
-#    "TaskPlugin" : Define a task launch plugin. This may be used to 
+#    "TaskPlugin" : Define a task launch plugin. This may be used to
 #                   provide resource management within a node (e.g. pinning
 #                   tasks to specific processors). Permissible values are:
 #      "task/none"     : no task launch actions, the default.
@@ -961,7 +961,7 @@ TaskPlugin=task/affinity          # enable task affinity
 
 <p>SLURM will automatically detect the architecture of the nodes used
 by examining /proc/cpuinfo.  If, for some reason, the administrator
-wishes to override the automatically selected architecture, the 
+wishes to override the automatically selected architecture, the
 NodeName parameter can be used in combination with FastSchedule:
 
 <PRE>
@@ -976,25 +976,25 @@ using NodeName:
 #
 #  o Node configuration
 #
-#    The configuration information of nodes (or machines) to be managed 
+#    The configuration information of nodes (or machines) to be managed
 #    by SLURM is described here. The only required value in this section
-#    of the config file is the "NodeName" field, which specifies the 
+#    of the config file is the "NodeName" field, which specifies the
 #    hostnames of the node or nodes to manage. It is recommended, however,
 #    that baseline values for the node configuration be established
-#    using the following parameters (see slurm.config(5) for more info): 
+#    using the following parameters (see slurm.config(5) for more info):
 #
 #     "NodeName"   : The only required node configuration parameter, NodeName
 #                    specifies a node or set of nodes to be managed by SLURM.
 #                    The special NodeName of "DEFAULT" may be used to establish
 #                    default node configuration parameters for subsequent node
-#                    records. Typically this would be the string that 
-#                    `/bin/hostname -s` would return on the node. However 
-#                    NodeName may be an arbitrary string if NodeHostname is 
+#                    records. Typically this would be the string that
+#                    `/bin/hostname -s` would return on the node. However
+#                    NodeName may be an arbitrary string if NodeHostname is
 #                    used (see below).
 #
-#     "Feature"    : comma separated list of "features" for the given node(s) 
+#     "Feature"    : comma separated list of "features" for the given node(s)
 #
-#     "NodeAddr"   : preferred address for contacting the node. This may be 
+#     "NodeAddr"   : preferred address for contacting the node. This may be
 #                    either a name or IP address.
 #
 #     "NodeHostname"
@@ -1029,7 +1029,7 @@ using NodeName:
 #     "Weight"     : Priority of node for scheduling purposes
 #
 #   If any of the above values are set for a node or group of nodes, and
-#   that node checks in to the slurm controller with less than the 
+#   that node checks in to the slurm controller with less than the
 #   configured resources, the node's state will be set to DOWN, in order
 #   to avoid scheduling any jobs on a possibly misconfigured machine.
 #
diff --git a/doc/html/moab.shtml b/doc/html/moab.shtml
index 30bdfdae0be76a3082d74ba4361103aaef80c9a2..79c85b9495e239dcf66270c9dac51ab9073d2d33 100644
--- a/doc/html/moab.shtml
+++ b/doc/html/moab.shtml
@@ -2,28 +2,28 @@
 
 <h1>Moab Cluster Suite Integration Guide</h1>
 <h2>Overview</h2>
-<p>Moab Cluster Suite configuration is quite complicated and is 
-beyond the scope of any documents we could supply with SLURM.  
-The best resource for Moab configuration information is the 
-online documents at Cluster Resources Inc.: 
+<p>Moab Cluster Suite configuration is quite complicated and is
+beyond the scope of any documents we could supply with SLURM.
+The best resource for Moab configuration information is the
+online documents at Cluster Resources Inc.:
 <a href="http://www.clusterresources.com/products/mwm/docs/slurmintegration.shtml">
 http://www.clusterresources.com/products/mwm/docs/slurmintegration.shtml</a>.</p>
 
-<p>Moab uses SLURM commands and a wiki interface to communicate. See the 
+<p>Moab uses SLURM commands and a wiki interface to communicate. See the
 <a href="http://www.clusterresources.com/products/mwm/docs/wiki/wikiinterface.shtml">
 Wiki Interface Specification</a> and
 <a href="http://www.clusterresources.com/products/mwm/docs/wiki/socket.shtml">
 Wiki Socket Protocol Description</a> for more information.</p>
 
-<p>Somewhat more current information about SLURM's implementation of the 
+<p>Somewhat more current information about SLURM's implementation of the
 wiki interface was developed by Michal Novotny (Masaryk University, Czech Republic)
 and can be found <a href="http://www.fi.muni.cz/~xnovot19/wiki2.html">here</a>.</p>
 
 <h2>Configuration</h2>
 <p>First, download the Moab scheduler kit from their web site
 <a href="http://www.clusterresources.com/pages/products/moab-cluster-suite.php">
-http://www.clusterresources.com/pages/products/moab-cluster-suite.php</a>.<br> 
-<b>Note:</b> Use Moab version 5.0.0 or higher and SLURM version 1.1.28 
+http://www.clusterresources.com/pages/products/moab-cluster-suite.php</a>.<br>
+<b>Note:</b> Use Moab version 5.0.0 or higher and SLURM version 1.1.28
 or higher.</p>
 
 <h3>SLURM configuration</h3>
@@ -34,14 +34,14 @@ or higher.</p>
 SchedulerType=sched/wiki2
 SchedulerPort=7321
 </pre>
-<p>Running multiple jobs per mode can be accomplished in two different 
-ways. 
+<p>Running multiple jobs per mode can be accomplished in two different
+ways.
 The <i>SelectType=select/cons_res</i> parameter can be used to let
-SLURM allocate the individual processors, memory, and other 
+SLURM allocate the individual processors, memory, and other
 consumable resources (in SLURM version 1.2.1 or higher).
-Alternately, <i>SelectType=select/linear</i> or 
-<i>SelectType=select/bluegene</i> can be used with the 
-<i>Shared=yes</i> or <i>Shared=force</i> parameter in 
+Alternately, <i>SelectType=select/linear</i> or
+<i>SelectType=select/bluegene</i> can be used with the
+<i>Shared=yes</i> or <i>Shared=force</i> parameter in
 partition configuration specifications.</p>
 
 <p>The default value of <i>SchedulerPort</i> is 7321.</p>
@@ -49,56 +49,56 @@ partition configuration specifications.</p>
 <p>SLURM version 2.0 and higher have internal scheduling capabilities
 that are not compatable with Moab.
 <ol>
-<li>Do not configure SLURM to use the "priority/multifactor" plugin 
+<li>Do not configure SLURM to use the "priority/multifactor" plugin
 as it would set job priorities which conflict with those set by Moab.</li>
-<li>Do not use SLURM's <a href="reservations.html">reservation</a> 
+<li>Do not use SLURM's <a href="reservations.html">reservation</a>
 mechanism, but use that offered by Moab.</li>
 <li>Do not use SLURM's <a href="resource_limits.html">resource limits</a>
 as those may conflict with those managed by Moab.</li>
 </ol></p>
 
 <h4>SLURM commands</h4>
-<p> Note that the <i>srun --immediate</i> option is not compatible 
-with Moab. 
-All jobs must wait for Moab to schedule them rather than being 
+<p> Note that the <i>srun --immediate</i> option is not compatible
+with Moab.
+All jobs must wait for Moab to schedule them rather than being
 scheduled immediately by SLURM.</p>
 
 <a name="wiki.conf"><h4>wiki.conf</h4></a>
-<p>SLURM's wiki configuration is stored in a file 
-specific to the wiki-plugin named <i>wiki.conf</i>. 
-This file should be protected from reading by users. 
-It only needs to be readable by <i>SlurmUser</i> (as configured 
-in <i>slurm.conf</i>) and only needs to exist on computers 
+<p>SLURM's wiki configuration is stored in a file
+specific to the wiki-plugin named <i>wiki.conf</i>.
+This file should be protected from reading by users.
+It only needs to be readable by <i>SlurmUser</i> (as configured
+in <i>slurm.conf</i>) and only needs to exist on computers
 where the <i>slurmctld</i> daemon executes.
-More information about wiki.conf is available in 
+More information about wiki.conf is available in
 a man page distributed with SLURM.</p>
 
 <p>The currently supported wiki.conf keywords include:</p>
 
-<p><b>AuthKey</b> is a DES based encryption key used to sign 
-communications between SLURM and Maui or Moab. 
-This use of this key is essential to insure that a user 
-not build his own program to cancel other user's jobs in 
+<p><b>AuthKey</b> is a DES based encryption key used to sign
+communications between SLURM and Maui or Moab.
+This use of this key is essential to insure that a user
+not build his own program to cancel other user's jobs in
 SLURM.
-This should be no more than 32-bit unsigned integer and match 
-the the encryption key in Maui (<i>--with-key</i> on the 
-configure line) or Moab (<i>KEY</i> parameter in the 
-<i>moab-private.cfg</i> file). 
-Note that SLURM's wiki plugin does not include a mechanism 
-to submit new jobs, so even without this key nobody could 
+This should be no more than 32-bit unsigned integer and match
+the the encryption key in Maui (<i>--with-key</i> on the
+configure line) or Moab (<i>KEY</i> parameter in the
+<i>moab-private.cfg</i> file).
+Note that SLURM's wiki plugin does not include a mechanism
+to submit new jobs, so even without this key nobody could
 run jobs as another user.</p>
 
-<p><b>EPort</b> is an event notification port in Moab. 
-When a job is submitted to or terminates in SLURM, 
+<p><b>EPort</b> is an event notification port in Moab.
+When a job is submitted to or terminates in SLURM,
 Moab is sent a message on this port to begin an attempt
 to schedule the computer.
-This numeric value should match <i>EPORT</i> configured 
+This numeric value should match <i>EPORT</i> configured
 in the <i>moab.cnf</i> file.</p>
 
 <p><b>EHost</b> is the event notification host for Moab.
-This identifies the computer on which the Moab daemons 
+This identifies the computer on which the Moab daemons
 executes which should be notified of events.
-By default EHost will be identical in value to the 
+By default EHost will be identical in value to the
 ControlAddr configured in slurm.conf.</p>
 
 <p><b>EHostBackup</b> is the event notification backup host for Moab.
@@ -108,52 +108,52 @@ By default EHostBackup will be identical in value to the
 BackupAddr configured in slurm.conf.</p>
 
 <p><b>ExcludePartitions</b> is used to identify partitions
-whose jobs are to be scheduled directly by SLURM rather 
-than Moab. 
-This only effects jobs which are submitted using Slurm 
+whose jobs are to be scheduled directly by SLURM rather
+than Moab.
+This only effects jobs which are submitted using Slurm
 commands (i.e. srun, salloc or sbatch, NOT msub from Moab).
-These jobs will be scheduled on a First-Come-First-Served 
-basis. 
-This may provide faster response times than Moab scheduling. 
+These jobs will be scheduled on a First-Come-First-Served
+basis.
+This may provide faster response times than Moab scheduling.
 Moab will account for and report the jobs, but their initiation
 will be outside of Moab's control.
-Note that Moab controls for resource reservation, fair share 
+Note that Moab controls for resource reservation, fair share
 scheduling, etc. will not apply to the initiation of these jobs.
 If more than one partition is to be scheduled directly by
 Slurm, use a comma separator between their names.</p>
 
-<p><b>HidePartitionJobs</b> identifies partitions whose jobs are not 
+<p><b>HidePartitionJobs</b> identifies partitions whose jobs are not
 to be reported to Moab.
 These jobs will not be accounted for or otherwise visible to Moab.
 Any partitions listed here must also be listed in <b>ExcludePartitions</b>.
-If more than one partition is to have its jobs hidden, use a comma 
+If more than one partition is to have its jobs hidden, use a comma
 separator between their names.</p>
 
-<p><b>HostFormat</b> controls the format of job task lists built 
+<p><b>HostFormat</b> controls the format of job task lists built
 by Slurm and reported to Moab.
-The default value is "0", for which each host name is listed 
+The default value is "0", for which each host name is listed
 individually, once per processor (e.g. "tux0:tux0:tux1:tux1:...").
-A value of "1" uses Slurm hostlist expressions with processor 
+A value of "1" uses Slurm hostlist expressions with processor
 counts (e.g. "tux[0-16]*2").
 This is currently experimental.
 
-<p><b>JobAggregationTime</b> is used to avoid notifying Moab 
+<p><b>JobAggregationTime</b> is used to avoid notifying Moab
 of large numbers of events occurring about the same time.
 If an event occurs within this number of seconds since Moab was
 last notified of an event, another notification is not sent.
 This should be an integer number of seconds.
 The default value is 10 seconds.
-The value should match <i>JOBAGGREGATIONTIME</i> configured 
+The value should match <i>JOBAGGREGATIONTIME</i> configured
 in the <i>moab.cnf</i> file.</p>
 
-<p><b>JobPriority</b> controls the scheduling of newly arriving 
-jobs in SLURM. 
-SLURM can either place all newly arriving jobs in a HELD state 
-(priority = 0) and let Moab decide when and where to run the jobs 
-or SLURM can control when and where to run jobs. 
-In the later case, Moab can modify the priorities of pending jobs 
+<p><b>JobPriority</b> controls the scheduling of newly arriving
+jobs in SLURM.
+SLURM can either place all newly arriving jobs in a HELD state
+(priority = 0) and let Moab decide when and where to run the jobs
+or SLURM can control when and where to run jobs.
+In the later case, Moab can modify the priorities of pending jobs
 to re-order the job queue or just monitor system state.
-Possible values are "hold" and "run" with "hold" being the default.</p> 
+Possible values are "hold" and "run" with "hold" being the default.</p>
 
 <p>Here is a sample <i>wiki.conf</i> file
 <pre>
@@ -163,7 +163,7 @@ Possible values are "hold" and "run" with "hold" being the default.</p>
 # Matches KEY in moab-private.cfg
 AuthKey=123456789
 #
-# SLURM to directly schedule "debug" partition 
+# SLURM to directly schedule "debug" partition
 # and hide the jobs from Moab
 ExcludePartitions=debug
 HidePartitionJobs=debug
@@ -176,7 +176,7 @@ EPort=15017
 # Moab event notification host, where the Moab daemon runs
 #EHost=tux0
 #
-# Moab event notification throttle, 
+# Moab event notification throttle,
 # matches JOBAGGREGATIONTIME in moab.cfg (seconds)
 JobAggregationTime=15
 </pre>
@@ -199,11 +199,11 @@ CLIENTCFG[RM:slurm] KEY=123456789
 <h3>Job Submission</h3>
 
 <p>Jobs can either be submitted to Moab or directly to SLURM.
-Moab's <i>msub</i> command has a <i>--slurm</i> option that can 
-be placed at the <b>end</b> of the command line and those options 
-will be passed to SLURM. This can be used to invoke SLURM 
+Moab's <i>msub</i> command has a <i>--slurm</i> option that can
+be placed at the <b>end</b> of the command line and those options
+will be passed to SLURM. This can be used to invoke SLURM
 options which are not directly supported by Moab (e.g.
-system images to boot, task distribution specification across 
+system images to boot, task distribution specification across
 sockets, cores, and hyperthreads, etc.).
 For example:
 <pre>
@@ -213,10 +213,10 @@ msub my.script -l walltime=600,nodes=2 \
 
 <h3>User Environment</h3>
 
-<p>When a user submits a job to Moab, that job could potentially 
-execute on a variety of computers, so it is typically necessary 
-that the user's environment on the execution host be loaded. 
-Moab relies upon SLURM to perform this action, using the 
+<p>When a user submits a job to Moab, that job could potentially
+execute on a variety of computers, so it is typically necessary
+that the user's environment on the execution host be loaded.
+Moab relies upon SLURM to perform this action, using the
 <i>--get-user-env</i> option for the salloc, sbatch and srun commands.
 The SLURM command then executes as user root a command of this sort
 as user root:</p>
@@ -224,37 +224,37 @@ as user root:</p>
 /bin/su - &lt;user&gt; -c \
         "/bin/echo BEGIN; /bin/env; /bin/echo FINI"
 </pre>
-<p> For typical batch jobs, the job transfer from Moab to 
+<p> For typical batch jobs, the job transfer from Moab to
 SLURM is performed using <i>sbatch</i> and occurs instantaneously.
-The environment is loadeded by a SLURM daemon (slurmd) when the 
+The environment is loadeded by a SLURM daemon (slurmd) when the
 batch job begins execution.
-For interactive jobs (<i>msub -I ...</i>), the job transfer 
+For interactive jobs (<i>msub -I ...</i>), the job transfer
 from Moab to SLURM can not be completed until the environment
-variables are loaded, during which time the Moab daemon is 
-completely non-responsive. 
+variables are loaded, during which time the Moab daemon is
+completely non-responsive.
 To insure that Moab remains operational, SLURM will abort the above
-command within a configurable period of time and look for a cache 
-file with the user's environment and use that if found. 
+command within a configurable period of time and look for a cache
+file with the user's environment and use that if found.
 Otherwise an error is reported to Moab.
-The time permitted for loading the current environment 
+The time permitted for loading the current environment
 before searching for a cache file is configurable using
 the <i>GetEnvTimeout</i> parameter in SLURM's configuration
-file, slurm.conf. A value of zero results in immediately 
+file, slurm.conf. A value of zero results in immediately
 using the cache file. The default value is 2 seconds.</p>
 
-<p>We have provided a simple program that can be used to build 
+<p>We have provided a simple program that can be used to build
 cache files for users. The program can be found in the SLURM
-distribution at <i>contribs/env_cache_builder.c</i>. 
-This program can support a longer timeout than Moab, but 
-will report errors for users for whom the environment file 
+distribution at <i>contribs/env_cache_builder.c</i>.
+This program can support a longer timeout than Moab, but
+will report errors for users for whom the environment file
 can not be automatically build (typically due to the user's
-"dot" files spawning another shell so the desired command 
+"dot" files spawning another shell so the desired command
 never execution).
 For such user, you can manually build a cache file.
-You may want to execute this program periodically to capture 
+You may want to execute this program periodically to capture
 information for new users or changes in existing users'
 environment.
-A sample execution is shown below. 
+A sample execution is shown below.
 Run this on the same host as the Moab daemon and execute it as user root.</p>
 
 <pre>
diff --git a/doc/html/mpi_guide.shtml b/doc/html/mpi_guide.shtml
index 9da0d09fc058e7cfa8b81a455c88958a7e43a90a..ed4538f0735f3e9a8faa33c275fee69b675ae476 100644
--- a/doc/html/mpi_guide.shtml
+++ b/doc/html/mpi_guide.shtml
@@ -2,24 +2,24 @@
 
 <h1>MPI Use Guide</h1>
 
-<p>MPI use depends upon the type of MPI being used. 
-There are three fundamentally different modes of operation used 
+<p>MPI use depends upon the type of MPI being used.
+There are three fundamentally different modes of operation used
 by these various MPI implementation.
 <ol>
-<li>SLURM directly launches the tasks and performs initialization 
-of communications (Quadrics MPI, MPICH2, MPICH-GM, MPICH-MX, 
+<li>SLURM directly launches the tasks and performs initialization
+of communications (Quadrics MPI, MPICH2, MPICH-GM, MPICH-MX,
 MVAPICH, MVAPICH2, some MPICH1 modes, and future versions of OpenMPI).</li>
 <li>SLURM creates a resource allocation for the job and then
 mpirun launches tasks using SLURM's infrastructure (OpenMPI,
 LAM/MPI and HP-MPI).</li>
-<li>SLURM creates a resource allocation for the job and then 
-mpirun launches tasks using some mechanism other than SLURM, 
-such as SSH or RSH (BlueGene MPI and some MPICH1 modes). 
-These tasks initiated outside of SLURM's monitoring 
-or control. SLURM's epilog should be configured to purge 
+<li>SLURM creates a resource allocation for the job and then
+mpirun launches tasks using some mechanism other than SLURM,
+such as SSH or RSH (BlueGene MPI and some MPICH1 modes).
+These tasks initiated outside of SLURM's monitoring
+or control. SLURM's epilog should be configured to purge
 these tasks when the job's allocation is relinquished. </li>
 </ol>
-<p>Links to instructions for using several varieties of MPI 
+<p>Links to instructions for using several varieties of MPI
 with SLURM are provided below.
 <ul>
 <li><a href="#bluegene_mpi">BlueGene MPI</a></li>
@@ -40,15 +40,15 @@ with SLURM are provided below.
 <h2><a name="open_mpi" href="http://www.open-mpi.org/"><b>Open MPI</b></a></h2>
 
 <p>Open MPI relies upon
-SLURM to allocate resources for the job and then mpirun to initiate the 
-tasks. When using <span class="commandline">salloc</span> command, 
-<span class="commandline">mpirun</span>'s -nolocal option is recommended. 
+SLURM to allocate resources for the job and then mpirun to initiate the
+tasks. When using <span class="commandline">salloc</span> command,
+<span class="commandline">mpirun</span>'s -nolocal option is recommended.
 For example:
 <pre>
-$ salloc -n4 sh    # allocates 4 processors 
+$ salloc -n4 sh    # allocates 4 processors
                    # and spawns shell for job
 &gt; mpirun -np 4 -nolocal a.out
-&gt; exit             # exits shell spawned by 
+&gt; exit             # exits shell spawned by
                    # initial srun command
 </pre>
 <p>Note that any direct use of <span class="commandline">srun</span>
@@ -59,19 +59,19 @@ option will be required to explicitly disable the LAM/MPI plugin.</p>
 
 <h2>Future Use</h2>
 <p>There is work underway in both SLURM and Open MPI to support task launch
-using the <span class="commandline">srun</span> command. 
+using the <span class="commandline">srun</span> command.
 We expect this mode of operation to be supported late in 2009.
 It may differ slightly from the description below.
-It relies upon SLURM version 2.0 (or higher) managing 
+It relies upon SLURM version 2.0 (or higher) managing
 reservations of communication ports for the Open MPI's use.
-The system administrator must specify the range of ports to be reserved 
+The system administrator must specify the range of ports to be reserved
 in the <i>slurm.conf</i> file using the <i>MpiParams</i> parameter.
 For example: <br>
 <i>MpiParams=ports=12000-12999</i></p>
 
-<p>Launch tasks using the <span class="commandline">srun</span> command 
+<p>Launch tasks using the <span class="commandline">srun</span> command
 plus the option <i>--resv-ports</i>.
-The ports reserved on every allocated node will be identified in an 
+The ports reserved on every allocated node will be identified in an
 environment variable available to the tasks as shown here: <br>
 <i>SLURM_STEP_RESV_PORTS=12000-12015</i></p>
 
@@ -80,7 +80,7 @@ to be in use, a message of this form will be printed and the job step
 will be re-launched:<br>
 <i>srun: error: sun000: task 0 unble to claim reserved port, retrying</i><br>
 After three failed attempts, the job step will be aborted.
-Repeated failures should be reported to your system administrator in 
+Repeated failures should be reported to your system administrator in
 order to rectify the problem by cancelling the processes holding those
 ports.</p>
 <hr size=4 width="100%">
@@ -88,9 +88,9 @@ ports.</p>
 
 <h2><a name="quadrics_mpi" href="http://www.quadrics.com/"><b>Quadrics MPI</b></a></h2>
 
-<p>Quadrics MPI relies upon SLURM to 
-allocate resources for the job and <span class="commandline">srun</span> 
-to initiate the tasks. One would build the MPI program in the normal manner 
+<p>Quadrics MPI relies upon SLURM to
+allocate resources for the job and <span class="commandline">srun</span>
+to initiate the tasks. One would build the MPI program in the normal manner
 then initiate it using a command line of this sort:</p>
 <pre>
 $ srun [options] &lt;program&gt; [program args]
@@ -100,17 +100,17 @@ $ srun [options] &lt;program&gt; [program args]
 
 <h2><a name="lam_mpi" href="http://www.lam-mpi.org/"><b>LAM/MPI</b></a></h2>
 
-<p>LAM/MPI relies upon the SLURM 
+<p>LAM/MPI relies upon the SLURM
 <span class="commandline">salloc</span> or <span class="commandline">sbatch</span>
-command to allocate. In either case, specify 
-the maximum number of tasks required for the job. Then execute the 
-<span class="commandline">lamboot</span> command to start lamd daemons. 
-<span class="commandline">lamboot</span> utilizes SLURM's 
-<span class="commandline">srun</span> command to launch these daemons. 
-Do not directly execute the <span class="commandline">srun</span> command 
-to launch LAM/MPI tasks. For example: 
+command to allocate. In either case, specify
+the maximum number of tasks required for the job. Then execute the
+<span class="commandline">lamboot</span> command to start lamd daemons.
+<span class="commandline">lamboot</span> utilizes SLURM's
+<span class="commandline">srun</span> command to launch these daemons.
+Do not directly execute the <span class="commandline">srun</span> command
+to launch LAM/MPI tasks. For example:
 <pre>
-$ salloc -n16 sh  # allocates 16 processors 
+$ salloc -n16 sh  # allocates 16 processors
                   # and spawns shell for job
 &gt; lamboot
 &gt; mpirun -np 16 foo args
@@ -119,12 +119,12 @@ $ salloc -n16 sh  # allocates 16 processors
 etc.
 &gt; lamclean
 &gt; lamhalt
-&gt; exit            # exits shell spawned by 
+&gt; exit            # exits shell spawned by
                   # initial srun command
 </pre>
-<p>Note that any direct use of <span class="commandline">srun</span> 
+<p>Note that any direct use of <span class="commandline">srun</span>
 will only launch one task per node when the LAM/MPI plugin is configured
-as the default plugin.  To launch more than one task per node using the 
+as the default plugin.  To launch more than one task per node using the
 <span class="commandline">srun</span> command, the <i>--mpi=none</i>
 option would be required to explicitly disable the LAM/MPI plugin
 if that is the system default.</p>
@@ -133,8 +133,8 @@ if that is the system default.</p>
 
 <h2><a name="hp_mpi" href="http://www.hp.com/go/mpi"><b>HP-MPI</b></a></h2>
 
-<p>HP-MPI uses the 
-<span class="commandline">mpirun</span> command with the <b>-srun</b> 
+<p>HP-MPI uses the
+<span class="commandline">mpirun</span> command with the <b>-srun</b>
 option to launch jobs. For example:
 <pre>
 $MPI_ROOT/bin/mpirun -TCP -srun -N8 ./a.out
@@ -144,10 +144,10 @@ $MPI_ROOT/bin/mpirun -TCP -srun -N8 ./a.out
 
 <h2><a name="mpich2" href="http://www.mcs.anl.gov/research/projects/mpich2/"><b>MPICH2</b></a></h2>
 
-<p>MPICH2 jobs are launched using the <b>srun</b> command. Just link your program with 
+<p>MPICH2 jobs are launched using the <b>srun</b> command. Just link your program with
 SLURM's implementation of the PMI library so that tasks can communicate
 host and port information at startup. (The system administrator can add
-these option to the mpicc and mpif77 commands directly, so the user will not 
+these option to the mpicc and mpif77 commands directly, so the user will not
 need to bother). For example:
 <pre>
 $ mpicc -L&lt;path_to_slurm_lib&gt; -lpmi ...
@@ -155,9 +155,9 @@ $ srun -n20 a.out
 </pre>
 <b>NOTES:</b>
 <ul>
-<li>Some MPICH2 functions are not currently supported by the PMI 
+<li>Some MPICH2 functions are not currently supported by the PMI
 library integrated with SLURM</li>
-<li>Set the environment variable <b>PMI_DEBUG</b> to a numeric value 
+<li>Set the environment variable <b>PMI_DEBUG</b> to a numeric value
 of 1 or higher for the PMI library to print debugging information</li>
 </ul></p>
 <hr size=4 width="100%">
@@ -166,8 +166,8 @@ of 1 or higher for the PMI library to print debugging information</li>
 <h2><a name="mpich_gm" href="http://www.myri.com/scs/download-mpichgm.html"><b>MPICH-GM</b></a></h2>
 
 <p>MPICH-GM jobs can be launched directly by <b>srun</b> command.
-SLURM's <i>mpichgm</i> MPI plugin must be used to establish communications 
-between the launched tasks. This can be accomplished either using the SLURM 
+SLURM's <i>mpichgm</i> MPI plugin must be used to establish communications
+between the launched tasks. This can be accomplished either using the SLURM
 configuration parameter <i>MpiDefault=mpichgm</i> in <b>slurm.conf</b>
 or srun's <i>--mpi=mpichgm</i> option.
 <pre>
@@ -194,8 +194,8 @@ $ srun -n16 --mpi=mpichmx a.out
 <h2><a name="mvapich" href="http://mvapich.cse.ohio-state.edu/"><b>MVAPICH</b></a></h2>
 
 <p>MVAPICH jobs can be launched directly by <b>srun</b> command.
-SLURM's <i>mvapich</i> MPI plugin must be used to establish communications 
-between the launched tasks. This can be accomplished either using the SLURM 
+SLURM's <i>mvapich</i> MPI plugin must be used to establish communications
+between the launched tasks. This can be accomplished either using the SLURM
 configuration parameter <i>MpiDefault=mvapich</i> in <b>slurm.conf</b>
 or srun's <i>--mpi=mvapich</i> option.
 <pre>
@@ -205,10 +205,10 @@ $ srun -n16 --mpi=mvapich a.out
 <b>NOTE:</b> If MVAPICH is used in the shared memory model, with all tasks
 running on a single node, then use the <i>mpich1_shmem</i> MPI plugin instead.<br>
 <b>NOTE (for system administrators):</b> Configure
-<i>PropagateResourceLimitsExcept=MEMLOCK</i> in <b>slurm.conf</b> and 
+<i>PropagateResourceLimitsExcept=MEMLOCK</i> in <b>slurm.conf</b> and
 start the <i>slurmd</i> daemons with an unlimited locked memory limit.
-For more details, see 
-<a href="http://mvapich.cse.ohio-state.edu/support/mvapich_user_guide.html#x1-420007.2.3">MVAPICH</a> 
+For more details, see
+<a href="http://mvapich.cse.ohio-state.edu/support/mvapich_user_guide.html#x1-420007.2.3">MVAPICH</a>
 documentation for "CQ or QP Creation failure".</p>
 <hr size=4 width="100%">
 
@@ -216,9 +216,9 @@ documentation for "CQ or QP Creation failure".</p>
 <h2><a name="mvapich2" href="http://nowlab.cse.ohio-state.edu/projects/mpi-iba"><b>MVAPICH2</b></a></h2>
 
 <p>MVAPICH2 jobs can be launched directly by <b>srun</b> command.
-SLURM's <i>none</i> MPI plugin must be used to establish communications 
-between the launched tasks. This can be accomplished either using the SLURM 
-configuration parameter <i>MpiDefault=none</i> in <b>slurm.conf</b> 
+SLURM's <i>none</i> MPI plugin must be used to establish communications
+between the launched tasks. This can be accomplished either using the SLURM
+configuration parameter <i>MpiDefault=none</i> in <b>slurm.conf</b>
 or srun's <i>--mpi=none</i> option. The program must also be linked with
 SLURM's implementation of the PMI library so that tasks can communicate
 host and port information at startup. (The system administrator can add
@@ -233,10 +233,10 @@ $ srun -n16 --mpi=none a.out
 
 <h2><a name="bluegene_mpi" href="http://www.research.ibm.com/bluegene/"><b>BlueGene MPI</b></a></h2>
 
-<p>BlueGene MPI relies upon SLURM to create the resource allocation and then 
-uses the native <span class="commandline">mpirun</span> command to launch tasks. 
-Build a job script containing one or more invocations of the 
-<span class="commandline">mpirun</span> command. Then submit 
+<p>BlueGene MPI relies upon SLURM to create the resource allocation and then
+uses the native <span class="commandline">mpirun</span> command to launch tasks.
+Build a job script containing one or more invocations of the
+<span class="commandline">mpirun</span> command. Then submit
 the script to SLURM using <span class="commandline">sbatch</span>.
 For example:</p>
 <pre>
@@ -244,50 +244,50 @@ $ sbatch -N512 my.script
 </pre>
 <p>Note that the node count specified with the <i>-N</i> option indicates
 the base partition count.
-See <a href="bluegene.html">BlueGene User and Administrator Guide</a> 
+See <a href="bluegene.html">BlueGene User and Administrator Guide</a>
 for more information.</p>
 <hr size=4 width="100%">
 
 
 <h2><a name="mpich1" href="http://www-unix.mcs.anl.gov/mpi/mpich1/"><b>MPICH1</b></a></h2>
 
-<p>MPICH1 development ceased in 2005. It is recommended that you convert to 
-MPICH2 or some other MPI implementation. 
-If you still want to use MPICH1, note that it has several different 
-programming models. If you are using the shared memory model 
-(<i>DEFAULT_DEVICE=ch_shmem</i> in the mpirun script), then initiate 
-the tasks using the <span class="commandline">srun</span> command 
+<p>MPICH1 development ceased in 2005. It is recommended that you convert to
+MPICH2 or some other MPI implementation.
+If you still want to use MPICH1, note that it has several different
+programming models. If you are using the shared memory model
+(<i>DEFAULT_DEVICE=ch_shmem</i> in the mpirun script), then initiate
+the tasks using the <span class="commandline">srun</span> command
 with the <i>--mpi=mpich1_shmem</i> option.</p>
 <pre>
 $ srun -n16 --mpi=mpich1_shmem a.out
 </pre>
 
-<p>If you are using MPICH P4 (<i>DEFAULT_DEVICE=ch_p4</i> in 
-the mpirun script) and SLURM version 1.2.11 or newer, 
-then it is recommended that you apply the patch in the SLURM 
-distribution's file <i>contribs/mpich1.slurm.patch</i>. 
-Follow directions within the file to rebuild MPICH. 
+<p>If you are using MPICH P4 (<i>DEFAULT_DEVICE=ch_p4</i> in
+the mpirun script) and SLURM version 1.2.11 or newer,
+then it is recommended that you apply the patch in the SLURM
+distribution's file <i>contribs/mpich1.slurm.patch</i>.
+Follow directions within the file to rebuild MPICH.
 Applications must be relinked with the new library.
-Initiate tasks using the 
-<span class="commandline">srun</span> command with the 
+Initiate tasks using the
+<span class="commandline">srun</span> command with the
 <i>--mpi=mpich1_p4</i> option.</p>
 <pre>
 $ srun -n16 --mpi=mpich1_p4 a.out
 </pre>
-<p>Note that SLURM launches one task per node and the MPICH 
-library linked within your applications launches the other 
+<p>Note that SLURM launches one task per node and the MPICH
+library linked within your applications launches the other
 tasks with shared memory used for communications between them.
 The only real anomaly is that all output from all spawned tasks
 on a node appear to SLURM as coming from the one task that it
 launched. If the srun --label option is used, the task ID labels
 will be misleading.</p>
- 
-<p>Other MPICH1 programming models current rely upon the SLURM 
-<span class="commandline">salloc</span> or 
+
+<p>Other MPICH1 programming models currently rely upon the SLURM
+<span class="commandline">salloc</span> or
 <span class="commandline">sbatch</span> command to allocate resources.
 In either case, specify the maximum number of tasks required for the job.
-You may then need to build a list of hosts to be used and use that 
-as an argument to the mpirun command. 
+You may then need to build a list of hosts to be used and use that
+as an argument to the mpirun command.
 For example:
 <pre>
 $ cat mpich.sh
@@ -298,10 +298,10 @@ rm -f slurm.hosts
 $ sbatch -n16 mpich.sh
 sbatch: Submitted batch job 1234
 </pre>
-<p>Note that in this example, mpirun uses the rsh command to launch 
-tasks. These tasks are not managed by SLURM since they are launched 
+<p>Note that in this example, mpirun uses the rsh command to launch
+tasks. These tasks are not managed by SLURM since they are launched
 outside of its control.</p>
- 
+
 <p style="text-align:center;">Last modified 2 March 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/mpiplugins.shtml b/doc/html/mpiplugins.shtml
index 96c73e2d8bdfcc236af2452b307d2c336757b1a1..afffe045d6b5d13694b13dbdeb5018120cbafaf6 100644
--- a/doc/html/mpiplugins.shtml
+++ b/doc/html/mpiplugins.shtml
@@ -3,17 +3,17 @@
 <h1><a name="top">SLURM MPI Plugin API</a></h1>
 
 <h2> Overview</h2>
-<p> This document describes SLURM MPI selection plugins and the API that defines 
-them. It is intended as a resource to programmers wishing to write their own SLURM 
+<p> This document describes SLURM MPI selection plugins and the API that defines
+them. It is intended as a resource to programmers wishing to write their own SLURM
 node selection plugins. This is version 0 of the API.</p>
 
 <p>SLURM mpi selection plugins are SLURM plugins that implement the which version of
-mpi is used during execution of the new SLURM job. API described herein. They are 
-intended to provide a mechanism for both selecting mpi versions for pending jobs and 
-performing any mpi-specific tasks for job launch or termination. The plugins must 
+mpi is used during execution of the new SLURM job. API described herein. They are
+intended to provide a mechanism for both selecting mpi versions for pending jobs and
+performing any mpi-specific tasks for job launch or termination. The plugins must
 conform to the SLURM Plugin API with the following specifications:</p>
 <p><span class="commandline">const char plugin_type[]</span><br>
-The major type must be &quot;mpi.&quot; The minor type can be any recognizable 
+The major type must be &quot;mpi.&quot; The minor type can be any recognizable
 abbreviation for the type of node selection algorithm. We recommend, for example:</p>
 <ul>
 <li><b>lam</b>&#151;For use with LAM MPI and Open MPI.</li>
@@ -21,9 +21,9 @@ abbreviation for the type of node selection algorithm. We recommend, for example
 <li><b>mvapich</b>&#151;For use with Infiniband.</li>
 <li><b>none</b>&#151;For use with most other versions of MPI.</li>
 </ul>
-<p>The <span class="commandline">plugin_name</span> and 
-<span class="commandline">plugin_version</span> 
-symbols required by the SLURM Plugin API require no specialization for node selection support. 
+<p>The <span class="commandline">plugin_name</span> and
+<span class="commandline">plugin_version</span>
+symbols required by the SLURM Plugin API require no specialization for node selection support.
 Note carefully, however, the versioning discussion below.</p>
 
 <p>A simplified flow of logic follows:
@@ -46,34 +46,34 @@ which will set configure the slurmd to use the correct mpi as well to interact w
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Data Objects</h2>
-<p> These functions are expected to read and/or modify data structures directly in 
-the slurmd daemon's and srun memory. Slurmd is a multi-threaded program with independent 
-read and write locks on each data structure type. Therefore the type of operations 
+<p> These functions are expected to read and/or modify data structures directly in
+the slurmd daemon's and srun memory. Slurmd is a multi-threaded program with independent
+read and write locks on each data structure type. Therefore the type of operations
 permitted on various data structures is identified for each function.</p>
 
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>API Functions</h2>
-<p>The following functions must appear. Functions which are not implemented should 
+<p>The following functions must appear. Functions which are not implemented should
 be stubbed.</p>
 
 <p class="commandline">int mpi_p_init (slurmd_job_t *job, int rank);</p>
 <p style="margin-left:.2in"><b>Description</b>: Used by slurmd to configure the slurmd's environment
 to that of the correct mpi.</p>
-<p style="margin-left:.2in"><b>Arguments</b>:<br><span class="commandline"> job</span>&nbsp; 
+<p style="margin-left:.2in"><b>Arguments</b>:<br><span class="commandline"> job</span>&nbsp;
 &nbsp;&nbsp;(input) Pointer to the slurmd_job that is running.  Cannot be NULL.<br>
 <span class="commandline"> rank</span>&nbsp;
-&nbsp;&nbsp;(input) Primarily there for MVAPICH.  Used to send the rank fo the mpirun job. 
+&nbsp;&nbsp;(input) Primarily there for MVAPICH.  Used to send the rank of the mpirun job.
 This can be 0 if no rank information is needed for the mpi type.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR.</p>
 
 <p class="commandline">int mpi_p_thr_create (srun_job_t *job);</p>
-<p style="margin-left:.2in"><b>Description</b>: Used by srun to spawn the thread for the mpi processes. 
+<p style="margin-left:.2in"><b>Description</b>: Used by srun to spawn the thread for the mpi processes.
 Most all the real processing happens here.</p>
-<p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> job</span>&nbsp; 
+<p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> job</span>&nbsp;
 &nbsp;&nbsp;(input) Pointer to the srun_job that is running.  Cannot be NULL.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return -1.</p>
 
 <p class="commandline">int mpi_p_single_task ();</p>
@@ -81,25 +81,25 @@ the plugin should return -1.</p>
 can run at the same time </p>
 <p style="margin-left:.2in"><b>Arguments</b>:
 <span class="commandline"> none</span></p>
-<p style="margin-left:.2in"><b>Returns</b>: false if multiple tasks can run and true if only 
+<p style="margin-left:.2in"><b>Returns</b>: false if multiple tasks can run and true if only
 a single task can run at one time.</p>
 
 <p class="commandline">int mpi_p_exit();</p>
-<p style="margin-left:.2in"><b>Description</b>: Cleans up anything that needs cleaning up after 
+<p style="margin-left:.2in"><b>Description</b>: Cleans up anything that needs cleaning up after
 execution.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
 <span class="commandline"> none</span></p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR, causing slurmctld to exit.</p>
 
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Versioning</h2>
-<p> This document describes version 0 of the SLURM node selection API. Future 
-releases of SLURM may revise this API. A node selection plugin conveys its ability 
-to implement a particular API version using the mechanism outlined for SLURM plugins. 
-In addition, the credential is transmitted along with the version number of the 
-plugin that transmitted it. It is at the discretion of the plugin author whether 
+<p> This document describes version 0 of the SLURM node selection API. Future
+releases of SLURM may revise this API. A node selection plugin conveys its ability
+to implement a particular API version using the mechanism outlined for SLURM plugins.
+In addition, the credential is transmitted along with the version number of the
+plugin that transmitted it. It is at the discretion of the plugin author whether
 to maintain data format compatibility across different versions of the plugin.</p>
 
 <p class="footer"><a href="#top">top</a></p>
diff --git a/doc/html/news.shtml b/doc/html/news.shtml
index c31d4144e5cbf3b6e55f9143c9f981e67611d81f..4e5827b560771eb065973a5a0e64b31666845ea6 100644
--- a/doc/html/news.shtml
+++ b/doc/html/news.shtml
@@ -14,19 +14,19 @@
 <p>SLURM Version 1.3 was released in March 2008.
 Major enhancements include:
 <ul>
-<li>Job accounting and completion data can be stored in a database 
+<li>Job accounting and completion data can be stored in a database
 (MySQL, PGSQL or simple text file).</li>
 <li>SlurmDBD (Slurm Database Deamon) introduced to provide secure
 database support across multiple clusters.</li>
 <li>Gang scheduler plugin added (time-slicing of parallel jobs
 without an external scheduler).</li>
-<li>Cryptography logic moved to a separate plugin with the 
+<li>Cryptography logic moved to a separate plugin with the
 option of using OpenSSL (default) or Munge (GPL).</li>
 <li>Improved scheduling of multiple job steps within a job's allocation.</li>
-<li>Support for job specification of node features with node counts.</li> 
-<li><i>srun</i>'s --alloc, --attach, and --batch options removed (use 
+<li>Support for job specification of node features with node counts.</li>
+<li><i>srun</i>'s --alloc, --attach, and --batch options removed (use
 <i>salloc</i>, <i>sattach</i> or <i>sbatch</i> commands instead).</li>
-<li><i>srun --pty</i> option added to support remote pseudo terminal for 
+<li><i>srun --pty</i> option added to support remote pseudo terminal for
 spawned tasks.</li>
 <li>Support added for a much richer job dependency specification
 including testing of exit codes and multiple dependencies.</li>
@@ -38,30 +38,30 @@ Computing) mode.</li>
 <p>SLURM Version 2.0 was released in May 2009.
 Major enhancements include:
 <ul>
-<li>Sophisticated <a href="priority_multifactor.html">job prioritization 
-plugin</a> is now available. 
-Jobs can be prioritized based upon their age, size and/or fair-share resource 
+<li>Sophisticated <a href="priority_multifactor.html">job prioritization
+plugin</a> is now available.
+Jobs can be prioritized based upon their age, size and/or fair-share resource
 allocation using hierarchical bank accounts.</li>
-<li>An assortment of <a href="resource_limits.html">resource limits</a> 
-can be imposed upon individual users and/or hierarchical bank accounts 
-such as maximum job time limit, maximum job size, and maximum number of 
+<li>An assortment of <a href="resource_limits.html">resource limits</a>
+can be imposed upon individual users and/or hierarchical bank accounts
+such as maximum job time limit, maximum job size, and maximum number of
 running jobs.</li>
-<li><a href="reservations.html">Advanced reservations</a> can be made to 
+<li><a href="reservations.html">Advanced reservations</a> can be made to
 insure resources will be available when needed.</li>
-<li>Idle nodes can now be completely <a href="power_save.html">powered 
-down</a> when idle and automatically restarted when their is work 
+<li>Idle nodes can now be completely <a href="power_save.html">powered
+down</a> when idle and automatically restarted when there is work
 available.</li>
-<li>Jobs in higher priority partitions (queues) can automatically 
-<a href="preempt.html">preempt</a> jobs in lower priority queues. 
-The preempted jobs will automatically resume execution upon completion 
+<li>Jobs in higher priority partitions (queues) can automatically
+<a href="preempt.html">preempt</a> jobs in lower priority queues.
+The preempted jobs will automatically resume execution upon completion
 of the higher priority job.</li>
-<li>Specific cores are allocated to jobs and jobs steps in order to effective 
+<li>Specific cores are allocated to jobs and job steps in order to effectively
 preempt or gang schedule jobs.</li>
-<li>A new configuration parameter, <i>PrologSlurmctld</i>, can be used to 
+<li>A new configuration parameter, <i>PrologSlurmctld</i>, can be used to
 support the booting of different operating systems for each job.</li>
-<li>Added switch topology configuration options to optimize job resource 
+<li>Added switch topology configuration options to optimize job resource
 allocation with respect to communication performance.</li>
-<li>Automatic <a href="checkpoint_blcr.html">Checkpoint/Restart using BLCR</a> 
+<li>Automatic <a href="checkpoint_blcr.html">Checkpoint/Restart using BLCR</a>
 is now available.</li>
 </ul>
 
@@ -69,9 +69,9 @@ is now available.</li>
 <p>SLURM Version 2.1 is scheduled for released late in 2009.
 Major enhancements include:
 <ul>
-<li>Support for job preemption based upon job Quality of Service (QOS) in 
+<li>Support for job preemption based upon job Quality of Service (QOS) in
 addition to queue priority.</li>
-<li>Support for time limits on individual job steps (in addition to the 
+<li>Support for time limits on individual job steps (in addition to the
 job time limit).</li>
 <li>Fully implement the PgSQL accounting plugin.</li>
 <li>Provide a web-based SLURM administration tool.</li>
@@ -81,7 +81,7 @@ job time limit).</li>
 </ul>
 
 <h2><a name="22">Major Updates in SLURM Version 2.2 and beyond</a></h2>
-<p> Detailed plans for release dates and contents of future SLURM releases have 
+<p> Detailed plans for release dates and contents of future SLURM releases have
 not been finalized. Anyone desiring to perform SLURM development should notify
 <a href="mailto:slurm-dev@lists.llnl.gov">slurm-dev@lists.llnl.gov</a>
 to coordinate activities. Future development plans includes:
@@ -91,7 +91,7 @@ hierarchical switches).</li>
 <li>Modify more SLURM commands to operate between clusters.</li>
 <li>Support for BlueGene/Q systems.</li>
 <li>Permit resource allocations (jobs) to change size.</li>
-<li>Add Kerberos credential support including credential forwarding 
+<li>Add Kerberos credential support including credential forwarding
 and refresh.</li>
 </ul>
 
diff --git a/doc/html/overview.shtml b/doc/html/overview.shtml
index 614ee3af769e4e4bea5765ebc36dc4855e3a4d86..234e901d3c34b1b49549c1f9057fdfe1bd2e1704 100644
--- a/doc/html/overview.shtml
+++ b/doc/html/overview.shtml
@@ -3,50 +3,50 @@
 <h1><a name="top">Overview</a></h1>
 
 <p>The Simple Linux Utility for Resource Management (SLURM) is an open source,
-fault-tolerant, and highly scalable cluster management and job scheduling system 
+fault-tolerant, and highly scalable cluster management and job scheduling system
 for large and small Linux clusters. SLURM requires no kernel modifications for
 its operation and is relatively self-contained. As a cluster resource manager,
-SLURM has three key functions. First, it allocates exclusive and/or non-exclusive 
+SLURM has three key functions. First, it allocates exclusive and/or non-exclusive
 access to resources (compute nodes) to users for some duration of time so they
 can perform work. Second, it provides a framework for starting, executing, and
-monitoring work (normally a parallel job) on the set of allocated nodes. 
-Finally, it arbitrates contention for resources by managing a queue of 
+monitoring work (normally a parallel job) on the set of allocated nodes.
+Finally, it arbitrates contention for resources by managing a queue of
 pending work.
-Optional plugins can be used for 
-<a href="accounting.html">accounting</a>, 
-<a href="reservations.html">advanced reservation</a>, 
-<a href="gang_scheduling.html">gang scheduling</a> (time sharing for 
-parallel jobs), backfill scheduling, 
-<a href="resource_limits.html">resource limits</a> by user or bank account, 
+Optional plugins can be used for
+<a href="accounting.html">accounting</a>,
+<a href="reservations.html">advanced reservation</a>,
+<a href="gang_scheduling.html">gang scheduling</a> (time sharing for
+parallel jobs), backfill scheduling,
+<a href="resource_limits.html">resource limits</a> by user or bank account,
 and sophisticated <a href="priority_multifactor.html"> multifactor job
   prioritization</a> algorithms.
 
 
-<p>SLURM has been developed through the collaborative efforts of 
+<p>SLURM has been developed through the collaborative efforts of
 <a href="https://www.llnl.gov/">Lawrence Livermore National Laboratory (LLNL)</a>,
-<a href="http://www.hp.com/">Hewlett-Packard</a>, 
+<a href="http://www.hp.com/">Hewlett-Packard</a>,
 <a href="http://www.bull.com/">Bull</a>,
 Linux NetworX and many other contributors.</p>
 
 <h2>Architecture</h2>
-<p>SLURM has a centralized manager, <b>slurmctld</b>, to monitor resources and 
-work. There may also be a backup manager to assume those responsibilities in the 
-event of failure. Each compute server (node) has a <b>slurmd</b> daemon, which 
-can be compared to a remote shell: it waits for work, executes that work, returns 
-status, and waits for more work. 
+<p>SLURM has a centralized manager, <b>slurmctld</b>, to monitor resources and
+work. There may also be a backup manager to assume those responsibilities in the
+event of failure. Each compute server (node) has a <b>slurmd</b> daemon, which
+can be compared to a remote shell: it waits for work, executes that work, returns
+status, and waits for more work.
 The <b>slurmd</b> daemons provide fault-tolerant hierarchical communications.
 There is an optional <b>slurmdbd</b> (Slurm DataBase Daemon) which can be used
-to record accounting information for multiple Slurm-managed clusters in a 
+to record accounting information for multiple Slurm-managed clusters in a
 single database.
-User tools include <b>srun</b> to initiate jobs, 
-<b>scancel</b> to terminate queued or running jobs, 
-<b>sinfo</b> to report system status, 
-<b>squeue</b> to report the status of jobs, and 
+User tools include <b>srun</b> to initiate jobs,
+<b>scancel</b> to terminate queued or running jobs,
+<b>sinfo</b> to report system status,
+<b>squeue</b> to report the status of jobs, and
 <b>sacct</b> to get information about jobs and job steps that are running or have completed.
-The <b>smap</b> and <b>sview</b> commands graphically reports system and 
-job status including network topology. 
-There is an administrative tool <b>scontrol</b> available to monitor 
-and/or modify configuration and state information on the cluster. 
+The <b>smap</b> and <b>sview</b> commands graphically report system and
+job status including network topology.
+There is an administrative tool <b>scontrol</b> available to monitor
+and/or modify configuration and state information on the cluster.
 The administrative tool used to manage the database is <b>sacctmgr</b>.
 It can be used to identify the clusters, valid users, valid bank accounts, etc.
 APIs are available for all functions.</p>
@@ -56,79 +56,79 @@ APIs are available for all functions.</p>
   Figure 1. SLURM components
 </div>
 
-<p>SLURM has a general-purpose plugin mechanism available to easily support various 
-infrastructures. This permits a wide variety of SLURM configurations using a 
-building block approach. These plugins presently include: 
+<p>SLURM has a general-purpose plugin mechanism available to easily support various
+infrastructures. This permits a wide variety of SLURM configurations using a
+building block approach. These plugins presently include:
 <ul>
-<li><a href="accounting_storageplugins.html">Accounting Storage</a>: 
-text file (default if jobacct_gather != none), 
+<li><a href="accounting_storageplugins.html">Accounting Storage</a>:
+text file (default if jobacct_gather != none),
 MySQL, PGSQL, SlurmDBD (Slurm Database Daemon) or none</li>
 
-<li><a href="authplugins.html">Authentication of communications</a>: 
-<a href="http://www.theether.org/authd/">authd</a>, 
+<li><a href="authplugins.html">Authentication of communications</a>:
+<a href="http://www.theether.org/authd/">authd</a>,
 <a href="http://home.gna.org/munge/">munge</a>, or none (default).</li>
 
 <li><a href="checkpoint_plugins.html">Checkpoint</a>: AIX, OpenMPI, XLCH, or none.</li>
 
-<li><a href="crypto_plugins.html">Cryptography (Digital Signature Generation)</a>: 
+<li><a href="crypto_plugins.html">Cryptography (Digital Signature Generation)</a>:
 <a href="http://home.gna.org/munge/">munge</a> (default) or
 <a href="http://www.openssl.org/">OpenSSL</a>.</li>
 
 <li><a href="jobacct_gatherplugins.html">Job Accounting Gather</a>: AIX, Linux, or none(default)</li>
 
-<li><a href="jobcompplugins.html">Job Completion Logging</a>: 
+<li><a href="jobcompplugins.html">Job Completion Logging</a>:
 text file, arbitrary script, MySQL, PGSQL, SlurmDBD, or none (default).</li>
 
 <li><a href="mpiplugins.html">MPI</a>: LAM, MPICH1-P4, MPICH1-shmem,
-MPICH-GM, MPICH-MX, MVAPICH, OpenMPI and none (default, for most 
+MPICH-GM, MPICH-MX, MVAPICH, OpenMPI and none (default, for most
 other versions of MPI including MPICH2 and MVAPICH2).</li>
 
 <li><a href="priority_plugins.html">Priority</a>:
 Assigns priorities to jobs as they arrive.
-Options include 
+Options include
 <a href="priority_multifactor.html">multifactor job prioritization</a>
 (assigns job priority based upon fair-share allocate, size, age, QoS, and/or partition) and
 basic (assigns job priority based upon age for First In First Out ordering, default).</li>
 
-<li><a href="proctrack_plugins.html">Process tracking (for signaling)</a>: 
-AIX (using a kernel extension), Linux process tree hierarchy, process group ID, 
+<li><a href="proctrack_plugins.html">Process tracking (for signaling)</a>:
+AIX (using a kernel extension), Linux process tree hierarchy, process group ID,
 RMS (Quadrics Linux kernel patch),
 and <a href="http://oss.sgi.com/projects/pagg/">SGI's Process Aggregates (PAGG)</a>.</li>
 
-<li><a href="selectplugins.html">Node selection</a>: 
-Bluegene (a 3-D torus interconnect BGL or BGP), 
-<a href="cons_res.html">consumable resources</a> (to allocate 
+<li><a href="selectplugins.html">Node selection</a>:
+Bluegene (a 3-D torus interconnect BGL or BGP),
+<a href="cons_res.html">consumable resources</a> (to allocate
 individual processors and memory) or linear (to dedicate entire nodes).</li>
 
-<li><a href="schedplugins.html">Scheduler</a>: 
-builtin (First In First Out, default), 
+<li><a href="schedplugins.html">Scheduler</a>:
+builtin (First In First Out, default),
 backfill (starts jobs early if doing so does not delay the expected initiation
-time of any higher priority job), 
+time of any higher priority job),
 gang (time-slicing for parallel jobs),
 <a href="http://www.clusterresources.com/pages/products/maui-cluster-scheduler.php">
-The Maui Scheduler</a>, and  
+The Maui Scheduler</a>, and
 <a href="http://www.clusterresources.com/pages/products/moab-cluster-suite.php">
 Moab Cluster Suite</a>.
 There is also a <a href="priority_multifactor.html">multifactor job
-prioritization</a> plugin 
-available for use with the basic, backfill and gang schedulers only. 
+prioritization</a> plugin
+available for use with the basic, backfill and gang schedulers only.
 Jobs can be prioritized by age, size, fair-share allocation, etc.
-Many <a href="resource_limits.html">resource limits</a> are also 
+Many <a href="resource_limits.html">resource limits</a> are also
 configurable by user or bank account.</li>
 
-<li><a href="switchplugins.html">Switch or interconnect</a>: 
-<a href="http://www.quadrics.com/">Quadrics</a> 
-(Elan3 or Elan4), 
-Federation 
-<a href="http://publib-b.boulder.ibm.com/Redbooks.nsf/f338d71ccde39f08852568dd006f956d/55258945787efc2e85256db00051980a?OpenDocument">Federation</a> (IBM High Performance Switch), 
-or none (actually means nothing requiring special handling, such as Ethernet or 
+<li><a href="switchplugins.html">Switch or interconnect</a>:
+<a href="http://www.quadrics.com/">Quadrics</a>
+(Elan3 or Elan4),
+Federation
+<a href="http://publib-b.boulder.ibm.com/Redbooks.nsf/f338d71ccde39f08852568dd006f956d/55258945787efc2e85256db00051980a?OpenDocument">Federation</a> (IBM High Performance Switch),
+or none (actually means nothing requiring special handling, such as Ethernet or
 <a href="http://www.myricom.com/">Myrinet</a>, default).</li>
 
 <li><a href="taskplugins.html">Task Affinity</a>:
 Affinity (bind tasks to processors or CPU sets) or none (no binding, the default).</li>
 
 <li><a href="topology_plugin.html">Network Topology</a>:
-3d_torus (optimize resource selection based upon a 3d_torus interconnect, default for Cray XT, Sun Constellation and IBM BlueGene), 
+3d_torus (optimize resource selection based upon a 3d_torus interconnect, default for Cray XT, Sun Constellation and IBM BlueGene),
 tree (optimize resource selection based upon switch connections) or
 none (the default).</li>
 
@@ -147,8 +147,8 @@ a job is assigned a set of nodes, the user is able to initiate parallel work in
 the form of job steps in any configuration within the allocation. For instance,
 a single job step may be started that utilizes all nodes allocated to the job,
 or several job steps may independently use a portion of the allocation.
-SLURM provides resource management for the processors allocated to a job, 
-so that multiple job steps can be simultaneously submitted and queued until 
+SLURM provides resource management for the processors allocated to a job,
+so that multiple job steps can be simultaneously submitted and queued until
 there are available resources within the job's allocation.</p>
 
 <div class="figure">
@@ -159,21 +159,21 @@ there are available resources within the job's allocation.</p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Configurability</h2>
-<p>Node state monitored include: count of processors, size of real memory, size 
-of temporary disk space, and state (UP, DOWN, etc.). Additional node information 
-includes weight (preference in being allocated work) and features (arbitrary information 
-such as processor speed or type). 
+<p>Node states monitored include: count of processors, size of real memory, size
+of temporary disk space, and state (UP, DOWN, etc.). Additional node information
+includes weight (preference in being allocated work) and features (arbitrary information
+such as processor speed or type).
 Nodes are grouped into partitions, which may contain overlapping nodes so they are
 best thought of as job queues.
-Partition information includes: name, list of associated nodes, state (UP or DOWN), 
-maximum job time limit, maximum node count per job, group access list, 
+Partition information includes: name, list of associated nodes, state (UP or DOWN),
+maximum job time limit, maximum node count per job, group access list,
 priority (important if nodes are in multiple partitions) and shared node access policy
-with optional over-subscription level for gang scheduling (e.g. YES, NO or FORCE:2).  
-Bit maps are used to represent nodes and scheduling 
-decisions can be made by performing a small number of comparisons and a series 
+with optional over-subscription level for gang scheduling (e.g. YES, NO or FORCE:2).
+Bit maps are used to represent nodes and scheduling
+decisions can be made by performing a small number of comparisons and a series
 of fast bit map manipulations. A sample (partial) SLURM configuration file follows.</p>
 <pre>
-# 
+#
 # Sample /etc/slurm.conf
 #
 ControlMachine=linux0001
diff --git a/doc/html/plane_ex1.gif b/doc/html/plane_ex1.gif
index fbf93c340edd6ae3fec6170b0cc6eea4fc33af64..ed0c8eaa14e1ead6f025c17ecc307a22b03af4c1 100755
Binary files a/doc/html/plane_ex1.gif and b/doc/html/plane_ex1.gif differ
diff --git a/doc/html/plane_ex2.gif b/doc/html/plane_ex2.gif
index 892ee3789bc66ffa1a5232e52b6d241ed78c457f..a339badcc0d2baf021a2bca6886413b11c83874e 100755
Binary files a/doc/html/plane_ex2.gif and b/doc/html/plane_ex2.gif differ
diff --git a/doc/html/plane_ex3.gif b/doc/html/plane_ex3.gif
index fdd3ce2220e4aa998de9ba949dd6aee48995e85d..12f31a93d1b825909dd2b4d5e4d678bd535c1ba5 100755
Binary files a/doc/html/plane_ex3.gif and b/doc/html/plane_ex3.gif differ
diff --git a/doc/html/plane_ex5.gif b/doc/html/plane_ex5.gif
index ce8f9e7e0438d681b87ce4ad484d6cb8286486b0..a5d4667047453ea28bbc855136089b143a0ecff1 100755
Binary files a/doc/html/plane_ex5.gif and b/doc/html/plane_ex5.gif differ
diff --git a/doc/html/plane_ex6.gif b/doc/html/plane_ex6.gif
index 6e6a19a2b5636494194f00d3cffc5ec09db3040a..c7e7627b8c35b4bfc00a76132d794d23ea066ad2 100755
Binary files a/doc/html/plane_ex6.gif and b/doc/html/plane_ex6.gif differ
diff --git a/doc/html/plane_ex7.gif b/doc/html/plane_ex7.gif
index a70d55ac86df7ff4f55f6dd38fe49a4807b27eba..4ee301192beb957e3c97043d037fdb629450af70 100755
Binary files a/doc/html/plane_ex7.gif and b/doc/html/plane_ex7.gif differ
diff --git a/doc/html/platforms.shtml b/doc/html/platforms.shtml
index 5d2e0c6ab1555e06e7ef318f16162bf4edd6e8d8..79cd6eaaa2c3b05985780995ef8ffb18a586e5f4 100644
--- a/doc/html/platforms.shtml
+++ b/doc/html/platforms.shtml
@@ -4,27 +4,27 @@
 <h2>Operating Systems</h2>
 <ul>
 <li><b>AIX</b>&#151;SLURM support for AIX has been thoroughly tested.</li>
-<li><b>Linux</b>&#151;SLURM has been thoroughly tested on most popular Linux 
+<li><b>Linux</b>&#151;SLURM has been thoroughly tested on most popular Linux
 distributions using i386, ia64, and x86_64 architectures.</li>
 <li><b>OS X</b>&#151;SLURM support for OS X is available.</li>
-<li><b>Solaris</b>&#151;SLURM support for Solaris (OpenSolaris build 119) was 
+<li><b>Solaris</b>&#151;SLURM support for Solaris (OpenSolaris build 119) was
 added in version 2.1.</li>
 <li><b>Other</b>&#151;SLURM ports to other systems will be gratefully accepted.</li>
 </ul>
 <h2>Interconnects</h2>
 <ul>
-<li><b>BlueGene</b>&#151;SLURM support for IBM's BlueGene/L and BlueGene/P 
+<li><b>BlueGene</b>&#151;SLURM support for IBM's BlueGene/L and BlueGene/P
 systems has been thoroughly tested.</li>
 <li><b>Cray XT</b>&#151;Much of the infrastructure to support a Cray XT
 system is current in SLURM. The interface to ALPS/BASIL remains to be done.
 Please contact us if you would be interested in this work.</li>
-<li><b>Ethernet</b>&#151;Ethernet requires no special support from SLURM and has 
+<li><b>Ethernet</b>&#151;Ethernet requires no special support from SLURM and has
 been thoroughly tested.</li>
-<li><b>IBM Federation</b>&#151;SLURM support for IBM's Federation Switch 
+<li><b>IBM Federation</b>&#151;SLURM support for IBM's Federation Switch
 has been thoroughly tested.</li>
 <li><b>Infiniband</b>&#151;Infiniband support has been thoroughly tested.</li>
 <li><b>Myrinet</b>&#151;Myrinet, MPICH-GM and MPICH-MX are supported.</li>
-<li><b>Quadrics Elan</b>&#151;SLURM support for Quadrics Elan 3 and Elan 4 switches 
+<li><b>Quadrics Elan</b>&#151;SLURM support for Quadrics Elan 3 and Elan 4 switches
 are available in all versions of SLURM and have been thoroughly tested.</li>
 <li><b>Sun Constellation</b>&#151;Resource allocation has been optimized
 for the three-dimensional torus interconnect.</li>
diff --git a/doc/html/plugins.shtml b/doc/html/plugins.shtml
index f1c9dcb53ae2459cb407acd200e52faa4433fdb9..d58b811875fc048d46cbb5017cb3eb8c7ce72a40 100644
--- a/doc/html/plugins.shtml
+++ b/doc/html/plugins.shtml
@@ -2,108 +2,108 @@
 
 <h1><a name="top">SLURM Plugin API</a></h1>
 <h2>Overview</h2>
-<p>A SLURM plugin is a dynamically linked code object which is loaded explicitly 
-at run time by the SLURM libraries. A plugin provides a customized implementation 
-of a well-defined API connected to tasks such as authentication, interconnect 
+<p>A SLURM plugin is a dynamically linked code object which is loaded explicitly
+at run time by the SLURM libraries. A plugin provides a customized implementation
+of a well-defined API connected to tasks such as authentication, interconnect
 fabric, and task scheduling.</p>
 <h2>Identification</h2>
-<p>A SLURM plugin identifies itself by a short character string formatted similarly 
-to a MIME type: <i>&lt;major&gt;/&lt;minor&gt;</i>. The major type identifies 
-which API the plugin implements. The minor type uniquely distinguishes a plugin 
-from other plugins that implement that same API, by such means as the intended 
-platform or the internal algorithm. For example, a plugin to interface to the 
-Maui scheduler would give its type as &quot;sched/maui.&quot; It would implement 
+<p>A SLURM plugin identifies itself by a short character string formatted similarly
+to a MIME type: <i>&lt;major&gt;/&lt;minor&gt;</i>. The major type identifies
+which API the plugin implements. The minor type uniquely distinguishes a plugin
+from other plugins that implement that same API, by such means as the intended
+platform or the internal algorithm. For example, a plugin to interface to the
+Maui scheduler would give its type as &quot;sched/maui.&quot; It would implement
 the SLURM Scheduler API.</p>
 <h2>Versioning</h2>
-<p>SLURM plugin version numbers comprise a major and minor revision number. As 
-SLURM evolves, changes to the individual plugin APIs may be necessary to implement 
-new features. The major number identifies the version of the applicable API that 
-the plugin implements. Incrementing the major version number denotes that the 
+<p>SLURM plugin version numbers comprise a major and minor revision number. As
+SLURM evolves, changes to the individual plugin APIs may be necessary to implement
+new features. The major number identifies the version of the applicable API that
+the plugin implements. Incrementing the major version number denotes that the
 API has changed significantly and possibly incompatibly over prior versions.</p>
-<p>Because plugins are separate code objects and perhaps under the control of 
-third parties, version skew may occur in a SLURM installation. SLURM may support 
-multiple versions of each API in a backward-compatible fashion to provide time 
-for plugin authors to update their plugins. Conversely, the plugin may support 
-multiple versions of the API in order to be transparently portable across different 
-SLURM installations. The version of the API spoken in an installation will be 
-the highest-numbered version which is common to both SLURM and the plugin. Each 
-SLURM release will document which API versions it supports. From time to time 
+<p>Because plugins are separate code objects and perhaps under the control of
+third parties, version skew may occur in a SLURM installation. SLURM may support
+multiple versions of each API in a backward-compatible fashion to provide time
+for plugin authors to update their plugins. Conversely, the plugin may support
+multiple versions of the API in order to be transparently portable across different
+SLURM installations. The version of the API spoken in an installation will be
+the highest-numbered version which is common to both SLURM and the plugin. Each
+SLURM release will document which API versions it supports. From time to time
 ancient API versions will be deprecated.</p>
-<p>The minor version number is incremented at the discretion of the plugin author 
-and denotes revisions or upgrades particular to that implementation. If two or 
-more plugins of the same type are provided in an installation, the plugin with 
+<p>The minor version number is incremented at the discretion of the plugin author
+and denotes revisions or upgrades particular to that implementation. If two or
+more plugins of the same type are provided in an installation, the plugin with
 the highest minor revision will be selected.</p>
 <p class="footer"><a href="#top">top</a></p>
 <h2>Data Objects</h2>
 <p>A plugin must define and export the following symbols:</p>
 <ul>
 <li><span class="commandline">char plugin_type[]<br>
-</span> a unique, short, formatted string to identify the plugin's purpose as 
-described above. A &quot;null&quot; plugin (i.e., one that implements the desired 
+</span> a unique, short, formatted string to identify the plugin's purpose as
+described above. A &quot;null&quot; plugin (i.e., one that implements the desired
 API as stubs) should have a minor type of &quot;none.&quot;</li>
 <li><span class="commandline">char plugin_name[]<br>
-</span> a free-form string that identifies the plugin in human-readable terms, 
-such as &quot;Kerberos authentication.&quot; SLURM will use this string to identify 
+</span> a free-form string that identifies the plugin in human-readable terms,
+such as &quot;Kerberos authentication.&quot; SLURM will use this string to identify
 the plugin to end users.</li>
 <li><span class="commandline">const uint32_t plugin_version</span><br>
-a 32-bit unsigned integer giving the version of the plugin as described above. 
-The major revision number is multiplied by 1,000 and added to the minor revision 
-number to produce the integer value. Thus, a plugin with a major revision number 
-of 2 and a minor revision number of 35 will have a <span class="commandline">plugin_version</span> 
+a 32-bit unsigned integer giving the version of the plugin as described above.
+The major revision number is multiplied by 1,000 and added to the minor revision
+number to produce the integer value. Thus, a plugin with a major revision number
+of 2 and a minor revision number of 35 will have a <span class="commandline">plugin_version</span>
 value of 2035.</li>
 </ul>
 <p>A plugin may optionally define and export the following symbols:</p>
 <ul>
 <li>const uint32_t plugin_legacy<br>
-a 32-bit unsigned integer formatted the same as <span class="commandline">plugin_version</span> 
-giving the lowest API version number with which this plugin is compatible. If 
-this symbol is omitted, its value is assumed to be equivalent to the <span class="commandline">plugin_version</span> 
-rounded to the next lowest 1,000. Only the major version number of this symbol 
+a 32-bit unsigned integer formatted the same as <span class="commandline">plugin_version</span>
+giving the lowest API version number with which this plugin is compatible. If
+this symbol is omitted, its value is assumed to be equivalent to the <span class="commandline">plugin_version</span>
+rounded to the next lowest 1,000. Only the major version number of this symbol
 is significant.</li>
 </ul>
 <p class="footer"><a href="#top">top</a></p>
 <h2>API Functions</h2>
 <p class="commandline">int init (void);</p>
-<p style="margin-left:.2in"><b>Description</b>: If present, this function is called 
-just after the plugin is loaded. This allows the plugin to perform any global 
+<p style="margin-left:.2in"><b>Description</b>: If present, this function is called
+just after the plugin is loaded. This allows the plugin to perform any global
 initialization prior to any actual API calls.</p>
 <p style="margin-left:.2in"><b>Arguments</b>: None.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if the plugin's initialization 
-was successful. Any other return value indicates to SLURM that the plugin should 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if the plugin's initialization
+was successful. Any other return value indicates to SLURM that the plugin should
 be unloaded and not used.</p>
 <p class="commandline">void fini (void);</p>
-<p style="margin-left:.2in"><b>Description</b>: If present, this function is called 
-just before the plugin is unloaded. This allows the plugin to do any finalization 
+<p style="margin-left:.2in"><b>Description</b>: If present, this function is called
+just before the plugin is unloaded. This allows the plugin to do any finalization
 after the last plugin-specific API call is made.</p>
 <p style="margin-left:.2in"><b>Arguments</b>: None.</p>
 <p style="margin-left:.2in"><b>Returns</b>: None.</p>
-<p><b>Note</b>: These functions are not the same as those described in the <span class="commandline">dlopen 
-(3)</span> system library. The C run-time system co-opts those symbols for its 
-own initialization. The system <span class="commandline">init()</span> is called 
-before the SLURM <span class="commandline">plugininit()</span>, and the SLURM 
+<p><b>Note</b>: These functions are not the same as those described in the <span class="commandline">dlopen
+(3)</span> system library. The C run-time system co-opts those symbols for its
+own initialization. The system <span class="commandline">init()</span> is called
+before the SLURM <span class="commandline">plugininit()</span>, and the SLURM
 <span class="commandline">fini()</span> is called before the system's <span class="commandline">fini()</span>.</p>
-<p>The functions need not appear. The plugin may provide either <span class="commandline">init()</span> 
+<p>The functions need not appear. The plugin may provide either <span class="commandline">init()</span>
 or <span class="commandline">fini()</span> or both.</p>
 <p class="footer"><a href="#top">top</a></p>
 <h2>Thread Safety</h2>
-<p>SLURM is a multithreaded application. The SLURM plugin library may exercise 
-the plugin functions in a re-entrant fashion. It is the responsibility of the 
-plugin author to provide the necessarily mutual exclusion and synchronization 
+<p>SLURM is a multithreaded application. The SLURM plugin library may exercise
+the plugin functions in a re-entrant fashion. It is the responsibility of the
+plugin author to provide the necessarily mutual exclusion and synchronization
 in order to avoid the pitfalls of re-entrant code.</p>
 <h2>Run-time Support</h2>
-<p>The standard system libraries are available to the plugin. The SLURM libraries 
-are also available and plugin authors are encouraged to make use of them rather 
-than develop their own substitutes. Plugins should use the SLURM log to print 
+<p>The standard system libraries are available to the plugin. The SLURM libraries
+are also available and plugin authors are encouraged to make use of them rather
+than develop their own substitutes. Plugins should use the SLURM log to print
 error messages.</p>
-<p>The plugin author is responsible for specifying any specific non-standard libraries 
-needed for correct operation. Plugins will not load if their dependent libraries 
-are not available, so it is the installer's job to make sure the specified libraries 
+<p>The plugin author is responsible for specifying any specific non-standard libraries
+needed for correct operation. Plugins will not load if their dependent libraries
+are not available, so it is the installer's job to make sure the specified libraries
 are available.</p>
 <h2>Performance</h2>
-<p>All plugin functions are expected to execute very quickly. If any function 
-entails delays (e.g. transactions with other systems), it should be written to 
-utilize a thread for that functionality. This thread may be created by the 
-<span class="commandline">init()</span> function and deleted by the 
+<p>All plugin functions are expected to execute very quickly. If any function
+entails delays (e.g. transactions with other systems), it should be written to
+utilize a thread for that functionality. This thread may be created by the
+<span class="commandline">init()</span> function and deleted by the
 <span class="commandline">fini()</span> functions. See <b>plugins/sched/backfill</b>
 for an example of how to do this.</p>
 <p class="footer"><a href="#top">top</a></p>
diff --git a/doc/html/power_save.shtml b/doc/html/power_save.shtml
index 5cacecb00b6388d9e041e1759e6679b8f84e3a46..0aec4b931307133b1c8789f99cf3ffe6d9f74c00 100644
--- a/doc/html/power_save.shtml
+++ b/doc/html/power_save.shtml
@@ -3,28 +3,28 @@
 <h1>Power Saving Guide</h1>
 
 <p>SLURM provides an integrated power saving mechanism for idle nodes.
-Nodes that remain idle for a configurable period of time can be placed 
-in a power saving mode. 
+Nodes that remain idle for a configurable period of time can be placed
+in a power saving mode.
 The nodes will be restored to normal operation once work is assigned to them.
 Beginning with version 2.0.0, nodes can be fully powered down.
-Earlier releases of SLURM do not support the powering down of nodes, 
+Earlier releases of SLURM do not support the powering down of nodes,
 only support of reducing their performance and thus their power consumption.
-For example, power saving can be accomplished using a <i>cpufreq</i> governor 
+For example, power saving can be accomplished using a <i>cpufreq</i> governor
 that can change CPU frequency and voltage (note that the <i>cpufreq</i> driver
 must be enabled in the Linux kernel configuration).
-Of particular note, SLURM can power nodes up or down 
-at a configurable rate to prevent rapid changes in power demands. 
-For example, starting a 1000 node job on an idle cluster could result 
-in an instantaneous surge in power demand of multiple megawatts without 
+Of particular note, SLURM can power nodes up or down
+at a configurable rate to prevent rapid changes in power demands.
+For example, starting a 1000 node job on an idle cluster could result
+in an instantaneous surge in power demand of multiple megawatts without
 SLURM's support to increase power demands in a gradual fashion.</p>
 
 
 <h2>Configuration</h2>
 
-<p>A great deal of flexibility is offered in terms of when and 
-how idle nodes are put into or removed from power save mode. 
-Note that the SLURM control daemon, <i>slurmctld</i>, must be 
-restarted to initially enable power saving mode. 
+<p>A great deal of flexibility is offered in terms of when and
+how idle nodes are put into or removed from power save mode.
+Note that the SLURM control daemon, <i>slurmctld</i>, must be
+restarted to initially enable power saving mode.
 Changes in the configuration parameters (e.g. <i>SuspendTime</i>)
 will take effect after modifying the <i>slurm.conf</i> configuration
 file and executing "<i>scontrol reconfig</i>".
@@ -32,22 +32,22 @@ The following configuration parameters are available:
 <ul>
 
 <li><b>SuspendTime</b>:
-Nodes becomes eligible for power saving mode after being idle 
-for this number of seconds. 
+Nodes become eligible for power saving mode after being idle
+for this number of seconds.
 The configured value should exceed the time to suspend and resume a node.
 A negative number disables power saving mode.
 The default value is -1 (disabled).</li>
 
 <li><b>SuspendRate</b>:
-Maximum number of nodes to be placed into power saving mode 
-per minute. 
+Maximum number of nodes to be placed into power saving mode
+per minute.
 A value of zero results in no limits being imposed.
 The default value is 60.
 Use this to prevent rapid drops in power consumption.</li>
 
 <li><b>ResumeRate</b>:
-Maximum number of nodes to be removed from power saving mode 
-per minute. 
+Maximum number of nodes to be removed from power saving mode
+per minute.
 A value of zero results in no limits being imposed.
 The default value is 300.
 Use this to prevent rapid increases in power consumption.</li>
@@ -55,69 +55,69 @@ Use this to prevent rapid increases in power consumption.</li>
 <li><b>SuspendProgram</b>:
 Program to be executed to place nodes into power saving mode.
 The program executes as <i>SlurmUser</i> (as configured in
-<i>slurm.conf</i>). 
-The argument to the program will be the names of nodes to 
-be placed into power savings mode (using SLURM's hostlist 
+<i>slurm.conf</i>).
+The argument to the program will be the names of nodes to
+be placed into power savings mode (using SLURM's hostlist
 expression format).</li>
 
 <li><b>ResumeProgram</b>:
 Program to be executed to remove nodes from power saving mode.
 The program executes as <i>SlurmUser</i> (as configured in
-<i>slurm.conf</i>). 
-The argument to the program will be the names of nodes to 
-be removed from power savings mode (using SLURM's hostlist 
+<i>slurm.conf</i>).
+The argument to the program will be the names of nodes to
+be removed from power savings mode (using SLURM's hostlist
 expression format).
 This program may use the <i>scontrol show node</i> command
-to insure that a node has booted and the <i>slurmd</i> 
-daemon started. 
+to insure that a node has booted and the <i>slurmd</i>
+daemon started.
 If the <i>slurmd</i> daemon fails to respond within the
-configured <b>SlurmdTimeout</b> value, the node will be 
+configured <b>SlurmdTimeout</b> value, the node will be
 placed in a DOWN state and the job requesting the node
 will be requeued.
-For reasons of reliability, <b>ResumeProgram</b> may execute 
-more than once for a node when the <b>slurmctld</b> daemon 
+For reasons of reliability, <b>ResumeProgram</b> may execute
+more than once for a node when the <b>slurmctld</b> daemon
 crashes and is restarted.</li>
 
 <li><b>SuspendTimeout</b>:
-Maximum time permitted (in second) between when a node suspend request 
-is issued and when the node shutdown is complete. 
-At that time the node must ready for a resume request to be issued 
-as needed for new workload.  
+Maximum time permitted (in seconds) between when a node suspend request
+is issued and when the node shutdown is complete.
+At that time the node must be ready for a resume request to be issued
+as needed for new workload.
 The default value is 30 seconds.</li>
 
 <li><b>ResumeTimeout</b>:
-Maximum time permitted (in second) between when a node resume request 
-is issued and when the node is actually available for use. 
+Maximum time permitted (in seconds) between when a node resume request
+is issued and when the node is actually available for use.
 Nodes which fail to respond in this time frame may be marked DOWN and
 the jobs scheduled on the node requeued.
 The default value is 60 seconds.</li>
 
 <li><b>SuspendExcNodes</b>:
-List of nodes to never place in power saving mode. 
+List of nodes to never place in power saving mode.
 Use SLURM's hostlist expression format.
 By default, no nodes are excluded.</li>
 
 <li><b>SuspendExcParts</b>:
-List of partitions with nodes to never place in power saving mode. 
+List of partitions with nodes to never place in power saving mode.
 Multiple partitions may be specified using a comma separator.
 By default, no nodes are excluded.</li>
 </ul></p>
 
-<p>Note that <i>SuspendProgram</i> and <i>ResumeProgram</i> execute as 
+<p>Note that <i>SuspendProgram</i> and <i>ResumeProgram</i> execute as
 <i>SlurmUser</i> on the node where the <i>slurmctld</i> daemon runs
-(primary and backup server nodes). 
-Use of <i>sudo</i> may be required for <i>SlurmUser</i>to power down 
+(primary and backup server nodes).
+Use of <i>sudo</i> may be required for <i>SlurmUser</i> to power down
 and restart nodes.
 If you need to convert SLURM's hostlist expression into individual node
 names, the <i>scontrol show hostnames</i> command may prove useful.
 The commands used to boot or shut down nodes will depend upon your
 cluster management tools.</p>
 
-<p>Note that <i>SuspendProgram</i> and <i>ResumeProgram</i> are not 
+<p>Note that <i>SuspendProgram</i> and <i>ResumeProgram</i> are not
 subject to any time limits.
 They should perform the required action, ideally verify the action
 (e.g. node boot and start the <i>slurmd</i> daemon, thus the node is
-no longer non-responsive to <i>slurmctld</i>) and terminate. 
+no longer non-responsive to <i>slurmctld</i>) and terminate.
 Long running programs will be logged by <i>slurmctld</i>, but not
 aborted.</p>
 
@@ -145,23 +145,23 @@ do
 done
 </pre>
 
-<p>Subject to the various rates, limits and exclusions, the power save 
+<p>Subject to the various rates, limits and exclusions, the power save
 code follows this logic:
 <ol>
 <li>Identify nodes which have been idle for at least <b>SuspendTime</b>.</li>
 <li>Execute <b>SuspendProgram</b> with an argument of the idle node names.</li>
-<li>Identify the nodes which are in power save mode (a flag in the node's 
+<li>Identify the nodes which are in power save mode (a flag in the node's
 state field), but have been allocated to jobs.</li>
 <li>Execute <b>ResumeProgram</b> with an argument of the allocated node names.</li>
-<li>Once the <i>slurmd</i> responds, initiate the job and/or job steps 
+<li>Once the <i>slurmd</i> responds, initiate the job and/or job steps
 allocated to it.</li>
-<li>If the <i>slurmd</i> fails to respond within the value configured for 
+<li>If the <i>slurmd</i> fails to respond within the value configured for
 <b>SlurmdTimeout</b>, the node will be marked DOWN and the job requeued
 if possible.</li>
 <li>Repeat indefinitely.</li>
 </ol></p>
 
-<p>The slurmctld daemon will periodically (every 10 minutes) log how many 
+<p>The slurmctld daemon will periodically (every 10 minutes) log how many
 nodes are in power save mode using messages of this sort:
 <pre>
 [May 02 15:31:25] Power save mode 0 nodes
@@ -171,66 +171,66 @@ nodes are in power save mode using messages of this sort:
 [May 02 15:51:28] Power save mode 22 nodes
 </pre>
 
-<p>Using these logs you can easily see the effect of SLURM's power saving 
+<p>Using these logs you can easily see the effect of SLURM's power saving
 support.
-You can also configure SLURM with programs that perform no action as <b>SuspendProgram</b> and <b>ResumeProgram</b> to assess the potential 
+You can also configure SLURM with programs that perform no action as <b>SuspendProgram</b> and <b>ResumeProgram</b> to assess the potential
 impact of power saving mode before enabling it.</p>
 
 <h2>Use of Allocations</h2>
 
 <p>A resource allocation request will be granted as soon as resources
-are selected for use, possibly before the nodes are all available 
+are selected for use, possibly before the nodes are all available
 for use.
-The launching of job steps will be delayed until the required nodes 
+The launching of job steps will be delayed until the required nodes
 have been restored to service (it prints a warning about waiting for
-nodes to become available and periodically retries until they are 
+nodes to become available and periodically retries until they are
 available).</p>
 
 <p>In the case of an <i>sbatch</i> command, the batch program will start
 when node zero of the allocation is ready for use and pre-processing can
 be performed as needed before using <i>srun</i> to launch job steps.
-The operation of <i>salloc</i> and <i>srun</i> follow a similar pattern 
-of getting an job allocation at one time, but possibly being unable to 
-launch job steps until later. 
+The operation of <i>salloc</i> and <i>srun</i> follow a similar pattern
+of getting a job allocation at one time, but possibly being unable to
+launch job steps until later.
 If <i>ssh</i> or some other tools is used by <i>salloc</i> it may be
 desirable to execute "<i>srun /bin/true</i>" or some other command
-first to insure that all nodes are booted and ready for use. 
+first to insure that all nodes are booted and ready for use.
 We plan to add a job and node state of <i>CONFIGURING</i> in SLURM
 version 2.1, which could be used to prevent salloc from executing
-any processes (including <i>ssh</i>) until all of the nodes are 
+any processes (including <i>ssh</i>) until all of the nodes are
 ready for use.</p>
 
 <h2>Fault Tolerance</h2>
 
 <p>If the <i>slurmctld</i> daemon is terminated gracefully, it will
 wait up to <b>SuspendTimeout</b> or <b>ResumeTimeout</b> (whichever
-is larger) for any spawned <b>SuspendProgram</b> or 
-<b>ResumeProgram</b> to terminate before the daemon terminates. 
-If the spawned program does not terminate within that time period, 
-the event will be logged and <i>slurmctld</i> will exit in order to 
+is larger) for any spawned <b>SuspendProgram</b> or
+<b>ResumeProgram</b> to terminate before the daemon terminates.
+If the spawned program does not terminate within that time period,
+the event will be logged and <i>slurmctld</i> will exit in order to
 permit another <i>slurmctld</i> daemon to be initiated.
-Synchronization problems could also occur when the <i>slurmctld</i> 
+Synchronization problems could also occur when the <i>slurmctld</i>
 daemon crashes (a rare event) and is restarted. </p>
 
-<p>In either event, the newly initiated <i>slurmctld</i> daemon (or 
-the backup server) will recover saved node state information that 
+<p>In either event, the newly initiated <i>slurmctld</i> daemon (or
+the backup server) will recover saved node state information that
 may not accurately describe the actual node state.
-In the case of a failed <b>SuspendProgram</b>, the negative impact is 
-limited to increased power consumption, so no special action is 
-currently taken to execute <b>SuspendProgram</b> multiple times in 
+In the case of a failed <b>SuspendProgram</b>, the negative impact is
+limited to increased power consumption, so no special action is
+currently taken to execute <b>SuspendProgram</b> multiple times in
 order to insure the node is in a reduced power mode.
-The case of a failed <b>ResumeProgram</b> is more serious in that the 
+The case of a failed <b>ResumeProgram</b> is more serious in that the
 node could be placed into a DOWN state and/or jobs could fail.
-In order to minimize this risk, when the <i>slurmctld</i> daemon is 
-started and node which should be allocated to a job fails to respond, 
+In order to minimize this risk, when the <i>slurmctld</i> daemon is
+started and a node which should be allocated to a job fails to respond,
 the <b>ResumeProgram</b> will be executed (possibly for a second time).</p>
 
 <h2>Booting Different Images</h2>
 
-<p>SLURM's <b>PrologSlurmctld</b> configuration parameter can identify a 
-program to boot different operating system images for each job based upon it's 
+<p>SLURM's <b>PrologSlurmctld</b> configuration parameter can identify a
+program to boot different operating system images for each job based upon its
 constraint field (or possibly comment).
-If you want <b>ResumeProgram</b> to boot a various images according to 
+If you want <b>ResumeProgram</b> to boot various images according to
 job specifications, it will need to be a fairly sophisticated program
 and perform the following actions:
 <ol>
diff --git a/doc/html/preempt.shtml b/doc/html/preempt.shtml
index adbe05226b50a59beee340725c34f5d92aea34dc..d5389ee71de34599a5f16eedebd47b096e801286 100644
--- a/doc/html/preempt.shtml
+++ b/doc/html/preempt.shtml
@@ -4,34 +4,34 @@
 
 <P>
 SLURM supports job preemption, the act of stopping one or more "low-priority"
-jobs to let a "high-priority" job run uninterrupted until it completes. 
-Job preemption is implemented as a variation of SLURM's 
+jobs to let a "high-priority" job run uninterrupted until it completes.
+Job preemption is implemented as a variation of SLURM's
 <a href="gang_scheduling.html">Gang Scheduling</a> logic.
 When a high-priority job has been allocated resources that have already been
-allocated to one or more low priority jobs, the low priority job(s) are 
-preempted. 
+allocated to one or more low priority jobs, the low priority job(s) are
+preempted.
 The low priority job(s) can resume once the high priority job completes.
-Alternately, the low priority job(s) can be requeued and started using other 
+Alternately, the low priority job(s) can be requeued and started using other
 resources if so configured in newer versions of SLURM.
 </P>
 <P>
-In SLURM version 2.0 and earlier, high priority work is identified by the 
+In SLURM version 2.0 and earlier, high priority work is identified by the
 priority of the job's partition and low priority jobs are always suspended.
 The job preemption logic is within the <I>sched/gang</I> plugin.
-In SLURM version 2.1 and higher, the job's partition priority or its 
-Quality Of Service (QOS) can be used to identify the which jobs can preempt 
+In SLURM version 2.1 and higher, the job's partition priority or its
+Quality Of Service (QOS) can be used to identify which jobs can preempt
 or be preempted by other jobs.
 </P>
 <P>
 SLURM version 2.1 offers several options for the job preemption mechanism
 including checkpoint, requeue, or cancel.
-the option of requeuing low priority jobs 
+the option of requeuing low priority jobs
 Checkpointed jobs are not automatically requeued or restarted.
 Requeued jobs may restart faster by using different resources.
-All of these new job preemption mechanisms release a job's memory space for 
+All of these new job preemption mechanisms release a job's memory space for
 use by other jobs.
 In SLURM version 2.1, some job preemption logic was moved into the
-<I>select</I> plugin and main code base to permit use of both job preemption 
+<I>select</I> plugin and main code base to permit use of both job preemption
 plus the backfill scheduler plugin, <i>sched/backfill</I>.
 </P>
 
@@ -41,28 +41,28 @@ There are several important configuration parameters relating to preemption:
 </P>
 <UL>
 <LI>
-<B>SelectType</B>: SLURM job preemption logic supports nodes allocated by the 
-<I>select/linear</I> plugin and socket/core/CPU resources allocated by the 
+<B>SelectType</B>: SLURM job preemption logic supports nodes allocated by the
+<I>select/linear</I> plugin and socket/core/CPU resources allocated by the
 <I>select/cons_res</I> plugin.
 </LI>
 <LI>
-<B>SelectTypeParameter</B>: Since resources may be getting over-allocated 
+<B>SelectTypeParameter</B>: Since resources may be getting over-allocated
 with jobs (suspended jobs remain in memory), the resource selection
 plugin should be configured to track the amount of memory used by each job to
 ensure that memory page swapping does not occur. When <I>select/linear</I> is
 chosen, we recommend setting <I>SelectTypeParameter=CR_Memory</I>. When
 <I>select/cons_res</I> is chosen, we recommend including Memory as a resource
-(ex. <I>SelectTypeParameter=CR_Core_Memory</I>). 
+(ex. <I>SelectTypeParameter=CR_Core_Memory</I>).
 <BR><B>NOTE:</B> Unless <I>PreemptMode=SUSPEND,GANG</I> these memory management
 parameters are not critical.
 </LI>
 <LI>
-<B>DefMemPerCPU</B>: Since job requests may not explicitly specify 
-a memory requirement, we also recommend configuring 
-<I>DefMemPerCPU</I> (default memory per allocated CPU) or 
-<I>DefMemPerNode</I> (default memory per allocated node). 
-It may also be desirable to configure 
-<I>MaxMemPerCPU</I> (maximum memory per allocated CPU) or 
+<B>DefMemPerCPU</B>: Since job requests may not explicitly specify
+a memory requirement, we also recommend configuring
+<I>DefMemPerCPU</I> (default memory per allocated CPU) or
+<I>DefMemPerNode</I> (default memory per allocated node).
+It may also be desirable to configure
+<I>MaxMemPerCPU</I> (maximum memory per allocated CPU) or
 <I>MaxMemPerNode</I> (maximum memory per allocated node) in <I>slurm.conf</I>.
 Users can use the <I>--mem</I> or <I>--mem-per-cpu</I> option
 at job submission time to specify their memory requirements.
@@ -82,17 +82,17 @@ other jobs sharing the same resources.
 parameters are not critical.
 </LI>
 <LI>
-<B>PreemptMode</B>: Configure to <I>CANCEL</I>, <I>CHECKPOINT</I>, 
-<I>SUSPEND</I> or <I>REQUEUE</I> depending on the desired action for low 
-priority jobs. 
+<B>PreemptMode</B>: Configure to <I>CANCEL</I>, <I>CHECKPOINT</I>,
+<I>SUSPEND</I> or <I>REQUEUE</I> depending on the desired action for low
+priority jobs.
 <UL>
 <LI>A value of <I>CANCEL</I> will always cancel the job.</LI>
-<LI>A value of <I>CHECKPOINT</I> will checkpoint (if possible) or kill low 
+<LI>A value of <I>CHECKPOINT</I> will checkpoint (if possible) or kill low
 priority jobs.</LI>
 Checkpointed jobs are not automatically restarted.
-<LI>A value of <I>REQUEUE</I> will requeue (if possible) or kill low priority 
+<LI>A value of <I>REQUEUE</I> will requeue (if possible) or kill low priority
 jobs. Requeued jobs are permitted to be restarted on different resources.</LI>
-<LI>A value of <I>SUSPEND</I> will suspend and automatically resume the low 
+<LI>A value of <I>SUSPEND</I> will suspend and automatically resume the low
 priority jobs. The <I>SUSPEND</I> option must be used with the <I>GANG</I>
 option (e.g. "PreemptMode=SUSPEND,GANG").</LI>
 </UL>
@@ -103,20 +103,20 @@ which jobs can preempt other jobs.
 <UL>
 <LI><I>preempt/none</I> indicates that jobs will not preempt each other
 (default).</LI>
-<LI><I>preempt/partition_prio</I> indicates that jobs from one partition 
+<LI><I>preempt/partition_prio</I> indicates that jobs from one partition
 can preempt jobs from lower priority partitions.</LI>
-<LI><I>preempt/qos</I> indicates that jobs from one Quality Of Service (QOS) 
-can preempt jobs from a lower QOS. These jobs can be in the same partition 
-or different partitions. PreemptMode must be set to CANCEL, CHECKPOINT, 
+<LI><I>preempt/qos</I> indicates that jobs from one Quality Of Service (QOS)
+can preempt jobs from a lower QOS. These jobs can be in the same partition
+or different partitions. PreemptMode must be set to CANCEL, CHECKPOINT,
 SUSPEND or REQUEUE. This option requires the use of a database identifying
 available QOS and their preemption rules. </LI>
 </UL>
 </LI>
 <LI>
 <B>Priority</B>: Configure the partition's <I>Priority</I> setting relative to
-other partitions to control the preemptive behavior when 
-<I>PreemptType=preempt/partition_prio</I>. 
-This option is not relevant if <I>PreemptType=preempt/qos</I>. 
+other partitions to control the preemptive behavior when
+<I>PreemptType=preempt/partition_prio</I>.
+This option is not relevant if <I>PreemptType=preempt/qos</I>.
 If two jobs from two
 different partitions are allocated to the same resources, the job in the
 partition with the greater <I>Priority</I> value will preempt the job in the
@@ -127,13 +127,13 @@ of the two partitions are equal then no preemption will occur. The default
 partition <I>Priority</I> is not critical.
 </LI>
 <LI>
-<B>Shared</B>: Configure the partition's <I>Shared</I> setting to 
-<I>FORCE</I> for all partitions in which job preemption is to take place. 
-The <I>FORCE</I> option supports an additional parameter that controls 
-how many jobs can share a resource (FORCE[:max_share]). By default the 
+<B>Shared</B>: Configure the partition's <I>Shared</I> setting to
+<I>FORCE</I> for all partitions in which job preemption is to take place.
+The <I>FORCE</I> option supports an additional parameter that controls
+how many jobs can share a resource (FORCE[:max_share]). By default the
 max_share value is 4. In order to preempt jobs (and not gang schedule them),
-always set max_share to 1. To allow up to 2 jobs from this partition to be 
-allocated to a common resource (and gang scheduled), set 
+always set max_share to 1. To allow up to 2 jobs from this partition to be
+allocated to a common resource (and gang scheduled), set
 <I>Shared=FORCE:2</I>.
 </LI>
 </UL>
@@ -148,54 +148,54 @@ SLURM requires a full restart of the daemons. If you just change the partition
 <H2>Preemption Design and Operation</H2>
 
 <P>
-The select plugin will identify resources where a pending job can begin 
+The select plugin will identify resources where a pending job can begin
 execution.
-When <I>PreemptMode</I> is configured to CANCEL, CHECKPOINT, SUSPEND or 
-REQUEUE, the select plugin will also preempt running jobs as needed to 
-initiate the pending job. 
-When <I>PreemptMode=SUSPEND,GANG</I> the select plugin will initiate the 
-pending job and rely upon the gang scheduling logic to perform job suspend 
+When <I>PreemptMode</I> is configured to CANCEL, CHECKPOINT, SUSPEND or
+REQUEUE, the select plugin will also preempt running jobs as needed to
+initiate the pending job.
+When <I>PreemptMode=SUSPEND,GANG</I> the select plugin will initiate the
+pending job and rely upon the gang scheduling logic to perform job suspend
 and resume as described below.
 </P>
 <P>
-When enabled, the gang scheduling logic (which is also supports job 
-preemption) keeps track of the resources allocated to all jobs. 
-For each partition an "active bitmap" is maintained that tracks all 
-concurrently running jobs in the SLURM cluster. 
-Each partition also maintains a job list for that partition, and a list of 
-"shadow" jobs. 
-The "shadow" jobs are high priority job allocations that "cast shadows" on the 
-active bitmaps of the low priority jobs. 
+When enabled, the gang scheduling logic (which also supports job
+preemption) keeps track of the resources allocated to all jobs.
+For each partition an "active bitmap" is maintained that tracks all
+concurrently running jobs in the SLURM cluster.
+Each partition also maintains a job list for that partition, and a list of
+"shadow" jobs.
+The "shadow" jobs are high priority job allocations that "cast shadows" on the
+active bitmaps of the low priority jobs.
 Jobs caught in these "shadows" will be preempted.
 </P>
 <P>
-Each time a new job is allocated to resources in a partition and begins 
+Each time a new job is allocated to resources in a partition and begins
 running, the gang scheduler adds a "shadow" of this job to all lower priority
-partitions. 
-The active bitmap of these lower priority partitions are then rebuilt, with the shadow jobs added first. 
-Any existing jobs that were replaced by one or more "shadow" jobs are 
-suspended (preempted). Conversely, when a high priority running job completes, 
-it's "shadow" goes away and the active bitmaps of the lower priority 
+partitions.
+The active bitmap of these lower priority partitions are then rebuilt, with the shadow jobs added first.
+Any existing jobs that were replaced by one or more "shadow" jobs are
+suspended (preempted). Conversely, when a high priority running job completes,
+its "shadow" goes away and the active bitmaps of the lower priority
 partitions are rebuilt to see if any suspended jobs can be resumed.
 </P>
 <P>
 The gang scheduler plugin is designed to be <I>reactive</I> to the resource
-allocation decisions made by the "select" plugins. 
-The "select" plugins have been enhanced to recognize when job preemption has 
-been configured, and to factor in the priority of each partition when selecting resources for a job. 
-When choosing resources for each job, the selector avoids resources that are 
-in use by other jobs (unless sharing has been configured, in which case it 
-does some load-balancing). 
-However, when job preemption is enabled, the select plugins may choose 
+allocation decisions made by the "select" plugins.
+The "select" plugins have been enhanced to recognize when job preemption has
+been configured, and to factor in the priority of each partition when selecting resources for a job.
+When choosing resources for each job, the selector avoids resources that are
+in use by other jobs (unless sharing has been configured, in which case it
+does some load-balancing).
+However, when job preemption is enabled, the select plugins may choose
 resources that are already in use by jobs from partitions with a lower
 priority setting, even when sharing is disabled in those partitions.
 </P>
 <P>
-This leaves the gang scheduler in charge of controlling which jobs should run 
-on the over-allocated resources. 
-If <I>PreemptMode=SUSPEND</I>, jobs are suspended using the same internal 
-functions that support <I>scontrol suspend</I> and <I>scontrol resume</I>. 
-A good way to observe the operation of the gang scheduler is by running 
+This leaves the gang scheduler in charge of controlling which jobs should run
+on the over-allocated resources.
+If <I>PreemptMode=SUSPEND</I>, jobs are suspended using the same internal
+functions that support <I>scontrol suspend</I> and <I>scontrol resume</I>.
+A good way to observe the operation of the gang scheduler is by running
 <I>squeue -i&lt;time&gt;</I> in a terminal window.
 </P>
 
@@ -203,7 +203,7 @@ A good way to observe the operation of the gang scheduler is by running
 
 <P>
 The following example is configured with <I>select/linear</I> and
-<I>PreemptMode=SUSPEND,GANG</I>. 
+<I>PreemptMode=SUSPEND,GANG</I>.
 This example takes place on a cluster of 5 nodes:
 </P>
 <PRE>
@@ -284,8 +284,8 @@ JOBID PARTITION     NAME   USER  ST   TIME  NODES NODELIST
 
 <P>
 <B>More intelligence in the select plugins</B>: This implementation of
-preemption relies on intelligent job placement by the <I>select</I> plugins. 
-In SLURM version 2.0 preemptive placement support was added to the 
+preemption relies on intelligent job placement by the <I>select</I> plugins.
+In SLURM version 2.0 preemptive placement support was added to the
 SelectType plugins, but there is still room for improvement.
 </P><P>
 Take the following example:
diff --git a/doc/html/preemption_plugins.shtml b/doc/html/preemption_plugins.shtml
index 1097a59a76ec7be509156e9ab417d5ba25577fa7..09b415c4dfb24d164d2e2defc103eb5fd3b8dfdf 100644
--- a/doc/html/preemption_plugins.shtml
+++ b/doc/html/preemption_plugins.shtml
@@ -8,7 +8,7 @@ them. It is intended as a resource to programmers wishing to write their own
 SLURM preemption plugins. This is version 100 of the API.</p>
 
 <p>SLURM preemption plugins are SLURM plugins that identify which jobs
-can be preempted by a pending job. They must conform to the SLURM Plugin 
+can be preempted by a pending job. They must conform to the SLURM Plugin
 API with the following specifications:</p>
 
 <p><span class="commandline">const char
@@ -19,9 +19,9 @@ We recommend, for example:</p>
 
 <ul>
 <li><b>none</b>&#151;This plugin prevents any job preemption.</li>
-<li><b>partition_prio</b>&#151;This plugin permit pending jobs from one 
+<li><b>partition_prio</b>&#151;This plugin permits pending jobs from one
 partition to preempt jobs from a lower priority partition.</li>
-<li><b>qos</b>&#151;This plugin permits jobs to preempt others based 
+<li><b>qos</b>&#151;This plugin permits jobs to preempt others based
 upon their Quality Of Service values as defined in the SLURM database.</li>
 </ul>
 
@@ -31,29 +31,29 @@ Plugin API require no specialization for job preemption support.
 Note carefully, however, the versioning discussion below.</p>
 
 <p>The programmer is urged to study
-<span class="commandline">src/plugins/preempt/partition_prio/preempt_partition_prio.c</span> 
+<span class="commandline">src/plugins/preempt/partition_prio/preempt_partition_prio.c</span>
 for an example implementation of a SLURM preemption plugin.</p>
 
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>API Functions</h2>
-<p>The following functions must appear. Functions which are not implemented 
+<p>The following functions must appear. Functions which are not implemented
 should be stubbed.</p>
 
 <p class="commandline">List find_preemptable_jobs(
 struct job_record *job_ptr)</p>
-<p style="margin-left:.2in"><b>Description</b>: Identifies the jobs 
+<p style="margin-left:.2in"><b>Description</b>: Identifies the jobs
 which can be preempted by a specific pending job.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline">job_ptr</span> (input) a pointer to the 
+<span class="commandline">job_ptr</span> (input) a pointer to the
 pending job which is attempting to be started</p>
-<p style="margin-left:.2in"><b>Returns</b>: A list of pointers to 
-jobs which may be preempted. 
-The list should be be released using the <i>list_destroy</i> function when 
+<p style="margin-left:.2in"><b>Returns</b>: A list of pointers to
+jobs which may be preempted.
+The list should be released using the <i>list_destroy</i> function when
 no longer required.
 This list should be sorted in order from most attractive to
 preempt to least attractive to preempt (e.g. lowest to highest priority).
-For example, even within a given partition or QOS one might want to 
+For example, even within a given partition or QOS one might want to preempt
 smaller jobs first.</p>
 
 <h2>Versioning</h2>
diff --git a/doc/html/priority_multifactor.shtml b/doc/html/priority_multifactor.shtml
index 06234de71e4d8a2b939d5d61b55471ff99761c66..1cac0c9bba5fd69caff586f604d095a7866ab042 100644
--- a/doc/html/priority_multifactor.shtml
+++ b/doc/html/priority_multifactor.shtml
@@ -226,7 +226,7 @@ Where:
   seconds or days as appropriate for each site.  The measurement
   period is nominally 5 minutes.  The decay factor, D, is assigned the
   value that will achieve the half-life decay rate specified by
-  the <i>PriorityDecayHalfLife</i> parameter.</P> 
+  the <i>PriorityDecayHalfLife</i> parameter.</P>
 
 <P> The historical resources a machine has available could be similarly aggregated with the same decay factor:</P>
 
@@ -438,9 +438,9 @@ factor as it is currently configured.</P>
   composite usage value.  The higher the number, the longer past usage
   affects fair-share.  If set to 0 no decay will be applied.  This is helpful if
   you want to enforce hard time limits per association.  If set to 0
-  PriorityUsageResetPeriod must be set to some interval. 
+  PriorityUsageResetPeriod must be set to some interval.
   The unit is a time string (i.e. min, hr:min:00, days-hr:min:00, or
-  days-hr).  The default value is 7-0 (7 days). 
+  days-hr).  The default value is 7-0 (7 days).
 <DT> PriorityUsageResetPeriod
 <DD> At this interval the usage of associations will be reset to 0.
   This is used if you want to enforce hard limits of time usage per
@@ -451,7 +451,7 @@ factor as it is currently configured.</P>
   running on your cluster, but if your schema is set up to only allow
   certain amounts of time on your system this is the way to do it.
   The unit is a time string (i.e. min, hr:min:00, days-hr:min:00, or
-  days-hr). The default value is not set (turned off). 
+  days-hr). The default value is not set (turned off).
 
 <DT> PriorityFavorSmall
 <DD> A boolean that sets the polarity of the job size factor.  The
@@ -461,7 +461,7 @@ factor as it is currently configured.</P>
 <DT> PriorityMaxAge
 <DD> Specifies the queue wait time at which the age factor maxes out.
   The unit is a time string (i.e. min, hr:min:00, days-hr:min:00, or
-  days-hr).  The default value is 7-0 (7 days). 
+  days-hr).  The default value is 7-0 (7 days).
 <DT> PriorityWeightAge
 <DD> An unsigned integer that scales the contribution of the age factor.
 <DT> PriorityWeightFairshare
@@ -486,7 +486,7 @@ factor as it is currently configured.</P>
 <P> The first example is for running the plugin applying decay over
   time to reduce usage.  Hard limits can be used in this
   configuration, but will have less effect since usage will decay
-  over time instead of having no decay over time.</P> 
+  over time instead of having no decay over time.</P>
 <PRE>
 # Activate the Multi-factor Job Priority Plugin with decay
 PriorityType=priority/multifactor
diff --git a/doc/html/priority_plugins.shtml b/doc/html/priority_plugins.shtml
index 222e494ece62c749dcf12ca8ed7c4e09b824b743..b5053c8babc517fdb9244d42317a0264b64f1049 100644
--- a/doc/html/priority_plugins.shtml
+++ b/doc/html/priority_plugins.shtml
@@ -29,7 +29,7 @@ Plugin API require no specialization for job priority support.
 Note carefully, however, the versioning discussion below.</p>
 
 <p>The programmer is urged to study
-<span class="commandline">src/plugins/priority/basic/priority_basic.c</span> 
+<span class="commandline">src/plugins/priority/basic/priority_basic.c</span>
 for an example implementation of a SLURM priority plugin.</p>
 
 <p class="footer"><a href="#top">top</a></p>
diff --git a/doc/html/proctrack_plugins.shtml b/doc/html/proctrack_plugins.shtml
index 9c529a8a55b5e6f3d784a36b251b86476723c368..a3c4a17e693b84a9029be4711f77cd82404a2d0b 100644
--- a/doc/html/proctrack_plugins.shtml
+++ b/doc/html/proctrack_plugins.shtml
@@ -3,109 +3,109 @@
 <h1><a name="top">SLURM Process Tracking Plugin API</a></h1>
 
 <h2> Overview</h2>
-<p> This document describes SLURM process tracking plugins and the API 
-that defines them. 
-It is intended as a resource to programmers wishing to write their 
-own SLURM process tracking plugins. 
+<p> This document describes SLURM process tracking plugins and the API
+that defines them.
+It is intended as a resource to programmers wishing to write their
+own SLURM process tracking plugins.
 This is version 0 of the API.</p>
 
-<p>SLURM process tracking plugins are SLURM plugins that implement 
-the SLURM process tracking API described herein. 
-They must conform to the SLURM Plugin API with the following 
+<p>SLURM process tracking plugins are SLURM plugins that implement
+the SLURM process tracking API described herein.
+They must conform to the SLURM Plugin API with the following
 specifications:</p>
 
 <p><span class="commandline">const char plugin_type[]</span><br>
-The major type must be &quot;proctrack.&quot; 
-The minor type can be any recognizable abbreviation for the type 
+The major type must be &quot;proctrack.&quot;
+The minor type can be any recognizable abbreviation for the type
 of proctrack. We recommend, for example:</p>
 <ul>
-<li><b>aix</b>&#151;Perform process tracking on an AIX platform. 
-NOTE: This requires a kernel extension that records 
+<li><b>aix</b>&#151;Perform process tracking on an AIX platform.
+NOTE: This requires a kernel extension that records
 ever process creation and termination.</li>
-<li><b>linuxproc</b>&#151;Perform process tracking based upon a scan 
-of the Linux process table and use the parent process ID to determine 
-what processes are members of a SLURM job. NOTE: This mechanism is 
+<li><b>linuxproc</b>&#151;Perform process tracking based upon a scan
+of the Linux process table and use the parent process ID to determine
+what processes are members of a SLURM job. NOTE: This mechanism is
 not entirely reliable for process tracking.</li>
 <li><b>pgid</b>&#151;Use process group ID to determine
 what processes are members of a SLURM job. NOTE: This mechanism is
 not entirely reliable for process tracking.</li>
-<li><b>rms</b>&#151;Use a Quadrics RMS kernel patch to 
+<li><b>rms</b>&#151;Use a Quadrics RMS kernel patch to
 establish what processes are members of a SLURM job.
 NOTE: This requires a kernel patch that records
 every process creation and termination.</li>
 <li><b>sgj_job</b>&#151;Use <a href="http://oss.sgi.com/projects/pagg/">
-SGI's Process Aggregates (PAGG) kernel module</a>. 
-NOTE: This kernel module records every process creation 
+SGI's Process Aggregates (PAGG) kernel module</a>.
+NOTE: This kernel module records every process creation
 and termination.</li>
 </ul>
 
-<p>The <span class="commandline">plugin_name</span> and 
-<span class="commandline">plugin_version</span> symbols required 
+<p>The <span class="commandline">plugin_name</span> and
+<span class="commandline">plugin_version</span> symbols required
 by the SLURM Plugin API require no specialization for process tracking.
 Note carefully, however, the versioning discussion below.</p>
 
-<p>The programmer is urged to study 
-<span class="commandline">src/plugins/proctrack/pgid/proctrack_pgid.c</span> 
+<p>The programmer is urged to study
+<span class="commandline">src/plugins/proctrack/pgid/proctrack_pgid.c</span>
 for an example implementation of a SLURM proctrack plugin.</p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Data Objects</h2>
 <p> The implementation must support a container id of type uint32_t.
-This container ID is maintained by the plugin directly in the slurmd 
+This container ID is maintained by the plugin directly in the slurmd
 job structure using the field named <i>cont_id</i>.</p>
 
-<p>The implementation must maintain (though not necessarily directly export) an 
-enumerated <b>errno</b> to allow SLURM to discover as practically as possible 
+<p>The implementation must maintain (though not necessarily directly export) an
+enumerated <b>errno</b> to allow SLURM to discover as practically as possible
 the reason for any failed API call.
-These values must not be used as return values in integer-valued functions 
-in the API. 
-The proper error return value from integer-valued functions is SLURM_ERROR. 
-The implementation should endeavor to provide useful and pertinent information 
-by whatever means is practical. 
+These values must not be used as return values in integer-valued functions
+in the API.
+The proper error return value from integer-valued functions is SLURM_ERROR.
+The implementation should endeavor to provide useful and pertinent information
+by whatever means is practical.
 Successful API calls are not required to reset errno to a known value.</p>
 
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>API Functions</h2>
-<p>The following functions must appear. Functions which are not implemented should 
+<p>The following functions must appear. Functions which are not implemented should
 be stubbed.</p>
 
 <p class="commandline">int slurm_container_create (slurmd_job_t *job);</p>
 <p style="margin-left:.2in"><b>Description</b>: Create a container.
-The container should be valid   
+The container should be valid until
 <span class="commandline">slurm_container_destroy()</span> is called.
-This function must put the container ID directory in the job structure's 
+This function must put the container ID directly in the job structure's
 variable <i>cont_id</i>.</p>
-<p style="margin-left:.2in"><b>Argument</b>: 
-<span class="commandline"> job</span>&nbsp; &nbsp;&nbsp;(input/output) 
+<p style="margin-left:.2in"><b>Argument</b>:
+<span class="commandline"> job</span>&nbsp; &nbsp;&nbsp;(input/output)
 Pointer to a slurmd job structure.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">int slurm_container_add (slurmd_job_t *job, pid_t pid);</p>
-<p style="margin-left:.2in"><b>Description</b>: Add a specific process ID 
+<p style="margin-left:.2in"><b>Description</b>: Add a specific process ID
 to a given job's container.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline"> job</span>&nbsp; &nbsp;&nbsp;(input) 
+<span class="commandline"> job</span>&nbsp; &nbsp;&nbsp;(input)
 Pointer to a slurmd job structure.<br>
 <span class="commandline"> pid</span>&nbsp; &nbsp;&nbsp;(input)
 The ID of the process to add to this job's container.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
-the plugin should return SLURM_ERROR and set the errno to an appropriate value 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">int slurm_container_signal (uint32_t id, int signal);</p>
-<p style="margin-left:.2in"><b>Description</b>: Signal all processes in a given 
+<p style="margin-left:.2in"><b>Description</b>: Signal all processes in a given
 job's container.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline"> id</span> &nbsp;&nbsp;(input) 
+<span class="commandline"> id</span> &nbsp;&nbsp;(input)
 Job container's ID.<br>
 <span class="commandline"> signal</span> &nbsp;&nbsp;(input)
-Signal to be sent to processes. Note that a signal of zero 
+Signal to be sent to processes. Note that a signal of zero
 just tests for the existence of processes in a given job container.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if the signal 
-was sent. 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if the signal
+was sent.
 If the signal can not be sent, the function should return SLURM_ERROR and set
 its errno to an appropriate value to indicate the reason for failure.</p>
 
@@ -114,28 +114,28 @@ its errno to an appropriate value to indicate the reason for failure.</p>
 <p class="commandline">int slurm_container_destroy (uint32_t id);</p>
 <p style="margin-left:.2in"><b>Description</b>: Destroy or  otherwise
 invalidate a job container.
-This does not imply the container is empty, just that it is no longer 
+This does not imply the container is empty, just that it is no longer
 needed.</p>
-<p style="margin-left:.2in"><b>Arguments</b>: 
-<span class="commandline"> id</span> &nbsp;&nbsp; (input) 
+<p style="margin-left:.2in"><b>Arguments</b>:
+<span class="commandline"> id</span> &nbsp;&nbsp; (input)
 Job container's ID.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">uint32_t slurm_container_find (pid_t pid);</p>
-<p style="margin-left:.2in"><b>Description</b>: 
+<p style="margin-left:.2in"><b>Description</b>:
 Given a process ID, return its job container ID.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
-<span class="commandline"> pid</span>&nbsp; &nbsp;&nbsp;(input) 
+<span class="commandline"> pid</span>&nbsp; &nbsp;&nbsp;(input)
 A process ID.</p>
-<p style="margin-left:.2in"><b>Returns</b>: The job container ID 
+<p style="margin-left:.2in"><b>Returns</b>: The job container ID
 with this process or zero if none is found.</p>
 
 <h2>Versioning</h2>
-<p> This document describes version 0 of the SLURM Process Tracking API. 
-Future releases of SLURM may revise this API. A process tracking plugin 
-conveys its ability to implement a particular API version using the 
+<p> This document describes version 0 of the SLURM Process Tracking API.
+Future releases of SLURM may revise this API. A process tracking plugin
+conveys its ability to implement a particular API version using the
 mechanism outlined for SLURM plugins.</p>
 
 <p class="footer"><a href="#top">top</a></p>
diff --git a/doc/html/programmer_guide.shtml b/doc/html/programmer_guide.shtml
index 6e9c59e8f633560f84ee48e9cd72dc526247fb7d..bca0780524a9057ba6dc110a724dc74df4b4e3fa 100644
--- a/doc/html/programmer_guide.shtml
+++ b/doc/html/programmer_guide.shtml
@@ -4,39 +4,39 @@
 
 <h2>Overview</h2>
 
-<p>Simple Linux Utility for Resource Management (SLURM) is an open source, fault-tolerant, 
-and highly scalable cluster management and job scheduling system for large and 
-small Linux clusters. Components include machine status, partition management, 
-job management, scheduling, and stream copy modules. SLURM requires no kernel 
-modifications for it operation and is relatively self-contained. 
-<p>There is an overview of the components and their interactions available in 
-a separate document, <a href="slurm_design.pdf"> SLURM: Simple Linux Utility for 
-Resource Management</a> [PDF]. 
-
-<p>SLURM is written in the C language and uses a GNU <b>autoconf</b> configuration 
-engine. While initially written for Linux, other UNIX-like operating systems should 
-be easy porting targets. Code should adhere to the <a href="coding_style.pdf"> 
-Linux kernel coding style</a>. <i>(Some components of SLURM have been taken from 
-various sources. Some of these components do not conform to the Linux kernel 
+<p>Simple Linux Utility for Resource Management (SLURM) is an open source, fault-tolerant,
+and highly scalable cluster management and job scheduling system for large and
+small Linux clusters. Components include machine status, partition management,
+job management, scheduling, and stream copy modules. SLURM requires no kernel
+modifications for its operation and is relatively self-contained.
+<p>There is an overview of the components and their interactions available in
+a separate document, <a href="slurm_design.pdf"> SLURM: Simple Linux Utility for
+Resource Management</a> [PDF].
+
+<p>SLURM is written in the C language and uses a GNU <b>autoconf</b> configuration
+engine. While initially written for Linux, other UNIX-like operating systems should
+be easy porting targets. Code should adhere to the <a href="coding_style.pdf">
+Linux kernel coding style</a>. <i>(Some components of SLURM have been taken from
+various sources. Some of these components do not conform to the Linux kernel
 coding style. However, new code written for SLURM should follow these standards.)</i>
- 
-<p>Many of these modules have been built and tested on a variety of Unix computers 
-including Red Hat Linux, IBM's AIX, Sun's Solaris, and Compaq's Tru-64. The only 
-module at this time that is operating system dependent is <span class="commandline">src/slurmd/read_proc.c</span>. 
-We will be porting and testing on additional platforms in future releases. 
+
+<p>Many of these modules have been built and tested on a variety of Unix computers
+including Red Hat Linux, IBM's AIX, Sun's Solaris, and Compaq's Tru-64. The only
+module at this time that is operating system dependent is <span class="commandline">src/slurmd/read_proc.c</span>.
+We will be porting and testing on additional platforms in future releases.
 
 <h2>Plugins</h2>
 
-<p>To make the use of different infrastructures possible, SLURM uses a general 
-purpose plugin mechanism. A SLURM plugin is a dynamically linked code object that 
-is loaded explicitly at run time by the SLURM libraries. It provides a customized 
-implementation of a well-defined API connected to tasks such as authentication, 
-interconnect fabric, task scheduling, etc. A set of functions is defined for use 
-by all of the different infrastructures of a particular variety. When a SLURM 
-daemon is initiated, it reads the configuration file to determine which of the 
-available plugins should be used. A <a href="plugins.html">plugin developer's 
-guide</a> is available with general information about plugins. Most plugin 
-types also have their own documentation available, such as 
+<p>To make the use of different infrastructures possible, SLURM uses a general
+purpose plugin mechanism. A SLURM plugin is a dynamically linked code object that
+is loaded explicitly at run time by the SLURM libraries. It provides a customized
+implementation of a well-defined API connected to tasks such as authentication,
+interconnect fabric, task scheduling, etc. A set of functions is defined for use
+by all of the different infrastructures of a particular variety. When a SLURM
+daemon is initiated, it reads the configuration file to determine which of the
+available plugins should be used. A <a href="plugins.html">plugin developer's
+guide</a> is available with general information about plugins. Most plugin
+types also have their own documentation available, such as
 <a href="authplugins.html">SLURM Authentication Plugin API</a> and
 <a href="jobcompplugins.html">SLURM Job Completion Logging API</a>.</p>
 
@@ -44,31 +44,31 @@ types also have their own documentation available, such as
 
 <h2>Directory Structure</h2>
 
-<p>The contents of the SLURM directory structure will be described below in increasing 
-detail as the structure is descended. The top level directory contains the scripts 
-and tools required to build the entire SLURM system. It also contains a variety 
+<p>The contents of the SLURM directory structure will be described below in increasing
+detail as the structure is descended. The top level directory contains the scripts
+and tools required to build the entire SLURM system. It also contains a variety
 of subdirectories for each type of file.</p>
-<p>General build tools/files include: <b>acinclude.m4</b>, <b>autogen.sh</b>, 
-<b>configure.ac</b>, <b>Makefile.am</b>, <b>Make-rpm.mk</b>, <b>META</b>, <b>README</b>, 
-<b>slurm.spec.in</b>, and the contents of the <b>auxdir</b> directory. <span class="commandline">autoconf</span> 
-and <span class="commandline">make</span> commands are used to build and install 
-SLURM in an automated fashion. NOTE: <span class="commandline">autoconf</span> 
-version 2.52 or higher is required to build SLURM. Execute 
-<span class="commandline">autoconf -V</span> to check your version number. 
-The build process is described in the README file. 
-
-<p>Copyright and disclaimer information are in the files COPYING and DISCLAIMER. 
+<p>General build tools/files include: <b>acinclude.m4</b>, <b>autogen.sh</b>,
+<b>configure.ac</b>, <b>Makefile.am</b>, <b>Make-rpm.mk</b>, <b>META</b>, <b>README</b>,
+<b>slurm.spec.in</b>, and the contents of the <b>auxdir</b> directory. <span class="commandline">autoconf</span>
+and <span class="commandline">make</span> commands are used to build and install
+SLURM in an automated fashion. NOTE: <span class="commandline">autoconf</span>
+version 2.52 or higher is required to build SLURM. Execute
+<span class="commandline">autoconf -V</span> to check your version number.
+The build process is described in the README file.
+
+<p>Copyright and disclaimer information are in the files COPYING and DISCLAIMER.
 All of the top-level subdirectories are described below.</p>
 
 <p style="margin-left:.2in"><b>auxdir</b>&#151;Used for building SLURM.<br>
 <b>contribs</b>&#151;Various contributed tools.<br>
 <b>doc</b>&#151;Documentation including man pages. <br>
 <b>etc</b>&#151;Sample configuration files.<br>
-<b>slurm</b>&#151;Header files for API use. These files must be installed. Placing 
+<b>slurm</b>&#151;Header files for API use. These files must be installed. Placing
 these header files in this location makes for better code portability.<br>
-<b>src</b>&#151;Contains all source code and header files not in the "slurm" subdirectory 
+<b>src</b>&#151;Contains all source code and header files not in the "slurm" subdirectory
 described above.<br>
-<b>testsuite</b>&#151;DejaGnu and Expect are used for testing all of its files 
+<b>testsuite</b>&#151;DejaGnu and Expect are used for testing; all of its files
 are here.</p>
 
 <p class="footer"><a href="#top">top</a></p>
@@ -78,21 +78,21 @@ are here.</p>
 Two directories are of particular interest:</p>
 
 <p style="margin-left:.2in">
-<b>doc/man</b>&#151; contains the man pages for the APIs, 
+<b>doc/man</b>&#151; contains the man pages for the APIs,
 configuration file, commands, and daemons.<br>
 <b>doc/html</b>&#151; contains the web pages.</p>
- 
+
 <h2>Source Code</h2>
 
-<p>Functions are divided into several categories, each in its own subdirectory. 
-The details of each directory's contents are proved below. The directories are 
+<p>Functions are divided into several categories, each in its own subdirectory.
+The details of each directory's contents are provided below. The directories are
 as follows: </p>
 
 <p style="margin-left:.2in">
-<b>api</b>&#151;Application Program Interfaces into 
-the SLURM code. Used to send and get SLURM information from the central manager. 
+<b>api</b>&#151;Application Program Interfaces into
+the SLURM code. Used to send and get SLURM information from the central manager.
 These are the functions user applications might utilize.<br>
-<b>common</b>&#151;General purpose functions for widespread use throughout 
+<b>common</b>&#151;General purpose functions for widespread use throughout
 SLURM.<br>
 <b>database</b>&#151;Various database files that support the accounting
  storage plugin.<br>
@@ -120,16 +120,16 @@ topology.<br>
 <b>sacct</b>&#151;User command to view accounting information about jobs.<br>
 <b>sacctmgr</b>&#151;User and administrator tool to manage accounting.<br>
 <b>salloc</b>&#151;User command to allocate resources for a job.<br>
-<b>sattach</b>&#151;User command to attach standard input, output and error 
+<b>sattach</b>&#151;User command to attach standard input, output and error
 files to a running job or job step.<br>
 <b>sbatch</b>&#151;User command to submit a batch job (script for later execution).<br>
-<b>sbcast</b>&#151;User command to broadcast a file to all nodes associated 
+<b>sbcast</b>&#151;User command to broadcast a file to all nodes associated
 with an existing SLURM job.<br>
 <b>scancel</b>&#151;User command to cancel (or signal) a job or job step.<br>
 <b>scontrol</b>&#151;Administrator tool to manage SLURM.<br>
 <b>sinfo</b>&#151;User command to get information on SLURM nodes and partitions.<br>
 <b>slurmctld</b>&#151;SLURM central manager daemon code.<br>
-<b>slurmd</b>&#151;SLURM daemon code to manage the compute server nodes including 
+<b>slurmd</b>&#151;SLURM daemon code to manage the compute server nodes including
 the execution of user applications.<br>
 <b>slurmdbd</b>&#151;SLURM database daemon managing access to the accounting
 storage database.<br>
@@ -141,7 +141,7 @@ calculation when the Multifactor Job Priority plugin is installed.<br>
 <b>squeue</b>&#151;User command to get information on SLURM jobs and job steps.<br>
 <b>sreport</b>&#151;User command to view various reports about past
 usage across the enterprise.<br>
-<b>srun</b>&#151;User command to submit a job, get an allocation, and/or 
+<b>srun</b>&#151;User command to submit a job, get an allocation, and/or
 initiation a parallel job step.<br>
 <b>srun_cr</b>&#151;Checkpoint/Restart wrapper for srun.<br>
 <b>sshare</b>&#151;User command to view shares and usage when the Multifactor
@@ -149,36 +149,36 @@ Job Priority plugin is installed.<br>
 <b>sstat</b>&#151;User command to view detailed statistics about running
 jobs when a Job Accounting Gather plugin is installed.<br>
 <b>strigger</b>&#151;User and administrator tool to manage event triggers.<br>
-<b>sview</b>&#151;User command to view and update node, partition, and job 
+<b>sview</b>&#151;User command to view and update node, partition, and
 job state information.<br>
 
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Configuration</h2>
-<p>Sample configuration files are included in the <b>etc</b> subdirectory. 
-The <b>slurm.conf</b> can be built using a <a href="configurator.html">configuration tool</a>. 
-See <b>doc/man/man5/slurm.conf.5</b> and the man pages for other configuration files 
+<p>Sample configuration files are included in the <b>etc</b> subdirectory.
+The <b>slurm.conf</b> can be built using a <a href="configurator.html">configuration tool</a>.
+See <b>doc/man/man5/slurm.conf.5</b> and the man pages for other configuration files
 for more details.
-<b>init.d.slurm</b> is a script that determines which 
-SLURM daemon(s) should execute on any node based upon the configuration file contents. 
-It will also manage these daemons: starting, signalling, restarting, and stopping them.</p> 
+<b>init.d.slurm</b> is a script that determines which
+SLURM daemon(s) should execute on any node based upon the configuration file contents.
+It will also manage these daemons: starting, signalling, restarting, and stopping them.</p>
 
 <h2>Test Suite</h2>
-<p>The <b>testsuite</b> files use a DejaGnu framework for testing. These tests 
+<p>The <b>testsuite</b> files use a DejaGnu framework for testing. These tests
 are very limited in scope.</p>
 
 <p>We also have a set of Expect SLURM tests available under the <b>testsuite/expect</b>
-directory.  These tests are executed after SLURM has been installed 
-and the daemons initiated. About 250 test scripts exercise all SLURM commands 
+directory.  These tests are executed after SLURM has been installed
+and the daemons initiated. About 250 test scripts exercise all SLURM commands
 and options including stress tests.  The file <b>testsuite/expect/globals</b>
 contains default paths and procedures for all of the individual tests.  At
 the very least, you will need to set the <i>slurm_dir</i> variable to the correct
 value.  To avoid conflicts with other developers, you can override variable settings
 in a separate file named <b>testsuite/expect/globals.local</b>.</p>
 
-<p>Set your working directory to <b>testsuite/expect</b> before 
+<p>Set your working directory to <b>testsuite/expect</b> before
 starting these tests.  Tests may be executed individually by name
-(e.g.  <i>test1.1</i>) 
+(e.g.  <i>test1.1</i>)
 or the full test suite may be executed with the single command <i>regression</i>.
 See <b>testsuite/expect/README</b> for more information.</p>
 
@@ -186,20 +186,20 @@ See <b>testsuite/expect/README</b> for more information.</p>
 <h2>Adding Files and Directories</h2>
 <p>If you are adding files and directories to SLURM, it will be necessary to
 re-build configuration files before executing the <b>configure</b> command.
-Update <b>Makefile.am</b> files as needed then execute 
+Update <b>Makefile.am</b> files as needed then execute
 <b>autogen.sh</b> before executing <b>configure</b>.
 
 <h2>Tricks of the Trade</h2>
 <h3>HAVE_FRONT_END</h3>
 <p>You can make a single node appear to SLURM as a Linux cluster by running
-<i>configure</i> with the <i>--enable-front-end</i> option. This 
+<i>configure</i> with the <i>--enable-front-end</i> option. This
 defines b>HAVE_FRONT_END</b> with a non-zero value in the file <b>config.h</b>.
 All (fake) nodes should be defined in the <b>slurm.conf</b> file.
 These nodes should be configured with a single <b>NodeAddr</b> value
-indicating the node on which single <span class="commandline">slurmd</span> daemon 
-executes.  Initiate one <span class="commandline">slurmd</span> and one 
-<span class="commandline">slurmctld</span> daemon. Do not initiate too many 
-simultaneous job steps to avoid overloading the 
+indicating the node on which a single <span class="commandline">slurmd</span> daemon
+executes.  Initiate one <span class="commandline">slurmd</span> and one
+<span class="commandline">slurmctld</span> daemon. Do not initiate too many
+simultaneous job steps to avoid overloading the
 <span class="commandline">slurmd</span> daemon executing them all.</p>
 
 <h3><a name="multiple_slurmd_support">Multiple slurmd support</a></h3>
diff --git a/doc/html/publications.shtml b/doc/html/publications.shtml
index 49c34f8f7585e736b1b5fd75b2b992f39464fe22..62024247a6c7e190e64c41dc0e7ec415ad8fe883 100644
--- a/doc/html/publications.shtml
+++ b/doc/html/publications.shtml
@@ -7,12 +7,12 @@
 <h2>Presentations</h2>
 
 <ul>
-<li><a href="slurm.sc08.bof.pdf">High Scalability Resource Management with 
+<li><a href="slurm.sc08.bof.pdf">High Scalability Resource Management with
 SLURM</a> (Supercomputing 2008, November 2008)</li>
 <li><a href="slurm.sc08.status.pdf">SLURM Status Report</a>
 (Supercomputing 2008, November 2008)</li>
 <li><a href="slurm_v1.3.pdf">SLURM Version 1.3</a> (May 2008)</li>
-<li><a href="slurm_moab.pdf">Managing Clusters with Moab and SLURM</a> 
+<li><a href="slurm_moab.pdf">Managing Clusters with Moab and SLURM</a>
 (May 2008)</li>
 <li><a href="slurm_v1.2.pdf">Resource Management at LLNL, SLURM Version 1.2</a>
 (April 2007)</li>
@@ -36,14 +36,14 @@ S. M. Balle and D. Palermo,
 
 <p><a href="slurm_design.pdf">
 <b>SLURM: Simple Linux Utility for Resource Management</b></a> [PDF],
-M. Jette and M. Grondona, 
+M. Jette and M. Grondona,
 <i>Proceedings of ClusterWorld Conference and Expo</i>,
 San Jose, California, June 2003.</p>
 
 <p><b>SLURM: Simple Linux Utility for Resource Management</b>,
 A. Yoo, M. Jette, and M. Grondona,
 <i>Job Scheduling Strategies for Parallel Processing</i>,
-volume 2862 of <i>Lecture Notes in Computer Science</i>, 
+volume 2862 of <i>Lecture Notes in Computer Science</i>,
 pages 44-60,
 Springer-Verlag, 2003.</p>
 
@@ -51,7 +51,7 @@ Springer-Verlag, 2003.</p>
 
 <p><a href="http://www.rce-cast.com/index.php/Podcast/rce-10-slurm.html">
 RCE 10: SLURM (podcast)</a>:
-Brock Palen and Jeff Squyres speak with Morris Jette and 
+Brock Palen and Jeff Squyres speak with Morris Jette and
 Danny Auble of LLNL about SLURM.</p>
 
 <p style="text-align:center;">Last modified 27 May 2009</p>
diff --git a/doc/html/quickstart.shtml b/doc/html/quickstart.shtml
index c4a21e1e1d2a08bd92b1edec0a70bc54b206a673..c14563e481850189a0903520ff193c743f106b5c 100644
--- a/doc/html/quickstart.shtml
+++ b/doc/html/quickstart.shtml
@@ -3,25 +3,25 @@
 <h1>Quick Start User Guide</h1>
 
 <h2>Overview</h2>
-<p>The Simple Linux Utility for Resource Management (SLURM) is an open source, 
-fault-tolerant, and highly scalable cluster management and job scheduling system 
-for large and small Linux clusters. SLURM requires no kernel modifications for 
-its operation and is relatively self-contained. As a cluster resource manager, 
-SLURM has three key functions. First, it allocates exclusive and/or non-exclusive 
-access to resources (compute nodes) to users for some duration of time so they 
-can perform work. Second, it provides a framework for starting, executing, and 
-monitoring work (normally a parallel job) on the set of allocated nodes. Finally, 
+<p>The Simple Linux Utility for Resource Management (SLURM) is an open source,
+fault-tolerant, and highly scalable cluster management and job scheduling system
+for large and small Linux clusters. SLURM requires no kernel modifications for
+its operation and is relatively self-contained. As a cluster resource manager,
+SLURM has three key functions. First, it allocates exclusive and/or non-exclusive
+access to resources (compute nodes) to users for some duration of time so they
+can perform work. Second, it provides a framework for starting, executing, and
+monitoring work (normally a parallel job) on the set of allocated nodes. Finally,
 it arbitrates contention for resources by managing a queue of pending work.</p>
 
 <h2>Architecture</h2>
-<p>As depicted in Figure 1, SLURM consists of a <b>slurmd</b> daemon running on 
-each compute node and a central <b>slurmctld</b> daemon running on a management node 
-(with optional fail-over twin). 
+<p>As depicted in Figure 1, SLURM consists of a <b>slurmd</b> daemon running on
+each compute node and a central <b>slurmctld</b> daemon running on a management node
+(with optional fail-over twin).
 The <b>slurmd</b> daemons provide fault-tolerant hierarchical communications.
 The user commands include: <b>sacct</b>, <b>salloc</b>, <b>sattach</b>,
-<b>sbatch</b>, <b>sbcast</b>, <b>scancel</b>, <b>scontrol</b>,  
-<b>sinfo</b>, <b>smap</b>, <b>squeue</b>, <b>srun</b>, <b>strigger</b> 
-and <b>sview</b>.  
+<b>sbatch</b>, <b>sbcast</b>, <b>scancel</b>, <b>scontrol</b>,
+<b>sinfo</b>, <b>smap</b>, <b>squeue</b>, <b>srun</b>, <b>strigger</b>
+and <b>sview</b>.
 All of the commands can run anywhere in the cluster.</p>
 
 <div class="figure">
@@ -29,19 +29,19 @@ All of the commands can run anywhere in the cluster.</p>
   Figure 1. SLURM components
 </div>
 
-<p>The entities managed by these SLURM daemons, shown in Figure 2, include 
-<b>nodes</b>, the compute resource in SLURM, 
-<b>partitions</b>, which group nodes into logical (possibly overlapping) sets, 
-<b>jobs</b>, or allocations of resources assigned to a user for 
-a specified amount of time, and 
-<b>job steps</b>, which are sets of (possibly parallel) tasks within a job. 
-The partitions can be considered job queues, each of which has an assortment of 
+<p>The entities managed by these SLURM daemons, shown in Figure 2, include
+<b>nodes</b>, the compute resource in SLURM,
+<b>partitions</b>, which group nodes into logical (possibly overlapping) sets,
+<b>jobs</b>, or allocations of resources assigned to a user for
+a specified amount of time, and
+<b>job steps</b>, which are sets of (possibly parallel) tasks within a job.
+The partitions can be considered job queues, each of which has an assortment of
 constraints such as job size limit, job time limit, users permitted to use it, etc.
-Priority-ordered jobs are allocated nodes within a partition until the resources 
-(nodes, processors, memory, etc.) within that partition are exhausted. Once 
-a job is assigned a set of nodes, the user is able to initiate parallel work in 
-the form of job steps in any configuration within the allocation. For instance, 
-a single job step may be started that utilizes all nodes allocated to the job, 
+Priority-ordered jobs are allocated nodes within a partition until the resources
+(nodes, processors, memory, etc.) within that partition are exhausted. Once
+a job is assigned a set of nodes, the user is able to initiate parallel work in
+the form of job steps in any configuration within the allocation. For instance,
+a single job step may be started that utilizes all nodes allocated to the job,
 or several job steps may independently use a portion of the allocation.</p>
 
 <div class="figure">
@@ -52,23 +52,23 @@ or several job steps may independently use a portion of the allocation.</p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Commands</h2>
-<p>Man pages exist for all SLURM daemons, commands, and API functions. The command 
-option <span class="commandline">--help</span> also provides a brief summary of 
+<p>Man pages exist for all SLURM daemons, commands, and API functions. The command
+option <span class="commandline">--help</span> also provides a brief summary of
 options. Note that the command options are all case insensitive.</p>
 
 <p><span class="commandline"><b>sacct</b></span> is used to report job or job
 step accounting information about active or completed jobs.</p>
 
-<p><span class="commandline"><b>salloc</b></span> is used to allocate resources 
-for a job in real time. Typically this is used to allocate resources and spawn a shell. 
+<p><span class="commandline"><b>salloc</b></span> is used to allocate resources
+for a job in real time. Typically this is used to allocate resources and spawn a shell.
 The shell is then used to execute srun commands to launch parallel tasks.</p>
 
-<p><span class="commandline"><b>sattach</b></span> is used to attach standard 
-input, output, and error plus signal capabilities to a currently running 
+<p><span class="commandline"><b>sattach</b></span> is used to attach standard
+input, output, and error plus signal capabilities to a currently running
 job or job step. One can attach to and detach from jobs multiple times.</p>
 
-<p><span class="commandline"><b>sbatch</b></span> is used to submit a job script 
-for later execution. The script will typically contain one or more srun commands 
+<p><span class="commandline"><b>sbatch</b></span> is used to submit a job script
+for later execution. The script will typically contain one or more srun commands
 to launch parallel tasks.</p>
 
 <p><span class="commandline"><b>sbcast</b></span> is used to transfer a file
@@ -102,42 +102,42 @@ execution or initiate job steps in real time.
 <span class="commandline">srun</span>
 has a wide variety of options to specify resource requirements, including: minimum
 and maximum node count, processor count, specific nodes to use or not use, and
-specific node characteristics (so much memory, disk space, certain required 
+specific node characteristics (so much memory, disk space, certain required
 features, etc.).
 A job can contain multiple job steps executing sequentially or in parallel on
 independent or shared nodes within the job's node allocation.</p>
 
-<p><span class="commandline"><b>smap</b></span> reports state information for 
-jobs, partitions, and nodes managed by SLURM, but graphically displays the 
+<p><span class="commandline"><b>smap</b></span> reports state information for
+jobs, partitions, and nodes managed by SLURM, but graphically displays the
 information to reflect network topology.</p>
 
-<p><span class="commandline"><b>strigger</b></span> is used to set, get or 
+<p><span class="commandline"><b>strigger</b></span> is used to set, get or
 view event triggers. Event triggers include things such as nodes going down
 or jobs approaching their time limit.</p>
 
-<p><span class="commandline"><b>sview</b></span> is a graphical user interface to 
+<p><span class="commandline"><b>sview</b></span> is a graphical user interface to
 get and update state information for jobs, partitions, and nodes managed by SLURM.</p>
 
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Examples</h2>
-<p>First we determine what partitions exist on the system, what nodes 
-they include, and general system state. This information is provided 
-by the <span class="commandline">sinfo</span> command. 
-In the example below we find there are two partitions: <i>debug</i> 
+<p>First we determine what partitions exist on the system, what nodes
+they include, and general system state. This information is provided
+by the <span class="commandline">sinfo</span> command.
+In the example below we find there are two partitions: <i>debug</i>
 and <i>batch</i>.
-The <i>*</i> following the name <i>debug</i> indicates this is the 
-default partition for submitted jobs. 
-We see that both partitions are in an <i>UP</i> state. 
+The <i>*</i> following the name <i>debug</i> indicates this is the
+default partition for submitted jobs.
+We see that both partitions are in an <i>UP</i> state.
 Some configurations may include partitions for larger jobs
-that are <i>DOWN</i> except on weekends or at night. The information 
-about each partition may be split over more than one line so that 
-nodes in different states can be identified. 
-In this case, the two nodes <i>adev[1-2]</i> are <i>down</i>. 
+that are <i>DOWN</i> except on weekends or at night. The information
+about each partition may be split over more than one line so that
+nodes in different states can be identified.
+In this case, the two nodes <i>adev[1-2]</i> are <i>down</i>.
 The <i>*</i> following the state <i>down</i> indicate the nodes are
 not responding. Note the use of a concise expression for node
 name specification with a common prefix <i>adev</i> and numeric
-ranges or specific numbers identified. This format allows for 
+ranges or specific numbers identified. This format allows for
 very clusters to be easily managed.
 The <span class="commandline">sinfo</span> command
 has many options to easily let you view the information of interest
@@ -153,13 +153,13 @@ batch        up      30:00     3  alloc adev[7-8,14]
 batch        up      30:00     4   idle adev[9-12]
 </pre>
 
-<p>Next we determine what jobs exist on the system using the 
+<p>Next we determine what jobs exist on the system using the
 <span class="commandline">squeue</span> command. The
 <i>ST</i> field is job state.
-Two jobs are in a running state (<i>R</i> is an abbreviation 
+Two jobs are in a running state (<i>R</i> is an abbreviation
 for <i>Running</i>) while one job is in a pending state
 (<i>PD</i> is an abbreviation for <i>Pending</i>).
-The <i>TIME</i> field shows how long the jobs have run 
+The <i>TIME</i> field shows how long the jobs have run
 for using the format <i>days-hours:minutes:seconds</i>.
 The <i>NODELIST(REASON)</i> field indicates where the
 job is running or the reason it is still pending. Typical
@@ -168,7 +168,7 @@ for resources to become available) and <i>Priority</i>
 (queued behind a higher priority job).
 The <span class="commandline">squeue</span> command
 has many options to easily let you view the information of interest
-to you in whatever format you prefer. 
+to you in whatever format you prefer.
 See the man page for more information.</p>
 <pre>
 adev0: squeue
@@ -179,15 +179,15 @@ JOBID PARTITION  NAME  USER ST  TIME NODES NODELIST(REASON)
 </pre>
 
 <p>The <span class="commandline">scontrol</span> command
-can be used to report more detailed information about 
+can be used to report more detailed information about
 nodes, partitions, jobs, job steps, and configuration.
 It can also be used by system administrators to make
-configuration changes. A couple of examples are shown 
+configuration changes. A couple of examples are shown
 below. See the man page for more information.</p>
 <pre>
 adev0: scontrol show partition
 PartitionName=debug TotalNodes=5 TotalCPUs=40 RootOnly=NO
-   Default=YES Shared=FORCE:4 Priority=1 State=UP 
+   Default=YES Shared=FORCE:4 Priority=1 State=UP
    MaxTime=00:30:00 Hidden=NO
    MinNodes=1 MaxNodes=26 DisableRootJobs=NO AllowGroups=ALL
    Nodes=adev[1-5] NodeIndices=0-4
@@ -202,7 +202,7 @@ PartitionName=batch TotalNodes=10 TotalCPUs=80 RootOnly=NO
 adev0: scontrol show node adev1
 NodeName=adev1 State=DOWN* CPUs=8 AllocCPUs=0
    RealMemory=4000 TmpDisk=0
-   Sockets=2 Cores=4 Threads=1 Weight=1 Features=intel 
+   Sockets=2 Cores=4 Threads=1 Weight=1 Features=intel
    Reason=Not responding [slurm@06/02-14:01:24]
 
 65648     batch  math  phil PD  0:00     6 (Resources)
@@ -227,14 +227,14 @@ JobId=65672 UserId=phil(5136) GroupId=phil(5136)
 </pre>
 
 <p>It is possible to create a resource allocation and launch
-the tasks for a job step in a single command line using the 
-<span class="commandline">srun</span> command. Depending 
-upon the MPI implementation used, MPI jobs may also be 
-launched in this manner. 
+the tasks for a job step in a single command line using the
+<span class="commandline">srun</span> command. Depending
+upon the MPI implementation used, MPI jobs may also be
+launched in this manner.
 See the <a href="#mpi">MPI</a> section for more MPI-specific information.
-In this example we execute <span class="commandline">/bin/hostname</span> 
-on three nodes (<i>-N3</i>) and include task numbers on the output (<i>-l</i>). 
-The default partition will be used. 
+In this example we execute <span class="commandline">/bin/hostname</span>
+on three nodes (<i>-N3</i>) and include task numbers on the output (<i>-l</i>).
+The default partition will be used.
 One task per node will be used by default.
 Note that the <span class="commandline">srun</span> command has
 many options available to control what resource are allocated
@@ -246,9 +246,9 @@ adev0: srun -N3 -l /bin/hostname
 2: adev5
 </pre>
 
-<p>This variation on the previous example executes 
-<span class="commandline">/bin/hostname</span> in four tasks (<i>-n4</i>). 
-One processor per task will be used by default (note that we don't specify 
+<p>This variation on the previous example executes
+<span class="commandline">/bin/hostname</span> in four tasks (<i>-n4</i>).
+One processor per task will be used by default (note that we don't specify
 a node count).</p>
 <pre>
 adev0: srun -n4 -l /bin/hostname
@@ -259,10 +259,10 @@ adev0: srun -n4 -l /bin/hostname
 </pre>
 
 <p>One common mode of operation is to submit a script for later execution.
-In this example the script name is <i>my.script</i> and we explicitly use 
-the nodes adev9 and adev10 (<i>-w "adev[9-10]"</i>, note the use of a 
-node range expression). 
-We also explicitly state that the subsequent job steps will spawn four tasks 
+In this example the script name is <i>my.script</i> and we explicitly use
+the nodes adev9 and adev10 (<i>-w "adev[9-10]"</i>, note the use of a
+node range expression).
+We also explicitly state that the subsequent job steps will spawn four tasks
 each, which will insure that our allocation contains at least four processors
 (one processor per task to be launched).
 The output will appear in the file my.stdout ("-o my.stdout").
@@ -272,9 +272,9 @@ by the option at the beginning of the script (before any commands to be executed
 in the script).
 Options supplied on the command line would override any options specified within
 the script.
-Note that my.script contains the command <span class="commandline">/bin/hostname</span> 
-that executed on the first node in the allocation (where the script runs) plus 
-two job steps initiated using the <span class="commandline">srun</span> command 
+Note that my.script contains the command <span class="commandline">/bin/hostname</span>
+that executed on the first node in the allocation (where the script runs) plus
+two job steps initiated using the <span class="commandline">srun</span> command
 and executed sequentially.</p>
 <pre>
 adev0: cat my.script
@@ -299,26 +299,26 @@ adev9
 3: /home/jette
 </pre>
 
-<p>The final mode of operation is to create a resource allocation 
-and spawn job steps within that allocation. 
+<p>The final mode of operation is to create a resource allocation
+and spawn job steps within that allocation.
 The <span class="commandline">salloc</span> command is used
-to create a resource allocation and typically start a shell within 
-that allocation. 
-One or more job steps would typically be executed within that allocation 
+to create a resource allocation and typically start a shell within
+that allocation.
+One or more job steps would typically be executed within that allocation
 using the <span class="commandline">srun</span> command to launch the tasks
-(depending upon the type of MPI being used, the launch mechanism may 
+(depending upon the type of MPI being used, the launch mechanism may
 differ, see <a href="#mpi">MPI</a> details below).
-Finally the shell created by <span class="commandline">salloc</span> would 
+Finally the shell created by <span class="commandline">salloc</span> would
 be terminated using the <i>exit</i> command.
 SLURM does not automatically migrate executable or data files
-to the nodes allocated to a job. 
+to the nodes allocated to a job.
 Either the files must exists on local disk or in some global file system
 (e.g. NFS or Lustre).
 We provide the tool <span class="commandline">sbcast</span> to transfer
 files to local storage on allocated nodes using SLURM's hierarchical
 communications.
-In this example we use <span class="commandline">sbcast</span> to transfer 
-the executable program <i>a.out</i> to <i>/tmp/joe.a.out</i> on local storage 
+In this example we use <span class="commandline">sbcast</span> to transfer
+the executable program <i>a.out</i> to <i>/tmp/joe.a.out</i> on local storage
 of the allocated nodes.
 After executing the program, we delete it from local storage</p>
 <pre>
@@ -340,7 +340,7 @@ srun: jobid 473 submitted
 adev0: squeue
 JOBID PARTITION NAME USER ST TIME  NODES NODELIST(REASON)
   473 batch     test jill R  00:00 1     adev9
- 
+
 adev0: scancel 473
 
 adev0: squeue
@@ -351,24 +351,24 @@ JOBID PARTITION NAME USER ST TIME  NODES NODELIST(REASON)
 
 <h2><a name="mpi">MPI</a></h2>
 
-<p>MPI use depends upon the type of MPI being used. 
-There are three fundamentally different modes of operation used 
+<p>MPI use depends upon the type of MPI being used.
+There are three fundamentally different modes of operation used
 by these various MPI implementation.
 <ol>
-<li>SLURM directly launches the tasks and performs initialization 
-of communications (Quadrics MPI, MPICH2, MPICH-GM, MPICH-MX, 
+<li>SLURM directly launches the tasks and performs initialization
+of communications (Quadrics MPI, MPICH2, MPICH-GM, MPICH-MX,
 MVAPICH, MVAPICH2 and some MPICH1 modes).</li>
 <li>SLURM creates a resource allocation for the job and then
 mpirun launches tasks using SLURM's infrastructure (OpenMPI,
 LAM/MPI and HP-MPI).</li>
-<li>SLURM creates a resource allocation for the job and then 
-mpirun launches tasks using some mechanism other than SLURM, 
-such as SSH or RSH (BlueGene MPI and some MPICH1 modes). 
-These tasks initiated outside of SLURM's monitoring 
-or control. SLURM's epilog should be configured to purge 
+<li>SLURM creates a resource allocation for the job and then
+mpirun launches tasks using some mechanism other than SLURM,
+such as SSH or RSH (BlueGene MPI and some MPICH1 modes).
+These tasks are initiated outside of SLURM's monitoring
+or control. SLURM's epilog should be configured to purge
 these tasks when the job's allocation is relinquished. </li>
 </ol>
-<p>Links to instructions for using several varieties of MPI 
+<p>Links to instructions for using several varieties of MPI
 with SLURM are provided below.
 <ul>
 <li><a href="mpi_guide.html#bluegene_mpi">BlueGene MPI</a></li>
diff --git a/doc/html/quickstart_admin.shtml b/doc/html/quickstart_admin.shtml
index ebadbd3c04f7d7ab49944e166de57b93d43bc8b8..dd4201b89b01c5f276bc63fb629bf11a8c3735dd 100644
--- a/doc/html/quickstart_admin.shtml
+++ b/doc/html/quickstart_admin.shtml
@@ -3,20 +3,20 @@
 <h1>Quick Start Administrator Guide</h1>
 <h2>Overview</h2>
 Please see the <a href="quickstart.html">Quick Start User Guide</a> for a
-general overview. 
+general overview.
 
 <h2>Super Quick Start</h2>
 <ol>
 <li>Make sure that you have synchronized clocks plus consistent users and groups
 (UIDs and GIDs) across the cluster.</li>
-<li>Install <a href="http://home/gna.org/munge">MUNGE</a> for 
+<li>Install <a href="http://home.gna.org/munge">MUNGE</a> for
 authentication. Make sure that all nodes in your cluster have the
 same <i>munge.key</i>. Make sure the MUNGE daemon, <i>munged</i>
 is started before you start the SLURM daemons.</li>
 <li>bunzip2 the distributed tar-ball and untar the files:<br>
 <i>tar --bzip -x -f slurm*tar.bz2</i></li>
 <li><i>cd</i> to the directory containing the SLURM source and type
-<i>./configure</i> with appropriate options, typically <i>--prefix=</i> 
+<i>./configure</i> with appropriate options, typically <i>--prefix=</i>
 and <i>--sysconfdir=</i></li>
 <li>Type <i>make</i> to compile SLURM.</li>
 <li>Type <i>make install</i> to install the programs, documentation, libraries,
@@ -27,7 +27,7 @@ NOTE: The <i>SlurmUser</i> must be created as needed prior to starting SLURM
 and must exist on all nodes of the cluster.<br>
 NOTE: The parent directories for SLURM's log files, process ID files,
 state save directories, etc. are not created by SLURM.
-They must be created and made writable by <i>SlurmUser</i> as needed prior to 
+They must be created and made writable by <i>SlurmUser</i> as needed prior to
 starting SLURM daemons.</li>
 <li>Install the configuration file in <i>&lt;sysconfdir&gt;/slurm.conf</i>.<br>
 NOTE: You will need to install this configuration file on all nodes of the cluster.</li>
@@ -41,34 +41,34 @@ NOTE: You will need to install this configuration file on all nodes of the clust
 
 <h2>Building and Installing SLURM</h2>
 
-<p>Instructions to build and install SLURM manually are shown below. 
+<p>Instructions to build and install SLURM manually are shown below.
 See the README and INSTALL files in the source distribution for more details.
 </p>
 <ol>
-<li>bunzip2 the distributed tar-ball and untar the files:</br> 
+<li>bunzip2 the distributed tar-ball and untar the files:<br>
 <i>tar --bzip -x -f slurm*tar.bz2</i>
-<li><i>cd</i> to the directory containing the SLURM source and type 
+<li><i>cd</i> to the directory containing the SLURM source and type
 <i>./configure</i> with appropriate options (see below).</li>
 <li>Type <i>make</i> to compile SLURM.</li>
-<li>Type <i>make install</i> to install the programs, documentation, libraries, 
+<li>Type <i>make install</i> to install the programs, documentation, libraries,
 header files, etc.</li>
 </ol>
 <p>A full list of <i>configure</i> options will be returned by the command
-<i>configure --help</i>. The most commonly used arguments to the 
+<i>configure --help</i>. The most commonly used arguments to the
 <i>configure</i> command include: </p>
 <p style="margin-left:.2in"><span class="commandline">--enable-debug</span><br>
 Enable additional debugging logic within SLURM.</p>
 <p style="margin-left:.2in"><span class="commandline">--prefix=<i>PREFIX</i></span><br>
-</i> 
+</i>
 Install architecture-independent files in PREFIX; default value is /usr/local.</p>
 <p style="margin-left:.2in"><span class="commandline">--sysconfdir=<i>DIR</i></span><br>
-</i> 
+</i>
 Specify location of SLURM configuration file. The default value is PREFIX/etc</p>
 
-<p>If required libraries or header files are in non-standard locations, 
+<p>If required libraries or header files are in non-standard locations,
 set CFLAGS and LDFLAGS environment variables accordingly.
 Optional SLURM plugins will be built automatically when the
-<span class="commandline">configure</span> script detects that the required 
+<span class="commandline">configure</span> script detects that the required
 build requirements are present. Build dependencies for various plugins
 and commands are denoted below.
 </p>
@@ -76,33 +76,33 @@ and commands are denoted below.
 <li> <b>MUNGE</b> The auth/munge plugin will be built if the MUNGE authentication
                   library is installed. MUNGE is used as the default
                   authentication mechanism.</li>
-<li> <b>Authd</b> The auth/authd plugin will be built and installed if 
-                  the libauth library and its dependency libe are installed. 
+<li> <b>Authd</b> The auth/authd plugin will be built and installed if
+                  the libauth library and its dependency libe are installed.
 		  </li>
 <li> <b>Federation</b> The switch/federation plugin will be built and installed
 		  if the IBM Federation switch library is installed.
 <li> <b>QsNet</b> support in the form of the switch/elan plugin requires
                   that the qsnetlibs package (from Quadrics) be installed along
 		  with its development counterpart (i.e. the qsnetheaders
-		  package.) The switch/elan plugin also requires the 
+		  package.) The switch/elan plugin also requires the
 		  presence of the libelanosts library and /etc/elanhosts
 		  configuration file. (See elanhosts(5) man page in that
 		  package for more details). Define the nodes in the SLURM
-		  configuration file <i>slurm.conf</i> in the same order as 
-		  defined in the <i>elanhosts</i> configuration file so that 
+		  configuration file <i>slurm.conf</i> in the same order as
+		  defined in the <i>elanhosts</i> configuration file so that
 		  node allocation for jobs can be performed so as to optimize
-		  their performance. We highly recommend assigning the nodes 
-		  a numeric suffix equal to its Elan address for ease of 
-		  administration and because the Elan driver does not seem 
+		  their performance. We highly recommend assigning the nodes
+		  a numeric suffix equal to its Elan address for ease of
+		  administration and because the Elan driver does not seem
 		  to function otherwise
 		  (e.g. /etc/elanhosts to contain two lines of this sort:<br>
 		  eip  [0-15]  linux[0-15]<br>
 		  eth  [0-15]  linux[0-15]<br>
-		  for fifteen nodes with a prefix of &quot;linux&quot; and 
-		  numeric suffix between zero and 15).  Finally, the 
-		  &quot;ptrack&quot; kernel patch is required for process 
+		  for fifteen nodes with a prefix of &quot;linux&quot; and
+		  numeric suffix between zero and 15).  Finally, the
+		  &quot;ptrack&quot; kernel patch is required for process
 		  tracking.
-<li> <b>sview</b> The sview command will be built only if and <i>gtk+-2.0</i> 
+<li> <b>sview</b> The sview command will be built only if <i>gtk+-2.0</i>
                   is installed</li>
 </ul>
 Please see the <a href=download.html>Download</a> page for references to
@@ -114,7 +114,7 @@ the appropriate SLURM version number):<br>
 <span class="commandline">rpmbuild -ta slurm-0.6.0-1.tar.bz2</span></p>
 
 <p>You can control some aspects of the RPM built with a <i>.rpmmacros</i>
-file in your home directory. <b>Special macro definitions will likely 
+file in your home directory. <b>Special macro definitions will likely
 only be required if files are installed in unconventional locations.</b>
 Some macro definitions that may be used in building SLURM include:
 <dl>
@@ -150,85 +150,85 @@ Some macro definitions that may be used in building SLURM include:
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Daemons</h2>
-<p><b>slurmctld</b> is sometimes called the &quot;controller&quot; daemon. It 
-orchestrates SLURM activities, including queuing of job, monitoring node state, 
-and allocating resources (nodes) to jobs. There is an optional backup controller 
-that automatically assumes control in the event the primary controller fails. 
-The primary controller resumes control whenever it is restored to service. The 
+<p><b>slurmctld</b> is sometimes called the &quot;controller&quot; daemon. It
+orchestrates SLURM activities, including queuing of jobs, monitoring node state,
+and allocating resources (nodes) to jobs. There is an optional backup controller
+that automatically assumes control in the event the primary controller fails.
+The primary controller resumes control whenever it is restored to service. The
 controller saves its state to disk whenever there is a change.
 This state can be recovered by the controller at startup time.
-State changes are saved so that jobs and other state can be preserved when 
+State changes are saved so that jobs and other state can be preserved when
 controller moves (to or from backup controller) or is restarted.</p>
 
-<p>We recommend that you create a Unix user <i>slurm</i> for use by 
-<b>slurmctld</b>. This user name will also be specified using the 
+<p>We recommend that you create a Unix user <i>slurm</i> for use by
+<b>slurmctld</b>. This user name will also be specified using the
 <b>SlurmUser</b> in the slurm.conf configuration file.
-This user must exist on all nodes of the cluster for authentication 
+This user must exist on all nodes of the cluster for authentication
 of communications.
-Note that files and directories used by <b>slurmctld</b> will need to be 
-readable or writable by the user <b>SlurmUser</b> (the slurm configuration 
-files must be readable; the log file directory and state save directory 
+Note that files and directories used by <b>slurmctld</b> will need to be
+readable or writable by the user <b>SlurmUser</b> (the slurm configuration
+files must be readable; the log file directory and state save directory
 must be writable).</p>
 
-<p>The <b>slurmd</b> daemon executes on every compute node. It resembles a 
-remote shell daemon to export control to SLURM. Because slurmd initiates and 
+<p>The <b>slurmd</b> daemon executes on every compute node. It resembles a
+remote shell daemon to export control to SLURM. Because slurmd initiates and
 manages user jobs, it must execute as the user root.</p>
 
-<p>If you want to archive job accounting records to a database, the 
-<b>slurmdbd</b> (SLURM DataBase Daemon) should be used. We recommend that 
-you defer adding accounting support until after basic SLURM functionality is 
-established on your system. An <a href="accounting.html">Accounting</a> web 
+<p>If you want to archive job accounting records to a database, the
+<b>slurmdbd</b> (SLURM DataBase Daemon) should be used. We recommend that
+you defer adding accounting support until after basic SLURM functionality is
+established on your system. An <a href="accounting.html">Accounting</a> web
 page contains more information.</p>
 
-<p><b>slurmctld</b> and/or <b>slurmd</b> should be initiated at node startup 
+<p><b>slurmctld</b> and/or <b>slurmd</b> should be initiated at node startup
 time per the SLURM configuration.
-A file <b>etc/init.d/slurm</b> is provided for this purpose. 
-This script accepts commands <b>start</b>, <b>startclean</b> (ignores 
+A file <b>etc/init.d/slurm</b> is provided for this purpose.
+This script accepts commands <b>start</b>, <b>startclean</b> (ignores
 all saved state), <b>restart</b>, and <b>stop</b>.</p>
 
 <h2>Infrastructure</h2>
 <h3>User and Group Identification</h3>
-<p>There must be a uniform user and group name space (including 
-UIDs and GIDs) across the cluster. 
+<p>There must be a uniform user and group name space (including
+UIDs and GIDs) across the cluster.
 It is not necessary to permit user logins to the control hosts
-(<b>ControlMachine</b> or <b>BackupController</b>), but the 
+(<b>ControlMachine</b> or <b>BackupController</b>), but the
 users and groups must be configured on those hosts.</p>
- 
+
 <h3>Authentication of SLURM communications</h3>
-<p>All communications between SLURM components are authenticated. The 
+<p>All communications between SLURM components are authenticated. The
 authentication infrastructure is provided by a dynamically loaded
-plugin chosen at runtime via the <b>AuthType</b> keyword in the SLURM 
+plugin chosen at runtime via the <b>AuthType</b> keyword in the SLURM
 configuration file.  Currently available authentication types include
-<a href="http://www.theether.org/authd/">authd</a>, 
+<a href="http://www.theether.org/authd/">authd</a>,
 <a href="http://home.gna.org/munge/">munge</a>, and none.
-The default authentication infrastructure is "munge", but this does 
+The default authentication infrastructure is "munge", but this does
 require the installation of the MUNGE package.
 An authentication type of "none" requires no infrastructure, but permits
-any user to execute any job as another user with limited programming effort. 
-This may be fine for testing purposes, but certainly not for production use. 
+any user to execute any job as another user with limited programming effort.
+This may be fine for testing purposes, but certainly not for production use.
 <b>Configure some AuthType value other than "none" if you want any security.</b>
 We recommend the use of MUNGE unless you are experienced with authd.
 If using MUNGE, all nodes in the cluster must be configured with the
-same <i>munge.key</i> file. 
+same <i>munge.key</i> file.
 The MUNGE daemon, <i>munged</i>, must also be started before SLURM daemons.</p>
 
 <p>While SLURM itself does not rely upon synchronized clocks on all nodes
-of a cluster for proper operation, its underlying authentication mechanism 
+of a cluster for proper operation, its underlying authentication mechanism
 do have this requirement.</p>
 
 <h3>MPI support</h3>
-<p>SLURM supports many different SLURM implementations. 
+<p>SLURM supports many different MPI implementations.
 For more information, see <a href="quickstart.html#mpi">MPI</a>.
 
 <h3>Scheduler support</h3>
 <p>SLURM can be configured with rather simple or quite sophisticated
-scheduling algorithms depending upon your needs and willingness to 
+scheduling algorithms depending upon your needs and willingness to
 manage the configuration (much of which requires a database).
 The first configuration parameter of interest is <b>PriorityType</b>
 with two options available: <i>basic</i> (first-in-first-out) and
-<i>multifactor</i>. 
+<i>multifactor</i>.
 The <i>multifactor</i> plugin will assign a priority to jobs based upon
-a multitude of configuration parameters (age, size, fair-share allocation, 
+a multitude of configuration parameters (age, size, fair-share allocation,
 etc.) and its details are beyond the scope of this document.
 See the <a href="priority_multifactor.html">Multifactor Job Priority Plugin</a>
 document for details.</p>
@@ -238,11 +238,11 @@ jobs are scheduled and several options are available.
 <ul>
 <li><i>builtin</i>  will initiate jobs strictly in their priority order,
 typically (first-in-first-out) </li>
-<li><i>backfill</i> will initiate a lower-priority job if doing so does 
-not delay the expected initiation time of higher priority jobs; essentially 
-using smaller jobs to fill holes in the resource allocation plan. Effective 
+<li><i>backfill</i> will initiate a lower-priority job if doing so does
+not delay the expected initiation time of higher priority jobs; essentially
+using smaller jobs to fill holes in the resource allocation plan. Effective
 backfill scheduling does require users to specify job time limits.</li>
-<li><i>gang</i> time-slices jobs in the same partition/queue and can be 
+<li><i>gang</i> time-slices jobs in the same partition/queue and can be
 used to preempt jobs from lower-priority queues in order to execute
 jobs in higher priority queues.</li>
 <li><i>wiki</i> is an interface for use with
@@ -258,19 +258,19 @@ Moab Cluster Suite</a>
 <a href="preempt.html">Preemption</a>,
 <a href="reservations.html">Resource Reservation Guide</a>,
 <a href="resource_limits.html">Resource Limits</a> and
-<a href="cons_res_share.html">Sharing Consumable Resources</a>.</p> 
+<a href="cons_res_share.html">Sharing Consumable Resources</a>.</p>
 
 <h3>Resource selection</h3>
-<p>The resource selection mechanism used by SLURM is controlled by the 
-<b>SelectType</b> configuration parameter. 
-If you want to execute multiple jobs per node, but apportion the processors, 
-memory and other resources, the <i>cons_res</i> (consumable resources) 
+<p>The resource selection mechanism used by SLURM is controlled by the
+<b>SelectType</b> configuration parameter.
+If you want to execute multiple jobs per node, but apportion the processors,
+memory and other resources, the <i>cons_res</i> (consumable resources)
 plugin is recommended.
-If you tend to dedicate entire nodes to jobs, the <i>linear</i> plugin 
+If you tend to dedicate entire nodes to jobs, the <i>linear</i> plugin
 is recommended.
-For more information, please see 
-<a href="cons_res.html">Consumable Resources in SLURM</a>. 
-For BlueGene systems, <i>bluegene</i> plugin is required (it is topology 
+For more information, please see
+<a href="cons_res.html">Consumable Resources in SLURM</a>.
+For BlueGene systems, <i>bluegene</i> plugin is required (it is topology
 aware and interacts with the BlueGene bridge API).</p>
 
 <h3>Logging</h3>
@@ -280,68 +280,68 @@ is operational. </p>
 
 <h3>Accounting</h3>
 <p>SLURM supports accounting records being written to a simple text file,
-directly to a database (MySQL or PostgreSQL), or to a daemon securely 
-managing accounting data for multiple clusters. For more information 
+directly to a database (MySQL or PostgreSQL), or to a daemon securely
+managing accounting data for multiple clusters. For more information
 see <a href="accounting.html">Accounting</a>. </p>
 
 <h3>Corefile format</h3>
-<p>SLURM is designed to support generating a variety of core file formats for 
+<p>SLURM is designed to support generating a variety of core file formats for
 application codes that fail (see the <i>--core</i> option of the <i>srun</i>
 command).  As of now, SLURM only supports a locally developed lightweight
-corefile library which has not yet been released to the public. It is 
+corefile library which has not yet been released to the public. It is
 expected that this library will be available in the near future. </p>
 
 <h3>Parallel debugger support</h3>
 <p>SLURM exports information for parallel debuggers using the specification
 detailed  <a href=http://www-unix.mcs.anl.gov/mpi/mpi-debug/mpich-attach.txt>here</a>.
 This is meant to be exploited by any parallel debugger (notably, TotalView),
-and support is unconditionally compiled into SLURM code. 
+and support is unconditionally compiled into SLURM code.
 </p>
-<p>We use a patched version of TotalView that looks for a "totalview_jobid" 
-symbol in <b>srun</b> that it then uses (per configuration) to perform a bulk 
+<p>We use a patched version of TotalView that looks for a "totalview_jobid"
+symbol in <b>srun</b> that it then uses (per configuration) to perform a bulk
 launch of the <b>tvdsvr</b> daemons via a subsequent <b>srun</b>. Otherwise
-it is difficult to get TotalView to use <b>srun</b> for a bulk launch, since 
+it is difficult to get TotalView to use <b>srun</b> for a bulk launch, since
 <b>srun</b> will be unable to determine for which job it is launching tasks.
 </p>
 <p>Another solution would be to run TotalView within an existing <b>srun</b>
 <i>--allocate</i> session. Then the Totalview bulk launch command to <b>srun</b>
-could be set to ensure only a single task per node. This functions properly 
-because the SLRUM_JOBID environment variable is set in the allocation shell 
+could be set to ensure only a single task per node. This functions properly
+because the SLURM_JOBID environment variable is set in the allocation shell
 environment.
 </p>
 
 <h3>Compute node access</h3>
-<p>SLURM does not by itself limit access to allocated compute nodes, 
-but it does provide mechanisms to accomplish this. 
-There is a Pluggable Authentication Module (PAM) for restricting access 
-to compute nodes available for download. 
-When installed, the SLURM PAM module will prevent users from logging 
+<p>SLURM does not by itself limit access to allocated compute nodes,
+but it does provide mechanisms to accomplish this.
+There is a Pluggable Authentication Module (PAM) for restricting access
+to compute nodes available for download.
+When installed, the SLURM PAM module will prevent users from logging
 into any node that has not be assigned to that user.
-On job termination, any processes initiated by the user outside of 
-SLURM's control may be killed using an <i>Epilog</i> script configured 
+On job termination, any processes initiated by the user outside of
+SLURM's control may be killed using an <i>Epilog</i> script configured
 in <i>slurm.conf</i>.
-An example of such a script is included as <i>etc/slurm.epilog.clean</i>. 
-Without these mechanisms any user can login to any compute node, 
+An example of such a script is included as <i>etc/slurm.epilog.clean</i>.
+Without these mechanisms any user can login to any compute node,
 even those allocated to other users.</p>
 
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Configuration</h2>
-<p>The SLURM configuration file includes a wide variety of parameters. 
-This configuration file must be available on each node of the cluster and 
+<p>The SLURM configuration file includes a wide variety of parameters.
+This configuration file must be available on each node of the cluster and
 must have consistent contents. A full
-description of the parameters is included in the <i>slurm.conf</i> man page. Rather than 
-duplicate that information, a minimal sample configuration file is shown below. 
-Your slurm.conf file should define at least the configuration parameters defined 
-in this sample and likely additional ones. Any text 
-following a &quot;#&quot; is considered a comment. The keywords in the file are 
-not case sensitive, although the argument typically is (e.g., &quot;SlurmUser=slurm&quot; 
-might be specified as &quot;slurmuser=slurm&quot;). The control machine, like 
-all other machine specifications, can include both the host name and the name 
-used for communications. In this case, the host's name is &quot;mcri&quot; and 
-the name &quot;emcri&quot; is used for communications. 
-In this case &quot;emcri&quot; is the private management network interface 
-for the host &quot;mcri&quot;. Port numbers to be used for 
+description of the parameters is included in the <i>slurm.conf</i> man page. Rather than
+duplicate that information, a minimal sample configuration file is shown below.
+Your slurm.conf file should define at least the configuration parameters defined
+in this sample and likely additional ones. Any text
+following a &quot;#&quot; is considered a comment. The keywords in the file are
+not case sensitive, although the argument typically is (e.g., &quot;SlurmUser=slurm&quot;
+might be specified as &quot;slurmuser=slurm&quot;). The control machine, like
+all other machine specifications, can include both the host name and the name
+used for communications. In this case, the host's name is &quot;mcri&quot; and
+the name &quot;emcri&quot; is used for communications.
+In this case &quot;emcri&quot; is the private management network interface
+for the host &quot;mcri&quot;. Port numbers to be used for
 communications are specified as well as various timer values.</p>
 
 <p>The <i>SlurmUser</i> must be created as needed prior to starting SLURM.
@@ -350,7 +350,7 @@ state save directories, etc. are not created by SLURM.
 They must be created and made writable by <i>SlurmUser</i> as needed prior to
 starting SLURM daemons.</p>
 
-<p>A description of the nodes and their grouping into partitions is required. 
+<p>A description of the nodes and their grouping into partitions is required.
 A simple node range expression may optionally be used to specify
 ranges of nodes to avoid building a configuration file with large
 numbers of entries. The node range expression can contain one
@@ -369,38 +369,38 @@ If one or more numeric expressions are included, one of them
 must be at the end of the name (e.g. &quot;unit[0-31]rack&quot; is invalid),
 but arbitrary names can always be used in a comma separated list.</p>
 
-<p>Node names can have up to three name specifications: 
+<p>Node names can have up to three name specifications:
 <b>NodeName</b> is the name used by all SLURM tools when referring to the node,
-<b>NodeAddr</b> is the name or IP address SLURM uses to communicate with the node, and 
+<b>NodeAddr</b> is the name or IP address SLURM uses to communicate with the node, and
 <b>NodeHostname</b> is the name returned by the command <i>/bin/hostname -s</i>.
-Only <b>NodeName</b> is required (the others default to the same name), 
-although supporting all three parameters provides complete control over 
-naming and addressing the nodes.  See the <i>slurm.conf</i> man page for 
+Only <b>NodeName</b> is required (the others default to the same name),
+although supporting all three parameters provides complete control over
+naming and addressing the nodes.  See the <i>slurm.conf</i> man page for
 details on all configuration parameters.</p>
 
-<p>Nodes can be in more than one partition and each partition can have different 
+<p>Nodes can be in more than one partition and each partition can have different
 constraints (permitted users, time limits, job size limits, etc.).
 Each partition can thus be considered a separate queue.
-Partition and node specifications use node range expressions to identify 
-nodes in a concise fashion. This configuration file defines a 1154-node cluster 
-for SLURM, but it might be used for a much larger cluster by just changing a few 
-node range expressions. Specify the minimum processor count (Procs), real memory 
-space (RealMemory, megabytes), and temporary disk space (TmpDisk, megabytes) that 
-a node should have to be considered available for use. Any node lacking these 
+Partition and node specifications use node range expressions to identify
+nodes in a concise fashion. This configuration file defines a 1154-node cluster
+for SLURM, but it might be used for a much larger cluster by just changing a few
+node range expressions. Specify the minimum processor count (Procs), real memory
+space (RealMemory, megabytes), and temporary disk space (TmpDisk, megabytes) that
+a node should have to be considered available for use. Any node lacking these
 minimum configuration values will be considered DOWN and not scheduled.
 Note that a more extensive sample configuration file is provided in
-<b>etc/slurm.conf.example</b>. We also have a web-based 
-<a href="configurator.html">configuration tool</a> which can 
+<b>etc/slurm.conf.example</b>. We also have a web-based
+<a href="configurator.html">configuration tool</a> which can
 be used to build a simple configuration file, which can then be
 manually edited for more complex configurations.</p>
 <pre>
-# 
+#
 # Sample /etc/slurm.conf for mcr.llnl.gov
 #
 ControlMachine=mcri
 ControlAddr=emcri
 BackupController=mcrj
-BackupAddr=emcrj 
+BackupAddr=emcrj
 #
 AuthType=auth/munge
 Epilog=/usr/local/slurm/etc/epilog
@@ -430,44 +430,44 @@ NodeName=mcr[0-1151] NodeAddr=emcr[0-1151]
 #
 # Partition Configurations
 #
-PartitionName=DEFAULT State=UP    
+PartitionName=DEFAULT State=UP
 PartitionName=pdebug Nodes=mcr[0-191] MaxTime=30 MaxNodes=32 Default=YES
 PartitionName=pbatch Nodes=mcr[192-1151]
 </pre>
- 
+
 <h2>Security</h2>
 <p>Besides authentication of SLURM communications based upon the value
-of the <b>AuthType</b>, digital signatures are used in job step 
-credentials. 
+of the <b>AuthType</b>, digital signatures are used in job step
+credentials.
 This signature is used by <i>slurmctld</i> to construct a job step
 credential, which is sent to <i>srun</i> and then forwarded to
 <i>slurmd</i> to initiate job steps.
-This design offers improved performance by removing much of the 
+This design offers improved performance by removing much of the
 job step initiation overhead from the <i> slurmctld </i> daemon.
-The digital signature mechanism is specified by the <b>CryptoType</b> 
+The digital signature mechanism is specified by the <b>CryptoType</b>
 configuration parameter and the default mechanism is MUNGE. </p>
 
 <h3>OpenSSL</h3>
-<p>If using <a href="http://www.openssl.org/">OpenSSL</a> digital signatures, 
-unique job credential keys must be created for your site using the program 
-<a href="http://www.openssl.org/">openssl</a>. 
+<p>If using <a href="http://www.openssl.org/">OpenSSL</a> digital signatures,
+unique job credential keys must be created for your site using the program
+<a href="http://www.openssl.org/">openssl</a>.
 <b>You must use openssl and not ssh-genkey to construct these keys.</b>
-An example of how to do this is shown below. Specify file names that 
-match the values of <b>JobCredentialPrivateKey</b> and 
-<b>JobCredentialPublicCertificate</b> in your configuration file. 
-The <b>JobCredentialPrivateKey</b> file must be readable only by <b>SlurmUser</b>. 
+An example of how to do this is shown below. Specify file names that
+match the values of <b>JobCredentialPrivateKey</b> and
+<b>JobCredentialPublicCertificate</b> in your configuration file.
+The <b>JobCredentialPrivateKey</b> file must be readable only by <b>SlurmUser</b>.
 The <b>JobCredentialPublicCertificate</b> file must be readable by all users.
-Note that you should build the key files one on node and then distribute 
+Note that you should build the key files on one node and then distribute
 them to all nodes in the cluster.
-This insures that all nodes have a consistent set of digital signature 
+This insures that all nodes have a consistent set of digital signature
 keys.
-These keys are used by <i>slurmctld</i> to construct a job step 
-credential, which is sent to <i>srun</i> and then forwarded to 
+These keys are used by <i>slurmctld</i> to construct a job step
+credential, which is sent to <i>srun</i> and then forwarded to
 <i>slurmd</i> to initiate job steps.</p>
 
 <p class="commandline" style="margin-left:.2in">
 <i>openssl genrsa -out &lt;sysconfdir&gt;/slurm.key 1024</i><br>
-<i>openssl rsa -in &lt;sysconfdir&gt;/slurm.key -pubout -out  &lt;sysconfdir&gt;/slurm.cert</i> 
+<i>openssl rsa -in &lt;sysconfdir&gt;/slurm.key -pubout -out  &lt;sysconfdir&gt;/slurm.cert</i>
 </p>
 
 <h3>MUNGE</h3>
@@ -476,58 +476,58 @@ This will be addressed in the installation and configuration of MUNGE.</p>
 
 <h3>Authentication</h3>
 <p>Authentication of communications (identifying who generated a particular
-message) between SLURM components can use a different security mechanism 
-that is configurable.  
+message) between SLURM components can use a different security mechanism
+that is configurable.
 You must specify one &quot;auth&quot; plugin for this purpose using the
-<b>AuthType</b> configuration parameter. 
-Currently, only three authentication plugins are supported: 
-<b>auth/none</b>, <b>auth/authd</b>, and <b>auth/munge</b>. 
-The auth/none plugin is built by default, but either 
-Brent Chun's <a href="http://www.theether.org/authd/">authd</a>, 
-or LLNL's <a href="http://home.gna.org/munge/">MUNGE</a> 
-should be installed in order to get properly authenticated communications. 
+<b>AuthType</b> configuration parameter.
+Currently, only three authentication plugins are supported:
+<b>auth/none</b>, <b>auth/authd</b>, and <b>auth/munge</b>.
+The auth/none plugin is built by default, but either
+Brent Chun's <a href="http://www.theether.org/authd/">authd</a>,
+or LLNL's <a href="http://home.gna.org/munge/">MUNGE</a>
+should be installed in order to get properly authenticated communications.
 Unless you are experience with authd, we recommend the use of MUNGE.
-The configure script in the top-level directory of this distribution will 
-determine which authentication plugins may be built. 
+The configure script in the top-level directory of this distribution will
+determine which authentication plugins may be built.
 The configuration file specifies which of the available plugins will be utilized. </p>
 
 <h3>Pluggable Authentication Module (PAM) support</h3>
-<p>A PAM module (Pluggable Authentication Module) is available for SLURM that 
-can prevent a user from accessing a node which he has not been allocated, 
+<p>A PAM module (Pluggable Authentication Module) is available for SLURM that
+can prevent a user from accessing a node which he has not been allocated,
 if that mode of operation is desired.</p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Starting the Daemons</h2>
-<p>For testing purposes you may want to start by just running slurmctld and slurmd 
-on one node. By default, they execute in the background. Use the <span class="commandline">-D</span> 
-option for each daemon to execute them in the foreground and logging will be done 
-to your terminal. The <span class="commandline">-v</span> option will log events 
-in more detail with more v's increasing the level of detail (e.g. <span class="commandline">-vvvvvv</span>). 
-You can use one window to execute "<i>slurmctld -D -vvvvvv</i>", 
+<p>For testing purposes you may want to start by just running slurmctld and slurmd
+on one node. By default, they execute in the background. Use the <span class="commandline">-D</span>
+option for each daemon to execute them in the foreground and logging will be done
+to your terminal. The <span class="commandline">-v</span> option will log events
+in more detail with more v's increasing the level of detail (e.g. <span class="commandline">-vvvvvv</span>).
+You can use one window to execute "<i>slurmctld -D -vvvvvv</i>",
 a second window to execute "<i>slurmd -D -vvvvv</i>".
-You may see errors such as "Connection refused" or "Node X not responding" 
-while one daemon is operative and the other is being started, but the 
-daemons can be started in any order and proper communications will be 
-established once both daemons complete initialization. 
-You can use a third window to execute commands such as 
+You may see errors such as "Connection refused" or "Node X not responding"
+while one daemon is operative and the other is being started, but the
+daemons can be started in any order and proper communications will be
+established once both daemons complete initialization.
+You can use a third window to execute commands such as
 "<i>srun -N1 /bin/hostname</i>" to confirm functionality.</p>
 
-<p>Another important option for the daemons is "-c" 
+<p>Another important option for the daemons is "-c"
 to clear previous state information. Without the "-c"
-option, the daemons will restore any previously saved state information: node 
-state, job state, etc. With the "-c" option all 
-previously running jobs will be purged and node state will be restored to the 
-values specified in the configuration file. This means that a node configured 
-down manually using the <span class="commandline">scontrol</span> command will 
-be returned to service unless also noted as being down in the configuration file. 
+option, the daemons will restore any previously saved state information: node
+state, job state, etc. With the "-c" option all
+previously running jobs will be purged and node state will be restored to the
+values specified in the configuration file. This means that a node configured
+down manually using the <span class="commandline">scontrol</span> command will
+be returned to service unless also noted as being down in the configuration file.
 In practice, SLURM restarts with preservation consistently.</p>
-<p>A thorough battery of tests written in the &quot;expect&quot; language is also 
+<p>A thorough battery of tests written in the &quot;expect&quot; language is also
 available. </p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Administration Examples</h2>
-<p><span class="commandline">scontrol</span> can be used to print all system information 
-and modify most of it. Only a few examples are shown below. Please see the scontrol 
+<p><span class="commandline">scontrol</span> can be used to print all system information
+and modify most of it. Only a few examples are shown below. Please see the scontrol
 man page for full details. The commands and options are all case insensitive.</p>
 <p>Print detailed state of all jobs in the system.</p>
 <pre>
@@ -550,8 +550,8 @@ JobId=476 UserId=bob(6885) Name=sleep JobState=RUNNING
    ReqProcs=0 MinNodes=0 Shared=0 Contiguous=0
    MinProcs=0 MinMemory=0 Features=(null) MinTmpDisk=0
    ReqNodeList=(null) ReqNodeListIndecies=-1
-</pre> <p>Print the detailed state of job 477 and change its priority to 
-zero. A priority of zero prevents a job from being initiated (it is held in &quot;pending&quot; 
+</pre> <p>Print the detailed state of job 477 and change its priority to
+zero. A priority of zero prevents a job from being initiated (it is held in &quot;pending&quot;
 state).</p>
 <pre>
 adev0: scontrol
@@ -560,34 +560,34 @@ JobId=477 UserId=bob(6885) Name=sleep JobState=PENDING
    Priority=4294901286 Partition=batch BatchFlag=0
    <i>more data removed....</i>
 scontrol: update JobId=477 Priority=0
-</pre> 
+</pre>
 <p class="footer"><a href="#top">top</a></p>
-<p>Print the state of node adev13 and drain it. To drain a node specify a new 
-state of DRAIN, DRAINED, or DRAINING. SLURM will automatically set it to the appropriate 
-value of either DRAINING or DRAINED depending on whether the node is allocated 
+<p>Print the state of node adev13 and drain it. To drain a node specify a new
+state of DRAIN, DRAINED, or DRAINING. SLURM will automatically set it to the appropriate
+value of either DRAINING or DRAINED depending on whether the node is allocated
 or not. Return it to service later.</p>
 <pre>
 adev0: scontrol
 scontrol: show node adev13
 NodeName=adev13 State=ALLOCATED CPUs=2 RealMemory=3448 TmpDisk=32000
-   Weight=16 Partition=debug Features=(null) 
+   Weight=16 Partition=debug Features=(null)
 scontrol: update NodeName=adev13 State=DRAIN
 scontrol: show node adev13
 NodeName=adev13 State=DRAINING CPUs=2 RealMemory=3448 TmpDisk=32000
-   Weight=16 Partition=debug Features=(null) 
+   Weight=16 Partition=debug Features=(null)
 scontrol: quit
 <i>Later</i>
-adev0: scontrol 
+adev0: scontrol
 scontrol: show node adev13
 NodeName=adev13 State=DRAINED CPUs=2 RealMemory=3448 TmpDisk=32000
-   Weight=16 Partition=debug Features=(null) 
+   Weight=16 Partition=debug Features=(null)
 scontrol: update NodeName=adev13 State=IDLE
-</pre> <p>Reconfigure all SLURM daemons on all nodes. This should 
+</pre> <p>Reconfigure all SLURM daemons on all nodes. This should
 be done after changing the SLURM configuration file.</p>
 <pre>
 adev0: scontrol reconfig
-</pre> <p>Print the current SLURM configuration. This also reports if the 
-primary and secondary controllers (slurmctld daemons) are responding. To just 
+</pre> <p>Print the current SLURM configuration. This also reports if the
+primary and secondary controllers (slurmctld daemons) are responding. To just
 see the state of the controllers, use the command <span class="commandline">ping</span>.</p>
 <pre>
 adev0: scontrol show config
@@ -610,32 +610,32 @@ adev0: scontrol shutdown
 </pre> <p class="footer"><a href="#top">top</a></p>
 
 <h2>Testing</h2>
-<p>An extensive test suite is available within the SLURM distribution 
-in <i>testsuite/expect</i>. 
-There are about 250 tests which will execute on the order of 2000 jobs 
-and 5000 job steps. 
-Depending upon your system configuration and performance, this test 
+<p>An extensive test suite is available within the SLURM distribution
+in <i>testsuite/expect</i>.
+There are about 250 tests which will execute on the order of 2000 jobs
+and 5000 job steps.
+Depending upon your system configuration and performance, this test
 suite will take roughly 80 minutes to complete.
 The file <i>testsuite/expect/globals</i> contains default paths and
 procedures for all of the individual tests.  You will need to edit this
 file to specify where SLURM and other tools are installed.
-Set your working directory to <i>testsuite/expect</i> before 
+Set your working directory to <i>testsuite/expect</i> before
 starting these tests.
-Tests may be executed individually by name (e.g.  <i>test1.1</i>) 
-or the full test suite may be executed with the single command 
+Tests may be executed individually by name (e.g.  <i>test1.1</i>)
+or the full test suite may be executed with the single command
 <i>regression</i>.
 See <i>testsuite/expect/README</i> for more information.</p>
 
 <h2>Upgrades</h2>
-<p>When upgrading to a new major or minor release of SLURM (e.g. 1.1.x to 1.2.x) 
-all running and pending jobs will be purged due to changes in state save 
-information. It is possible to develop software to translate state information 
+<p>When upgrading to a new major or minor release of SLURM (e.g. 1.1.x to 1.2.x)
+all running and pending jobs will be purged due to changes in state save
+information. It is possible to develop software to translate state information
 between versions, but we do not expect to do so.
 When upgrading to a new micro release of SLURM (e.g. 1.2.1 to 1.2.2) all
 running and pending jobs will be preserved. Just install a new version of
 SLURM and restart the daemons.
-An exception to this is that jobs may be lost when installing new pre-release 
-versions (e.g. 1.3.0-pre1 to 1.3.0-pre2). We'll try to note these cases 
+An exception to this is that jobs may be lost when installing new pre-release
+versions (e.g. 1.3.0-pre1 to 1.3.0-pre2). We'll try to note these cases
 in the NEWS file.
 Contents of major releases are also described in the RELEASE_NOTES file.
 
diff --git a/doc/html/reservations.shtml b/doc/html/reservations.shtml
index 4e8fd582a177d4dcaa479203a9560e438013f8ba..0123ae48d073a0b0984fbfe8dfdfa442982ae8d3 100644
--- a/doc/html/reservations.shtml
+++ b/doc/html/reservations.shtml
@@ -4,16 +4,16 @@
 
 <p>SLURM version 2.0 has the ability to reserve resources for jobs
 being executed by select users and/or select bank accounts.
-A resource reservation identifies the nodes of a resource reservation 
+A resource reservation identifies the nodes of a resource reservation
 and a time period during which the reservation is available.
 Note that resource reservations are not compatible with SLURM's
 gang scheduler plugin since the termination time of running jobs
 is not possible to accurately predict.</p>
 
 <p>Reservations can be created, updated, or destroyed only by user root
-or the configured <i>SlurmUser</i> using the <i>scontrol</i> command. 
+or the configured <i>SlurmUser</i> using the <i>scontrol</i> command.
 The <i>scontrol</i>, <i>smap</i> and <i>sview</i> commands can be used
-to view reservations. 
+to view reservations.
 The man pages for the various commands contain details.</p>
 
 <p>Note for users of Maui or Moab schedulers: <br>
@@ -24,19 +24,19 @@ but should use their own advanced reservation system.</p>
 
 <p>One common mode of operation for a reservation would be to reserve
 an entire computer at a particular time for a system down time.
-The example below shows the creation of a full-system reservation 
-at 16:00 hours on 6 February and lasting for 120 minutes. 
-The "maint" flag is used to identify the reservation for accounting 
-purposes as system maintenance. 
-The "ignore_jobs" flag is used to indicate that we can ignore currently 
-running jobs when creating this reservation. 
+The example below shows the creation of a full-system reservation
+at 16:00 hours on 6 February and lasting for 120 minutes.
+The "maint" flag is used to identify the reservation for accounting
+purposes as system maintenance.
+The "ignore_jobs" flag is used to indicate that we can ignore currently
+running jobs when creating this reservation.
 By default, only nodes which are not expected to have a running job
-at the start time can be reserved (the time limit of all running 
+at the start time can be reserved (the time limit of all running
 jobs will have been reached).
-In this case we can manually cancel the running jobs as needed 
+In this case we can manually cancel the running jobs as needed
 to perform system maintenance.
-As the reservation time approaches, 
-only jobs that can complete by the reservation time will be 
+As the reservation time approaches,
+only jobs that can complete by the reservation time will be
 initiated.</p>
 <pre>
 $ scontrol create reservation starttime=2009-02-06T16:00:00 \
@@ -44,7 +44,7 @@ $ scontrol create reservation starttime=2009-02-06T16:00:00 \
 Reservation created: root_3
 
 $ scontrol show reservation
-ReservationName=root_3 StartTime=2009-02-06T16:00:00 
+ReservationName=root_3 StartTime=2009-02-06T16:00:00
    EndTime=2009-02-06T18:00:00 Duration=120
    Nodes=ALL NodeCnt=20
    Features=(null) PartitionName=(null)
@@ -52,7 +52,7 @@ ReservationName=root_3 StartTime=2009-02-06T16:00:00
    Users=root Accounts=(null)
 </pre>
 
+<p>A variation of this would be to configure licenses to
+<p>A variation of this would be to configure license to
 represent system resources, such as a global file system.
 One could create a reservation for all of those licenses
 in order to perform maintenance on that resource.</p>
@@ -63,7 +63,7 @@ $ scontrol create reservation starttime=2009-04-06T16:00:00 \
 Reservation created: root_4
 
 $ scontrol show reservation
-ReservationName=root_4 StartTime=2009-04-06T16:00:00 
+ReservationName=root_4 StartTime=2009-04-06T16:00:00
    EndTime=2009-04-06T18:00:00 Duration=120
    Nodes= NodeCnt=0
    Features=(null) PartitionName=(null)
@@ -72,7 +72,7 @@ ReservationName=root_4 StartTime=2009-04-06T16:00:00
 </pre>
 
 <p>Another mode of operation would be to reserve specific nodes
-for an indefinite period in order to study problems on those 
+for an indefinite period in order to study problems on those
 nodes. This could also be accomplished using a SLURM partition
 specifically for this purpose, but that would fail to capture
 the maintenance nature of their use.</p>
@@ -82,16 +82,16 @@ $ scontrol create reservation user=root starttime=now \
 Reservation created: root_5
 
 $ scontrol show res
-ReservationName=root_5 StartTime=2009-02-04T16:22:57 
+ReservationName=root_5 StartTime=2009-02-04T16:22:57
    EndTime=2009-02-04T16:21:57 Duration=4294967295
-   Nodes=sun000 NodeCnt=1 
+   Nodes=sun000 NodeCnt=1
    Features=(null) PartitionName=(null)
    Flags=MAINT,SPEC_NODES Licenses=(null)
    Users=root Accounts=(null)
 </pre>
 
-<p>Our final example is to reserve ten nodes in the default 
-SLURM partition starting at noon and with a duration of 60 
+<p>Our final example is to reserve ten nodes in the default
+SLURM partition starting at noon and with a duration of 60
 minutes occurring daily. The reservation will be available
 only to users alan and brenda.</p>
 <pre>
@@ -100,10 +100,10 @@ $ scontrol create reservation user=alan,brenda \
 Reservation created: alan_6
 
 $ scontrol show res
-ReservationName=alan_6 StartTime=2009-02-05T12:00:00 
+ReservationName=alan_6 StartTime=2009-02-05T12:00:00
    EndTime=2009-02-05T13:00:00 Duration=60
-   Nodes=sun[000-003,007,010-013,017] NodeCnt=10 
-   Features=(null) PartitionName=pdebug 
+   Nodes=sun[000-003,007,010-013,017] NodeCnt=10
+   Features=(null) PartitionName=pdebug
    Flags=DAILY Licenses=(null)
    Users=alan,brenda Accounts=(null)
 </pre>
@@ -111,7 +111,7 @@ ReservationName=alan_6 StartTime=2009-02-05T12:00:00
 <p>Note that specific nodes to be associated with the reservation are
 made immediately after creation of the reservation. This permits
 users to stage files to the nodes in preparation for use during the
-reservation. Note that the reservation creation request can also 
+reservation. Note that the reservation creation request can also
 identify the partition from which to select the nodes or _one_
 feature that every selected node must contain.</p>
 
@@ -119,10 +119,10 @@ feature that every selected node must contain.</p>
 
 <p>The reservation create response includes the reservation's name.
 This name is automatically generated by SLURM based upon the first
-user or account name and a numeric suffix. In order to use the 
+user or account name and a numeric suffix. In order to use the
 reservation, the job submit request must explicitly specify that
 reservation name. The job must be contained completely within the
-named reservation. The job will be canceled after the reservation 
+named reservation. The job will be canceled after the reservation
 reaches its EndTime. If letting the job continue execution after
 the reservation EndTime, a configuration option <i>ResvOverRun</i>
 can be set to control how long the job can continue execution.</p>
@@ -133,7 +133,7 @@ sbatch: Submitted batch job 65540
 
 <h2>Reservation Modification</h2>
 
-<p>Reservations can be modified by user root as desired. 
+<p>Reservations can be modified by user root as desired.
 For example their duration could be altered or the users
 granted access changed as shown below:</p>
 <pre>
@@ -142,17 +142,17 @@ $ scontrol update ReservationName=root_3 \
 Reservation updated.
 
 bash-3.00$ scontrol show ReservationName=root_3
-ReservationName=root_3 StartTime=2009-02-06T16:00:00 
+ReservationName=root_3 StartTime=2009-02-06T16:00:00
    EndTime=2009-02-06T18:30:00 Duration=150
-   Nodes=ALL NodeCnt=20 Features=(null) 
+   Nodes=ALL NodeCnt=20 Features=(null)
    PartitionName=(null) Flags=MAINT,SPEC_NODES Licenses=(null)
    Users=admin Accounts=(null)
 </pre>
 
 <h2>Reservation Deletion</h2>
 
-<p>Reservations are automatically purged after their end time. 
-They may also be manually deleted as shown below. 
+<p>Reservations are automatically purged after their end time.
+They may also be manually deleted as shown below.
 Note that a reservation can not be deleted while there are
 jobs running in it.</p>
 <pre>
@@ -161,9 +161,9 @@ $ scontrol delete ReservationName=alan_6
 
 <h2>Overlapping Reservations</h2>
 
-<p>By default, reservations must not overlap. They must either include 
+<p>By default, reservations must not overlap. They must either include
 different nodes or operate at different times. If specific nodes
-are not specified when a reservation is created, SLURM will 
+are not specified when a reservation is created, SLURM will
 automatically select nodes to avoid overlap and insure that
 the selected nodes are available when the reservation begins.</p>
 
@@ -171,54 +171,54 @@ the selected nodes are available when the reservation begins.</p>
 with two specific modes of operation available.
 For ease of system maintenance, you can create a reservation
 with the "maint" flag that overlaps existing reservations.
-This permits an administrator to easily create a maintenance 
-reservation for an entire cluster without needing to remove 
-or reschedule pre-existing reservations. Users requesting access 
+This permits an administrator to easily create a maintenance
+reservation for an entire cluster without needing to remove
+or reschedule pre-existing reservations. Users requesting access
 to one of these pre-existing reservations will be prevented from
-using resources that are also in this maintenance reservation. 
+using resources that are also in this maintenance reservation.
 For example, users alan and brenda might have a reservation for
 some nodes daily from noon until 1PM. If there is a maintenance
-reservation for all nodes starting at 12:30PM, the only jobs they 
+reservation for all nodes starting at 12:30PM, the only jobs they
 may start in their reservation would have to be completed by 12:30PM,
 when the maintenance reservation begins.</p>
 
-<p>The second exception operates in the same manner as a maintenance 
-reservation except that is it not logged in the accounting system as nodes 
-reserved for maintenance. 
-It requires the use of the "overlap" flag when creating the second 
+<p>The second exception operates in the same manner as a maintenance
+reservation except that it is not logged in the accounting system as nodes
+reserved for maintenance.
+It requires the use of the "overlap" flag when creating the second
 reservation.
 This might be used to insure availability of resources for a specific
-user within a group having a reservation. 
-Using the previous example of alan and brenda having a 10 node reservation 
+user within a group having a reservation.
+Using the previous example of alan and brenda having a 10 node reservation
 for 60 minutes, we might want to reserve 4 nodes of that for for brenda
-during the first 30 minutes of the time period. 
+during the first 30 minutes of the time period.
 In this case, the creation of one overlapping reservation (for a total of
-two reservations) may be simpler than creating three separate reservations, 
-partly since the use of any reservation requires the job specification 
+two reservations) may be simpler than creating three separate reservations,
+partly since the use of any reservation requires the job specification
 of the reservation name.
 <ol>
-<li>A six node reservation for both alan and brenda that lasts the full 
+<li>A six node reservation for both alan and brenda that lasts the full
 60 minutes</li>
 <li>A four node reservation for brenda for the first 30 minutes</li>
-<li>A four node reservation for both alan and brenda that lasts for the 
+<li>A four node reservation for both alan and brenda that lasts for the
 final 30 minutes</li>
 </ol></p>
 
-<p>If the "maint" or "overlap" flag is used when creating reservations, 
-one could create a reservation within a reservation within a third 
-reservation. 
-Note a reservation having a "maint" or "overlap" flag will not have 
-resources removed from it by a subsequent reservation also having a 
-"maint" or "overlap" flag, so nesting of reservations only works to a 
+<p>If the "maint" or "overlap" flag is used when creating reservations,
+one could create a reservation within a reservation within a third
+reservation.
+Note a reservation having a "maint" or "overlap" flag will not have
+resources removed from it by a subsequent reservation also having a
+"maint" or "overlap" flag, so nesting of reservations only works to a
 depth of two.</p>
 
 <h2>Reservation Accounting</h2>
 
 <p>Jobs executed within a reservation are accounted for using the appropriate
-user and bank account. If resources within a reservation are not used, those 
+user and bank account. If resources within a reservation are not used, those
 resources will be accounted for as being used by all users or bank accounts
 associated with the reservation on an equal basis (e.g. if two users are
-eligible to use a reservation and neither does, each user will be reported 
+eligible to use a reservation and neither does, each user will be reported
 to have used half of the reserved resources).</p>
 
 <h2>Future Work</h2>
@@ -226,12 +226,12 @@ to have used half of the reserved resources).</p>
 <p>Several enhancements are anticipated at some point in the future.
 <ol>
 <li>The automatic selection of nodes for a reservation create request may be
-sub-optimal in terms of locality (for optimized application 
-communication).</li> 
+sub-optimal in terms of locality (for optimized application
+communication).</li>
 <li>Reservations made within a partition having gang scheduling assumes
-the highest level rather than the actual level of time-slicing when 
+the highest level rather than the actual level of time-slicing when
 considering the initiation of jobs.
-This will prevent the initiation of some jobs which would complete execution 
+This will prevent the initiation of some jobs which would complete execution
 before a reservation given fewer jobs to time-slice with.</li>
 </ol>
 
diff --git a/doc/html/resource_limits.shtml b/doc/html/resource_limits.shtml
index 971ab588cfb5938f3e8f327b6b54b6cfec31334d..c7a2bf859ab3adc13b04f9c39d4148c8c4d5dcb0 100644
--- a/doc/html/resource_limits.shtml
+++ b/doc/html/resource_limits.shtml
@@ -3,11 +3,11 @@
 <h1>Resource Limits</h1>
 
 <p>SLURM scheduling policy support was significantly changed
-in version 2.0 in order to take advantage of the database 
+in version 2.0 in order to take advantage of the database
 integration used for storing accounting information.
-This document describes the capabilities available in 
+This document describes the capabilities available in
 SLURM version 2.0.
-New features are under active development. 
+New features are under active development.
 Familiarity with SLURM's <a href="accounting">Accounting</a> web page
 is strongly recommended before use of this document.</p>
 
@@ -17,37 +17,37 @@ but should use their own resource limits mechanisms.</p>
 
 <h2>Configuration</h2>
 
-<p>Scheduling policy information must be stored in a database 
+<p>Scheduling policy information must be stored in a database
 as specified by the <b>AccountingStorageType</b> configuration parameter
 in the <b>slurm.conf</b> configuration file.
-Information can be recorded in either <a href="http://www.mysql.com/">MySQL</a> 
+Information can be recorded in either <a href="http://www.mysql.com/">MySQL</a>
 or <a href="http://www.postgresql.org/">PostgreSQL</a>.
-For security and performance reasons, the use of 
-SlurmDBD (SLURM Database Daemon) as a front-end to the 
-database is strongly recommended. 
+For security and performance reasons, the use of
+SlurmDBD (SLURM Database Daemon) as a front-end to the
+database is strongly recommended.
 SlurmDBD uses a SLURM authentication plugin (e.g. Munge).
 SlurmDBD also uses an existing SLURM accounting storage plugin
 to maximize code reuse.
 SlurmDBD uses data caching and prioritization of pending requests
 in order to optimize performance.
-While SlurmDBD relies upon existing SLURM plugins for authentication 
-and database use, the other SLURM commands and daemons are not required 
-on the host where SlurmDBD is installed. 
+While SlurmDBD relies upon existing SLURM plugins for authentication
+and database use, the other SLURM commands and daemons are not required
+on the host where SlurmDBD is installed.
 Only the <i>slurmdbd</i> and <i>slurm-plugins</i> RPMs are required
 for SlurmDBD execution.</p>
 
 <p>Both accounting and scheduling policy are configured based upon
-an <i>association</i>. An <i>association</i> is a 4-tuple consisting 
-of the cluster name, bank account, user and (optionally) the SLURM 
+an <i>association</i>. An <i>association</i> is a 4-tuple consisting
+of the cluster name, bank account, user and (optionally) the SLURM
 partition.
-In order to enforce scheduling policy, set the value of 
+In order to enforce scheduling policy, set the value of
 <b>AccountingStorageEnforce</b>:
 This option contains a comma separated list of options you may want to
- enforce.  The valid options are 
+ enforce.  The valid options are
 <ul>
 <li>associations - This will prevent users from running jobs if
 their <i>association</i> is not in the database. This option will
-prevent users from accessing invalid accounts.  
+prevent users from accessing invalid accounts.
 </li>
 <li>limits - This will enforce limits set to associations.  By setting
   this option, the 'associations' option is also set.
@@ -63,13 +63,13 @@ each association in the database.  By setting this option, the
   set to true.
 </li>
 </ul>
-(NOTE: The association is a combination of cluster, account, 
+(NOTE: The association is a combination of cluster, account,
 user names and optional partition name.)
 <br>
-Without AccountingStorageEnforce being set (the default behavior) 
+Without AccountingStorageEnforce being set (the default behavior)
 jobs will be executed based upon policies configured in SLURM on each
 cluster.
-<br> 
+<br>
 It is advisable to run without the option 'limits' set when running a
 scheduler on top of SLURM, like Moab, that does not update in real
 time their limits per association.</li>
@@ -78,32 +78,32 @@ time their limits per association.</li>
 <h2>Tools</h2>
 
 <p>The tool used to manage accounting policy is <i>sacctmgr</i>.
-It can be used to create and delete cluster, user, bank account, 
+It can be used to create and delete cluster, user, bank account,
 and partition records plus their combined <i>association</i> record.
-See <i>man sacctmgr</i> for details on this tools and examples of 
+See <i>man sacctmgr</i> for details on this tools and examples of
 its use.</p>
 
 <p>A web interface with graphical output is currently under development.</p>
 
-<p>Changes made to the scheduling policy are uploaded to 
-the SLURM control daemons on the various clusters and take effect 
-immediately. When an association is delete, all jobs running or 
+<p>Changes made to the scheduling policy are uploaded to
+the SLURM control daemons on the various clusters and take effect
+immediately. When an association is delete, all jobs running or
 pending which belong to that association are immediately canceled.
-When limits are lowered, running jobs will not be canceled to 
+When limits are lowered, running jobs will not be canceled to
 satisfy the new limits, but the new lower limits will be enforced.</p>
 
 <h2>Policies supported</h2>
 
-<p> A limited subset of scheduling policy options are currently 
-supported. 
-The available options are expected to increase as development 
-continues. 
-Most of these scheduling policy options are available not only 
-for a user association, but also for each cluster and account. 
-If a new association is created for some user and some scheduling 
-policy options is not specified, the default will be the option 
-for the cluster plus account pair and if that is not specified 
-then the cluster and if that is not specified then no limit 
+<p> A limited subset of scheduling policy options are currently
+supported.
+The available options are expected to increase as development
+continues.
+Most of these scheduling policy options are available not only
+for a user association, but also for each cluster and account.
+If a new association is created for some user and some scheduling
+policy options is not specified, the default will be the option
+for the cluster plus account pair and if that is not specified
+then the cluster and if that is not specified then no limit
 will apply.</p>
 
 <p>Currently available scheduling policy options:</p>
diff --git a/doc/html/schedplugins.shtml b/doc/html/schedplugins.shtml
index 94250edce85099e8a304006804f7191256dcb0e5..376c098af9b6eb39352a8ee7f5daa49aad50bd97 100644
--- a/doc/html/schedplugins.shtml
+++ b/doc/html/schedplugins.shtml
@@ -3,49 +3,49 @@
 <h1><a name="top">SLURM Scheduler Plugin API</a></h1>
 
 <h2> Overview</h2>
-<p> This document describes SLURM scheduler plugins and the API that defines 
-them. It is intended as a resource to programmers wishing to write their own SLURM 
+<p> This document describes SLURM scheduler plugins and the API that defines
+them. It is intended as a resource to programmers wishing to write their own SLURM
 scheduler plugins. This is version 100 of the API.</p>
 
 <p>It is noteworthy that two different models are used for job scheduling.
 The <b>backfill</b> scheduler lets SLURM establish the initial job priority
 and can periodically alter job priorities to change their order within the queue.
 The <b>wiki</b> scheduler establishes an initial priority of zero (held) for
-all jobs. These jobs only begin execution when the <b>wiki</b> scheduler 
-explicitly raises the their priority (releasing them). 
-Developers may use the model that best fits their needs. 
+all jobs. These jobs only begin execution when the <b>wiki</b> scheduler
+explicitly raises the their priority (releasing them).
+Developers may use the model that best fits their needs.
 Note that a separate <a href="selectplugins.html">node selection plugin</a>
 is available for controlling that aspect of scheduling.</p>
 
 <p>SLURM scheduler plugins are SLURM plugins that implement the SLURM scheduler
-API described herein. They must conform to the SLURM Plugin API with the following 
+API described herein. They must conform to the SLURM Plugin API with the following
 specifications:</p>
 <p><span class="commandline">const char plugin_type[]</span><br>
-The major type must be &quot;sched.&quot; The minor type can be any recognizable 
+The major type must be &quot;sched.&quot; The minor type can be any recognizable
 abbreviation for the type of scheduler. We recommend, for example:</p>
 <ul>
-<li><b>builtin</b>&#151;A plugin that implements the API without providing any actual 
+<li><b>builtin</b>&#151;A plugin that implements the API without providing any actual
 scheduling services. This is the default behavior and implements first-in-first-out scheduling.</li>
-<li><b>backfill</b>&#151;Raise the priority of jobs if doing so results in their starting earlier 
+<li><b>backfill</b>&#151;Raise the priority of jobs if doing so results in their starting earlier
 without any delay in the expected initiation time of any higher priority job.</li>
 <li><b>wiki</b>&#151;Uses
 <a href="http://www.clusterresources.com/pages/products/maui-cluster-scheduler.php">
-The Maui Scheduler</a> (Wiki version) 
+The Maui Scheduler</a> (Wiki version)
 as an external entity to control SLURM job scheduling.</li>
-<li><b>wiki2</b>&#151;Uses 
+<li><b>wiki2</b>&#151;Uses
 <a href="http://www.clusterresources.com/pages/products/moab-cluster-suite.php">
 Moab Cluster Suite</a> as an external entity to control SLURM job scheduling.
-Note that wiki2 is an expanded version of the wiki plugin with additional 
+Note that wiki2 is an expanded version of the wiki plugin with additional
 functions supported specifically for Moab.</li>
 
 </ul>
-<p>The <span class="commandline">plugin_name</span> and 
-<span class="commandline">plugin_version</span> 
-symbols required by the SLURM Plugin API require no specialization for scheduler support. 
+<p>The <span class="commandline">plugin_name</span> and
+<span class="commandline">plugin_version</span>
+symbols required by the SLURM Plugin API require no specialization for scheduler support.
 Note carefully, however, the versioning discussion below.</p>
-<p>The programmer is urged to study 
+<p>The programmer is urged to study
 <span class="commandline">src/plugins/sched/backfill</span> and
-<span class="commandline">src/plugins/sched/builtin</span> 
+<span class="commandline">src/plugins/sched/builtin</span>
 for sample implementations of a SLURM scheduler plugin.</p>
 <p class="footer"><a href="#top">top</a></p>
 
@@ -53,18 +53,18 @@ for sample implementations of a SLURM scheduler plugin.</p>
 <p>The implementation must maintain (though not necessarily directly export) an
 enumerated <span class="commandline">errno</span>  to allow SLURM to discover
 as practically as possible the reason for any failed API call. Plugin-specific enumerated
-integer values should be used when appropriate. It is desirable that these values 
+integer values should be used when appropriate. It is desirable that these values
 be mapped into the range ESLURM_SCHED_MIN and ESLURM_SCHED_MAX
 as defined in <span class="commandline">slurm/slurm_errno.h</span>.
 The error number should be returned by the function
 <a href="#get_errno"><span class="commandline">slurm_sched_get_errno()</span></a>
 and  string describing the error's meaning should be returned by the function
-<a href="#strerror"><span class="commandline">slurm_sched_strerror()</span></a> 
+<a href="#strerror"><span class="commandline">slurm_sched_strerror()</span></a>
 described below.</p>
 
 <p>These values must not be used as return values in integer-valued functions
 in the API. The proper error return value from integer-valued functions is SLURM_ERROR.
-The implementation should endeavor to provide useful and pertinent information by 
+The implementation should endeavor to provide useful and pertinent information by
 whatever means is practical. In some cases this means an errno for each credential,
 since plugins must be re-entrant. If a plugin maintains a global errno in place of or in
 addition to a per-credential errno, it is not required to enforce mutual exclusion on it.
@@ -74,7 +74,7 @@ SLURM_SUCCESS. </p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>API Functions</h2>
-<p>The following functions must appear. Functions which are not implemented should 
+<p>The following functions must appear. Functions which are not implemented should
 be stubbed.</p>
 
 <p class="commandline">int slurm_sched_plugin_reconfig (void);</p>
@@ -88,39 +88,39 @@ to indicate the reason for failure.</p>
 <p class="commandline">int slurm_sched_plugin_schedule (void);</p>
 <p style="margin-left:.2in"><b>Description</b>: For passive schedulers, invoke a scheduling pass.</p>
 <p style="margin-left:.2in"><b>Arguments</b>: None</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
-the plugin should return SLURM_ERROR and set the errno to an appropriate value 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">int slurm_sched_plugin_newalloc (void);</p>
 <p style="margin-left:.2in"><b>Description</b>: Note the successful allocation of resources to a job.</p>
 <p style="margin-left:.2in"><b>Arguments</b>: Pointer to the slurmctld job structure. This can be used to
 get partition, allocated resources, time limit, etc.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
-the plugin should return SLURM_ERROR and set the errno to an appropriate value 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">int slurm_sched_plugin_freealloc (void);</p>
 <p style="margin-left:.2in"><b>Description</b>: Note the successful release of resources for a job.</p>
 <p style="margin-left:.2in"><b>Arguments</b>: Pointer to the slurmctld job structure. This can be used to
 get partition, allocated resources, time limit, etc.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
-the plugin should return SLURM_ERROR and set the errno to an appropriate value 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">uint32_t slurm_sched_plugin_initial_priority (
 uint32_t last_prio, struct job_record *job_ptr);</p>
 <p style="margin-left:.2in"><b>Description</b>: Establish the initial priority of a new job.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<b>last_prio</b> (input) default priority of the previously submitted job. 
-This can be used to provide First-In-First-Out scheduling by assigning the 
+<b>last_prio</b> (input) default priority of the previously submitted job.
+This can be used to provide First-In-First-Out scheduling by assigning the
 new job a priority lower than this value.
-This could also be used to establish an initial priority of zero for all jobs, 
-representing a "held" state. 
-The scheduler plugin can then decide where and when to initiate pending jobs 
+This could also be used to establish an initial priority of zero for all jobs,
+representing a "held" state.
+The scheduler plugin can then decide where and when to initiate pending jobs
 by altering their priority and (optionally) list of required nodes.<br>
-<b>job_ptr</b> (input) 
-Pointer to the slurmctld job structure. This can be used to get partition, 
+<b>job_ptr</b> (input)
+Pointer to the slurmctld job structure. This can be used to get partition,
 resource requirements, time limit, etc.</p>
 <p style="margin-left:.2in"><b>Returns</b>: The priority to be assigned to this job.</p>
 
@@ -130,18 +130,18 @@ resource requirements, time limit, etc.</p>
 <p style="margin-left:.2in"><b>Returns</b>: Nothing.</p>
 
 <p class="commandline">void slurm_sched_plugin_partition_change (void);</p>
-<p style="margin-left:.2in"><b>Description</b>: Note that some partition state change 
+<p style="margin-left:.2in"><b>Description</b>: Note that some partition state change
 happened such as time or size limits.</p>
 <p style="margin-left:.2in"><b>Arguments</b>: None</p>
 <p style="margin-left:.2in"><b>Returns</b>: Nothing.</p>
 <p class="footer"><a href="#top">top</a></p>
 
 <p class="commandline">char *slurm_sched_get_conf (void);</p></a>
-<p style="margin-left:.2in"><b>Description</b>: Return scheduler specific 
+<p style="margin-left:.2in"><b>Description</b>: Return scheduler specific
 configuration information to be reported for the <i>scontrol show configuration</i>
 command.</p>
 <p style="margin-left:.2in"><b>Arguments</b>: None</p>
-<p style="margin-left:.2in"><b>Returns</b>: A string containing configuration 
+<p style="margin-left:.2in"><b>Returns</b>: A string containing configuration
 information. The return value is released using the <i>xfree()</i> function.</p>
 
 <a name="get_errno"><p class="commandline">int slurm_sched_get_errno (void);</p></a>
@@ -161,8 +161,8 @@ or NULL if no description found in this plugin.</p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Versioning</h2>
-<p> This document describes version 100 of the SLURM Scheduler API. Future 
-releases of SLURM may revise this API. A scheduler plugin conveys its ability 
+<p> This document describes version 100 of the SLURM Scheduler API. Future
+releases of SLURM may revise this API. A scheduler plugin conveys its ability
 to implement a particular API version using the mechanism outlined for SLURM plugins.</p>
 <p class="footer"><a href="#top">top</a></p>
 
diff --git a/doc/html/selectplugins.shtml b/doc/html/selectplugins.shtml
index 9cc9528c2fbe15beb7f8b473866bb2aa4807813e..8daac3ef65ec6db5c1c5ab3c465af924394253e5 100644
--- a/doc/html/selectplugins.shtml
+++ b/doc/html/selectplugins.shtml
@@ -3,35 +3,35 @@
 <h1><a name="top">Resource Selection Plugin Programmer Guide</a></h1>
 
 <h2>Overview</h2>
-<p>This document describes SLURM resource selection plugins and the API that defines 
-them. It is intended as a resource to programmers wishing to write their own SLURM 
+<p>This document describes SLURM resource selection plugins and the API that defines
+them. It is intended as a resource to programmers wishing to write their own SLURM
 node selection plugins. This is version 100 of the API.</p>
 
 <p>SLURM node selection plugins are SLURM plugins that implement the SLURM node selection
-API described herein. They are intended to provide a mechanism for both selecting 
-nodes for pending jobs and performing any system-specific tasks for job launch or 
-termination. The plugins must conform to the SLURM Plugin API with the following 
+API described herein. They are intended to provide a mechanism for both selecting
+nodes for pending jobs and performing any system-specific tasks for job launch or
+termination. The plugins must conform to the SLURM Plugin API with the following
 specifications:</p>
 <p><span class="commandline">const char plugin_type[]</span><br>
-The major type must be &quot;select.&quot; The minor type can be any recognizable 
+The major type must be &quot;select.&quot; The minor type can be any recognizable
 abbreviation for the type of node selection algorithm. We recommend, for example:</p>
 <ul>
-<li><b>linear</b>&#151;A plugin that selects nodes assuming a one-dimensional 
-array of nodes. The nodes are selected so as to minimize the number of consecutive 
-sets of nodes utilizing a best-fit algorithm. While supporting shared nodes, 
-this plugin does not allocate individual processors, but can allocate memory to jobs. 
+<li><b>linear</b>&#151;A plugin that selects nodes assuming a one-dimensional
+array of nodes. The nodes are selected so as to minimize the number of consecutive
+sets of nodes utilizing a best-fit algorithm. While supporting shared nodes,
+this plugin does not allocate individual processors, but can allocate memory to jobs.
 This plugin is recommended for systems without shared nodes.</li>
-<li><b>cons_res</b>&#151;A plugin that can allocate individual processors, 
-memory, etc. within nodes. This plugin is recommended for systems with 
+<li><b>cons_res</b>&#151;A plugin that can allocate individual processors,
+memory, etc. within nodes. This plugin is recommended for systems with
 many non-parallel programs sharing nodes. For more information see
 <a href=cons_res.html>Consumable Resources in SLURM</a>.</li>
-<li><b>bluegene</b>&#151;<a href="http://www.research.ibm.com/bluegene/">IBM Blue Gene</a>  
-node selector. Note that this plugin not only selects the nodes for a job, but performs 
+<li><b>bluegene</b>&#151;<a href="http://www.research.ibm.com/bluegene/">IBM Blue Gene</a>
+node selector. Note that this plugin not only selects the nodes for a job, but performs
 some initialization and termination functions for the job.</li>
 </ul>
-<p>The <span class="commandline">plugin_name</span> and 
-<span class="commandline">plugin_version</span> 
-symbols required by the SLURM Plugin API require no specialization for node selection support. 
+<p>The <span class="commandline">plugin_name</span> and
+<span class="commandline">plugin_version</span>
+symbols required by the SLURM Plugin API require no specialization for node selection support.
 Note carefully, however, the versioning discussion below.</p>
 
 <p>A simplified flow of logic follows:
@@ -65,55 +65,55 @@ if (<i>select_p_job_test</i>(all available nodes) != SLURM_SUCCESS) {
 /* wait for slurmctld shutdown request */
 <i>select_p_state_save</i>()
 </pre>
-<p>Depending upon failure modes, it is possible that 
+<p>Depending upon failure modes, it is possible that
 <span class="commandline">select_p_state_save()</span>
 will not be called at slurmctld termination.
 When slurmctld is restarted, other function calls may be replayed.
-<span class="commandline">select_p_node_init()</span> may be used 
+<span class="commandline">select_p_node_init()</span> may be used
 to synchronize the plugin's state with that of slurmctld.</p>
 
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Data Objects</h2>
-<p> These functions are expected to read and/or modify data structures directly in 
-the slurmctld daemon's memory. Slurmctld is a multi-threaded program with independent 
-read and write locks on each data structure type. Therefore the type of operations 
+<p> These functions are expected to read and/or modify data structures directly in
+the slurmctld daemon's memory. Slurmctld is a multi-threaded program with independent
+read and write locks on each data structure type. Therefore the type of operations
 permitted on various data structures is identified for each function.</p>
 
-<p>These functions make use of bitmaps corresponding to the nodes in a table. 
-The function <span class="commandline">select_p_node_init()</span> should 
-be used to establish the initial mapping of bitmap entries to nodes. 
-Functions defined in <i>src/common/bitmap.h</i> should be used for bitmap 
+<p>These functions make use of bitmaps corresponding to the nodes in a table.
+The function <span class="commandline">select_p_node_init()</span> should
+be used to establish the initial mapping of bitmap entries to nodes.
+Functions defined in <i>src/common/bitmap.h</i> should be used for bitmap
 manipulations (these functions are directly accessible from the plugin).</p>
 
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>API Functions</h2>
-<p>The following functions must appear. Functions which are not implemented should 
+<p>The following functions must appear. Functions which are not implemented should
 be stubbed.</p>
 
 <h3>State Save Functions</h3>
 
 <p class="commandline">int select_p_state_save (char *dir_name);</p>
-<p style="margin-left:.2in"><b>Description</b>: Save any global node selection state 
-information to a file within the specified directory. The actual file name used is plugin specific. 
-It is recommended that the global switch state contain a magic number for validation purposes. 
+<p style="margin-left:.2in"><b>Description</b>: Save any global node selection state
+information to a file within the specified directory. The actual file name used is plugin specific.
+It is recommended that the global switch state contain a magic number for validation purposes.
 This function is called by the slurmctld deamon on shutdown.</p>
-<p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> dir_name</span>&nbsp; 
+<p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> dir_name</span>&nbsp;
 &nbsp;&nbsp;(input) fully-qualified pathname of a directory into which user SlurmUser (as defined
 in slurm.conf) can create a file and write state information into that file. Cannot be NULL.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR.</p>
 
 <p class="commandline">int select_p_state_restore (char *dir_name);</p>
-<p style="margin-left:.2in"><b>Description</b>: Restore any global node selection state 
-information from a file within the specified directory. The actual file name used is plugin specific. 
-It is recommended that any magic number associated with the global switch state be verified. 
+<p style="margin-left:.2in"><b>Description</b>: Restore any global node selection state
+information from a file within the specified directory. The actual file name used is plugin specific.
+It is recommended that any magic number associated with the global switch state be verified.
 This function is called by the slurmctld deamon on startup.</p>
-<p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> dir_name</span>&nbsp; 
-&nbsp;&nbsp;(input) fully-qualified pathname of a directory containing a state information file 
+<p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> dir_name</span>&nbsp;
+&nbsp;&nbsp;(input) fully-qualified pathname of a directory containing a state information file
 from which user SlurmUser (as defined in slurm.conf) can read. Cannot be NULL.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR, causing slurmctld to exit.</p>
 
 <p class="footer"><a href="#top">top</a></p>
@@ -121,35 +121,35 @@ the plugin should return SLURM_ERROR, causing slurmctld to exit.</p>
 <h3>State Initialization Functions</h3>
 
 <p class="commandline">int select_p_node_init (struct node_record *node_ptr, int node_cnt);</p>
-<p style="margin-left:.2in"><b>Description</b>: Note the initialization of the node record data 
-structure. This function is called when the node records are initially established and again 
+<p style="margin-left:.2in"><b>Description</b>: Note the initialization of the node record data
+structure. This function is called when the node records are initially established and again
 when any nodes are added to or removed from the data structure. </p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline"> node_ptr</span>&nbsp;&nbsp;&nbsp;(input) pointer 
-to the node data records. Data in these records can read. Nodes deleted after initialization 
-may have their the <i>name</i> field in the record cleared (zero length) rather than 
+<span class="commandline"> node_ptr</span>&nbsp;&nbsp;&nbsp;(input) pointer
+to the node data records. Data in these records can read. Nodes deleted after initialization
+may have their the <i>name</i> field in the record cleared (zero length) rather than
 rebuilding the node records and bitmaps.<br><br>
-<span class="commandline"> node_cnt</span>&nbsp; &nbsp;&nbsp;(input) number 
+<span class="commandline"> node_cnt</span>&nbsp; &nbsp;&nbsp;(input) number
 of node data records.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR, causing slurmctld to exit.</p>
 
 <p class="commandline">int select_p_block_init (List block_list);</p>
-<p style="margin-left:.2in"><b>Description</b>: Note the initialization of the partition record data 
-structure. This function is called when the partition records are initially established and again 
+<p style="margin-left:.2in"><b>Description</b>: Note the initialization of the partition record data
+structure. This function is called when the partition records are initially established and again
 when any partition configurations change. </p>
 <p style="margin-left:.2in"><b>Arguments</b>:
-<span class="commandline"> part_list</span>&nbsp;&nbsp;&nbsp;(input) list of partition 
-record entries. Note that some of these partitions may have no associated nodes. Also 
+<span class="commandline"> part_list</span>&nbsp;&nbsp;&nbsp;(input) list of partition
+record entries. Note that some of these partitions may have no associated nodes. Also
 consider that nodes can be removed from one partition and added to a different partition.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR, causing slurmctld to exit.</p>
 
 <p class="commandline">int select_p_job_init(List job_list);<p>
-<p style="margin-left:.2in"><b>Description</b>: Used at slurm startup to 
+<p style="margin-left:.2in"><b>Description</b>: Used at slurm startup to
 synchronize plugin (and node) state with that of currently active jobs.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
-<span class="commandline"> job_list</span>&nbsp; &nbsp;&nbsp;(input) 
+<span class="commandline"> job_list</span>&nbsp; &nbsp;&nbsp;(input)
 list of slurm jobs from slurmctld job records.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR.</p>
@@ -158,12 +158,12 @@ the plugin should return SLURM_ERROR.</p>
 <h3>State Synchronization Functions</h3>
 
 <p class="commandline">int select_p_update_block (update_part_msg_t *part_desc_ptr);</p>
-<p style="margin-left:.2in"><b>Description</b>: This function is called when the admin needs 
+<p style="margin-left:.2in"><b>Description</b>: This function is called when the admin needs
 to manually update the state of a block. </p>
 <p style="margin-left:.2in"><b>Arguments</b>:
 <span class="commandline"> part_desc_ptr</span>&nbsp;&nbsp;&nbsp;(input) partition
 description variable.  Containing the block name and the state to set the block.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR.</p>
 
 <p class="commandline">int select_p_update_nodeinfo(struct node_record *node_ptr);</p>
@@ -199,7 +199,7 @@ state of the node.</p>
 
 <p class="commandline">int select_p_update_sub_node (update_part_msg_t *part_desc_ptr);</p>
 <p style="margin-left:.2in"><b>Description</b>: update the state of a portion of
-a SLURM node. Currently used on BlueGene systems to place node cards within a 
+a SLURM node. Currently used on BlueGene systems to place node cards within a
 midplane into or out of an error state.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
 <span class="commandline"> part_desc_ptr</span>&nbsp;&nbsp;&nbsp;(input) partition
@@ -219,12 +219,12 @@ user really means in slurm terms.</p>
 <span class="commandline"> type</span>&nbsp;&nbsp;&nbsp;(input) enum
 telling the plug in what the user is really wanting.<br><br>
 <span class="commandline"> data</span>&nbsp;&nbsp;&nbsp;(input/output)
-Is a void * so depending on the type sent in argument 1 this should 
+Is a void * so depending on the type sent in argument 1 this should
 adjust the variable returning what the user is asking for.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, otherwise SLURM_ERROR</p>
 
 <p class="commandline">int select_p_reconfigure (void);</p>
-<p style="margin-left:.2in"><b>Description</b>: Used to notify plugin 
+<p style="margin-left:.2in"><b>Description</b>: Used to notify plugin
 of change in partition configuration or general configuration change.
 The plugin will test global variables for changes as appropriate.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, otherwise SLURM_ERROR</p>
@@ -236,107 +236,107 @@ The plugin will test global variables for changes as appropriate.</p>
 <p class="commandline">int select_p_job_test (struct job_record *job_ptr,
 bitstr_t *bitmap, int min_nodes, int max_nodes, int req_nodes, int mode,
 List preemption_candidates, List *preempted_jobs);</p>
-<p style="margin-left:.2in"><b>Description</b>: Given a job's scheduling requirement 
-specification and a set of nodes which might  be used to satisfy the request, identify 
-the nodes which "best" satisfy the request. Note that nodes being considered for allocation 
-to the job may include nodes already allocated to other jobs, even if node sharing is 
-not permitted. This is done to ascertain whether or not job may be allocated resources 
-at some later time (when the other jobs complete). This permits SLURM to reject 
-non-runnable jobs at submit time rather than after they have spent hours queued. 
-Informing users of problems at job submission time permits them to quickly resubmit 
+<p style="margin-left:.2in"><b>Description</b>: Given a job's scheduling requirement
+specification and a set of nodes which might  be used to satisfy the request, identify
+the nodes which "best" satisfy the request. Note that nodes being considered for allocation
+to the job may include nodes already allocated to other jobs, even if node sharing is
+not permitted. This is done to ascertain whether or not job may be allocated resources
+at some later time (when the other jobs complete). This permits SLURM to reject
+non-runnable jobs at submit time rather than after they have spent hours queued.
+Informing users of problems at job submission time permits them to quickly resubmit
 the job with appropriate constraints.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer 
+<span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
 to the job being considered for scheduling. Data in this job record may safely be read.
-Data of particular interest include <i>details->contiguous</i> (set if allocated nodes 
-should be contiguous), <i>num_procs</i> (minimum processors in allocation) and 
+Data of particular interest include <i>details->contiguous</i> (set if allocated nodes
+should be contiguous), <i>num_procs</i> (minimum processors in allocation) and
 <i>details->req_node_bitmap</i> (specific required nodes).<br><br>
-<span class="commandline"> bitmap</span>&nbsp; &nbsp;&nbsp;(input/output) 
+<span class="commandline"> bitmap</span>&nbsp; &nbsp;&nbsp;(input/output)
 bits representing nodes which might be allocated to the job are set on input.
-This function should clear the bits representing nodes not required to satisfy 
+This function should clear the bits representing nodes not required to satisfy
 job's scheduling request.
-Bits left set will represent nodes to be used for this job. Note that the job's 
-required nodes (<i>details->req_node_bitmap</i>) will be a superset 
+Bits left set will represent nodes to be used for this job. Note that the job's
+required nodes (<i>details->req_node_bitmap</i>) will be a superset
 <i>bitmap</i> when the function is called.<br><br>
-<span class="commandline"> min_nodes</span>&nbsp; &nbsp;&nbsp;(input) 
-minimum number of nodes to allocate to this job. Note this reflects both job 
+<span class="commandline"> min_nodes</span>&nbsp; &nbsp;&nbsp;(input)
+minimum number of nodes to allocate to this job. Note this reflects both job
 and partition specifications.<br><br>
-<span class="commandline"> max_nodes</span>&nbsp; &nbsp;&nbsp;(input) 
-maximum number of nodes to allocate to this job. Note this reflects both job 
+<span class="commandline"> max_nodes</span>&nbsp; &nbsp;&nbsp;(input)
+maximum number of nodes to allocate to this job. Note this reflects both job
 and partition specifications.<br><br>
 <span class="commandline"> req_nodes</span>&nbsp; &nbsp;&nbsp;(input)
 the requested (desired)  of nodes to allocate to this job. This reflects job's
 maximum node specification (if supplied).<br><br>
 <span class="commandline"> mode</span>&nbsp; &nbsp;&nbsp;(input)
-controls the mode of operation. Valid options are 
+controls the mode of operation. Valid options are
 SELECT_MODE_RUN_NOW: try to schedule job now<br>
 SELECT_MODE_TEST_ONLY: test if job can ever run<br>
 SELECT_MODE_WILL_RUN: determine when and where job can run<br><br>
 <span class="commandline"> preemption_candidates</span>&nbsp; &nbsp;&nbsp;(input)
-list of pointers to jobs which may be preempted in order to initiate this 
+list of pointers to jobs which may be preempted in order to initiate this
 pending job. May be NULL if there are no preemption candidates.<br><br>
 <span class="commandline"> preempted_jobs</span>&nbsp; &nbsp;&nbsp;(input/output)
 list of jobs which must be preempted in order to initiate the pending job.
 If the value is NULL, no job list is returned.
 If the list pointed to has a value of NULL, a new list will be created
-otherwise the existing list will be overwritten. 
-Use the <i>list_destroy</i> function to destroy the list when no longer 
+otherwise the existing list will be overwritten.
+Use the <i>list_destroy</i> function to destroy the list when no longer
 needed.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
-the plugin should return SLURM_ERROR and future attempts may be made to schedule 
+the plugin should return SLURM_ERROR and future attempts may be made to schedule
 the job.</p>
 
 <p class="commandline">int select_p_job_begin (struct job_record *job_ptr);</p>
 <p style="margin-left:.2in"><b>Description</b>: Note the initiation of the specified job
-is about to begin. This function is called immediately after 
+is about to begin. This function is called immediately after
 <span class="commandline">select_p_job_test()</span> successfully completes for this job.
 <p style="margin-left:.2in"><b>Arguments</b>:
-<span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer 
+<span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
 to the job being initialized. Data in this job record may safely be read or written.
-The <i>nodes</i> and <i>node_bitmap</i> fields of this job record identify the 
-nodes which have already been selected for this job to use. For an example of 
+The <i>nodes</i> and <i>node_bitmap</i> fields of this job record identify the
+nodes which have already been selected for this job to use. For an example of
 a job record field that the plugin may write into, see <i>select_id</i>.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
-the plugin should return SLURM_ERROR, which causes the job to be requeued for 
+the plugin should return SLURM_ERROR, which causes the job to be requeued for
 later execution.</p>
 
 <p class="commandline">int select_p_job_ready (struct job_record *job_ptr);</p>
-<p style="margin-left:.2in"><b>Description</b>: Test if resources are configured 
-and ready for job execution. This function is only used in the job prolog for 
+<p style="margin-left:.2in"><b>Description</b>: Test if resources are configured
+and ready for job execution. This function is only used in the job prolog for
 BlueGene systems to determine if the bglblock has been booted and is ready for use.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
 <span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
 to the job being initialized. Data in this job record may safely be read.
 The <i>nodes</i> and <i>node_bitmap</i> fields of this job record identify the
 nodes which have already been selected for this job to use. </p>
-<p style="margin-left:.2in"><b>Returns</b>: 1 if the job may begin execution, 
+<p style="margin-left:.2in"><b>Returns</b>: 1 if the job may begin execution,
 0 otherwise.</p>
- 
+
 <p class="commandline">int select_p_job_fini (struct job_record *job_ptr);</p>
-<p style="margin-left:.2in"><b>Description</b>: Note the termination of the 
-specified job. This function is called as the termination process for the 
+<p style="margin-left:.2in"><b>Description</b>: Note the termination of the
+specified job. This function is called as the termination process for the
 job begins (prior to killing the tasks).</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
-<span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer 
+<span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
 to the job being terminated. Data in this job record may safely be read or written.
-The <i>nodes</i> and/or <i>node_bitmap</i> fields of this job record identify the 
+The <i>nodes</i> and/or <i>node_bitmap</i> fields of this job record identify the
 nodes which were selected for this job to use.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR.</p>
 
 <p class="commandline">int select_p_job_suspend (struct job_record *job_ptr);</p>
-<p style="margin-left:.2in"><b>Description</b>: Suspend the specified job. 
+<p style="margin-left:.2in"><b>Description</b>: Suspend the specified job.
 Release resources for use by other jobs.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
 <span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
-to the job being suspended. Data in this job record may safely be read or 
-written.  The <i>nodes</i> and/or <i>node_bitmap</i> fields of this job record 
+to the job being suspended. Data in this job record may safely be read or
+written.  The <i>nodes</i> and/or <i>node_bitmap</i> fields of this job record
 identify the nodes which were selected for this job to use.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On
 failure, the plugin should return a SLURM error code.</p>
 
 <p class="commandline">int select_p_job_resume (struct job_record *job_ptr);</p>
-<p style="margin-left:.2in"><b>Description</b>: Resume the specified job 
+<p style="margin-left:.2in"><b>Description</b>: Resume the specified job
 which was previously suspended.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
 <span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
@@ -355,7 +355,7 @@ struct job_record *job_ptr, void *data);</p>
 <p style="margin-left:.2in"><b>Description</b>: Get plugin-specific information
 about a job.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline"> info</span>&nbsp; &nbsp;&nbsp;(input) identifies 
+<span class="commandline"> info</span>&nbsp; &nbsp;&nbsp;(input) identifies
 the type of data to be updated.<br><br>
 <span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer to
 the job related to the query (if applicable; may be NULL).<br><br>
@@ -369,9 +369,9 @@ the plugin should return SLURM_ERROR.</p>
 <span class="commandline">
 last_query_time</span>&nbsp;&nbsp;&nbsp;(input) time that the data was
 last saved.<br>
-<span class="commandline"> buffer_ptr</span>&nbsp;&nbsp;&nbsp;(input/output) buffer into 
+<span class="commandline"> buffer_ptr</span>&nbsp;&nbsp;&nbsp;(input/output) buffer into
 which the node data is appended.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful,
 SLURM_NO_CHANGE_IN_DATA if data has not changed since last packed, otherwise SLURM_ERROR</p>
 
 <p class="commandline">int select_p_get_select_nodeinfo(struct node_record *node_ptr,
@@ -390,11 +390,11 @@ the plugin should return SLURM_ERROR.</p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Versioning</h2>
-<p> This document describes version 1 of the SLURM node selection API. Future 
-releases of SLURM may revise this API. A node selection plugin conveys its ability 
-to implement a particular API version using the mechanism outlined for SLURM plugins. 
-In addition, the credential is transmitted along with the version number of the 
-plugin that transmitted it. It is at the discretion of the plugin author whether 
+<p> This document describes version 1 of the SLURM node selection API. Future
+releases of SLURM may revise this API. A node selection plugin conveys its ability
+to implement a particular API version using the mechanism outlined for SLURM plugins.
+In addition, the credential is transmitted along with the version number of the
+plugin that transmitted it. It is at the discretion of the plugin author whether
 to maintain data format compatibility across different versions of the plugin.</p>
 
 <p class="footer"><a href="#top">top</a></p>
diff --git a/doc/html/slurm.sc08.bof.pdf b/doc/html/slurm.sc08.bof.pdf
index 39611c34f438f0dc260e7d68d8c21923de3dd82b..01a79e26bc8a876d56f20ccd6450c51d6fd5afa5 100644
Binary files a/doc/html/slurm.sc08.bof.pdf and b/doc/html/slurm.sc08.bof.pdf differ
diff --git a/doc/html/slurm.sc08.status.pdf b/doc/html/slurm.sc08.status.pdf
index 51dfea8dab2ccee655b906a4470e738197e97be4..3f71342e36a28c74498d8c6ffbc5b643397274d1 100644
Binary files a/doc/html/slurm.sc08.status.pdf and b/doc/html/slurm.sc08.status.pdf differ
diff --git a/doc/html/slurm.shtml b/doc/html/slurm.shtml
index c673a24fbd8e560c3c5b0763b3e84772328fb05f..0142122329d48b3980a19394710ab98278f900ca 100644
--- a/doc/html/slurm.shtml
+++ b/doc/html/slurm.shtml
@@ -2,56 +2,56 @@
 
 <h1>SLURM: A Highly Scalable Resource Manager</h1>
 
-<p>SLURM is an open-source resource manager designed for Linux clusters of 
-all sizes. 
-It provides three key functions. 
-First it allocates exclusive and/or non-exclusive access to resources 
-(computer nodes) to users for some duration of time so they can perform work. 
-Second, it provides a framework for starting, executing, and monitoring work 
-(typically a parallel job) on a set of allocated nodes. 
-Finally, it arbitrates contention for resources by managing a queue of 
+<p>SLURM is an open-source resource manager designed for Linux clusters of
+all sizes.
+It provides three key functions.
+First it allocates exclusive and/or non-exclusive access to resources
+(computer nodes) to users for some duration of time so they can perform work.
+Second, it provides a framework for starting, executing, and monitoring work
+(typically a parallel job) on a set of allocated nodes.
+Finally, it arbitrates contention for resources by managing a queue of
 pending work. </p>
 
 <p>SLURM's design is very modular with dozens of optional plugins.
-In its simplest configuration, it can be installed and configured in a 
+In its simplest configuration, it can be installed and configured in a
 couple of minutes (see <a href="http://www.linux-mag.com/id/7239/1/">
-Caos NSA and Perceus: All-in-one Cluster Software Stack</a> 
+Caos NSA and Perceus: All-in-one Cluster Software Stack</a>
 by Jeffrey B. Layton).
-More complex configurations rely upon a 
-<a href="http://www.mysql.com/">MySQL</a> database for archiving 
-<a href="accounting.html">accounting</a> records, managing 
-<a href="resource_limits.html">resource limits</a> by user or bank account, 
-or supporting sophisticated 
+More complex configurations rely upon a
+<a href="http://www.mysql.com/">MySQL</a> database for archiving
+<a href="accounting.html">accounting</a> records, managing
+<a href="resource_limits.html">resource limits</a> by user or bank account,
+or supporting sophisticated
 <a href="priority_multifactor.html">job prioritization</a> algorithms.
-SLURM also provides an Applications Programming Interface (API) for 
-integration with external schedulers such as 
+SLURM also provides an Applications Programming Interface (API) for
+integration with external schedulers such as
 <a href="http://www.clusterresources.com/pages/products/maui-cluster-scheduler.php">
-The Maui Scheduler</a> or 
+The Maui Scheduler</a> or
 <a href="http://www.clusterresources.com/pages/products/moab-cluster-suite.php">
 Moab Cluster Suite</a>.</p>
 
-<p>While other resource managers do exist, SLURM is unique in several 
-respects: 
+<p>While other resource managers do exist, SLURM is unique in several
+respects:
 <ul>
-<li>Its source code is freely available under the 
+<li>Its source code is freely available under the
 <a href="http://www.gnu.org/licenses/gpl.html">GNU General Public License</a>.</li>
 <li>It is designed to operate in a heterogeneous cluster with up to 65,536 nodes
 and hundreds of thousands of processors.</li>
-<li>It is portable; written in C with a GNU autoconf configuration engine. 
-While initially written for Linux, other UNIX-like operating systems should 
+<li>It is portable; written in C with a GNU autoconf configuration engine.
+While initially written for Linux, other UNIX-like operating systems should
 be easy porting targets.</li>
-<li>SLURM is highly tolerant of system failures, including failure of the node 
+<li>SLURM is highly tolerant of system failures, including failure of the node
 executing its control functions.</li>
-<li>A plugin mechanism exists to support various interconnects, authentication 
+<li>A plugin mechanism exists to support various interconnects, authentication
 mechanisms, schedulers, etc. These plugins are documented and  simple enough for the motivated end user to understand the source and add functionality.</li>
 </ul></p>
 
 <p>SLURM provides resource management on about 1000 computers worldwide,
 including many of the most powerful computers in the world:
 <ul>
-<li><a href="https://asc.llnl.gov/computing_resources/bluegenel/">BlueGene/L</a> 
+<li><a href="https://asc.llnl.gov/computing_resources/bluegenel/">BlueGene/L</a>
 at LLNL with 106,496 dual-core processors</li>
-<li><a href="http://c-r-labs.com/">EKA</a> at Computational Research Laboratories, 
+<li><a href="http://c-r-labs.com/">EKA</a> at Computational Research Laboratories,
 India with 14,240 Xeon processors and Infiniband interconnect</li>
 <li><a href="https://asc.llnl.gov/computing_resources/purple/">ASC Purple</a>
 an IBM SP/AIX cluster at LLNL with 12,208 Power5 processors and a Federation switch</li>
@@ -60,17 +60,17 @@ a Linux cluster at Barcelona Supercomputer Center
 with 10,240 PowerPC processors and a Myrinet switch</li>
 <li><a href="http://en.wikipedia.org/wiki/Anton_(computer)">Anton</a>
 a massively parallel supercomputer designed and built by
-<a href="http://www.deshawresearch.com/">D. E. Shaw Research</a> 
-for molecular dynamics simulation using 512 custom-designed ASICs 
+<a href="http://www.deshawresearch.com/">D. E. Shaw Research</a>
+for molecular dynamics simulation using 512 custom-designed ASICs
 and a three-dimensional torus interconnect </li>
-<li><a href="http://news.xinhuanet.com/english/2009-10/29/content_12356478.htm">Tianhe</a> at The National University of Defence Technology (NUDT) with 
+<li><a href="http://news.xinhuanet.com/english/2009-10/29/content_12356478.htm">Tianhe</a> at The National University of Defence Technology (NUDT) with
 6,144 Intel CPUs and 5,120 AMD GPUs.  Debuting as China's fastest super computer</li>
 </ul>
-<p>SLURM is actively being developed, distributed and supported by 
+<p>SLURM is actively being developed, distributed and supported by
 <a href="https://www.llnl.gov">Lawrence Livermore National Laboratory</a>,
-<a href="http://www.hp.com">Hewlett-Packard</a> and 
+<a href="http://www.hp.com">Hewlett-Packard</a> and
 <a href="http://www.bull.com">Bull</a>.
-It is also distributed and supported by 
+It is also distributed and supported by
 <a href="http://www.clusterresources.com">Cluster Resources</a>,
 <a href="http://www.sicortex.com">SiCortex</a>,
 <a href="http://www.infiscale.com">Infiscale</a>,
diff --git a/doc/html/slurm_banner.gif b/doc/html/slurm_banner.gif
index 3a6d6d517ff0340764b5a687ae5fde4fed4d598c..31e6e5425a31b9f8922b0cdf9a36fe1af5cb3e08 100644
Binary files a/doc/html/slurm_banner.gif and b/doc/html/slurm_banner.gif differ
diff --git a/doc/html/slurm_design.pdf b/doc/html/slurm_design.pdf
index fcfc634c73d9c00ecba624d3754a690c31768c23..e8abcab7808757a95033d493c390154aea1e68e3 100644
Binary files a/doc/html/slurm_design.pdf and b/doc/html/slurm_design.pdf differ
diff --git a/doc/html/slurm_moab.pdf b/doc/html/slurm_moab.pdf
index 576a8ae0adb60548e3144b41256a8e32a5f5e80f..19c4b9263f82e974fe62ba5fad8a75110e0fb33f 100644
Binary files a/doc/html/slurm_moab.pdf and b/doc/html/slurm_moab.pdf differ
diff --git a/doc/html/slurm_v1.2.pdf b/doc/html/slurm_v1.2.pdf
index 7a3770cc05b8fd656c50efb2058cd6ec2e25ae21..0b7d262c5a40f00ca0a04d1f200092f903a901c8 100644
Binary files a/doc/html/slurm_v1.2.pdf and b/doc/html/slurm_v1.2.pdf differ
diff --git a/doc/html/slurm_v1.3.pdf b/doc/html/slurm_v1.3.pdf
index 1068bcffac4c388f4398549242e026640892fdff..5e1dc2757def891d023bde2c5e105f3861bf13dd 100644
Binary files a/doc/html/slurm_v1.3.pdf and b/doc/html/slurm_v1.3.pdf differ
diff --git a/doc/html/slurmstyles.css b/doc/html/slurmstyles.css
index 2505e4180485dfbc1c662123476e5b97738c4a73..77f0e94b353031e828d20ab3bba5f7caf61a97f1 100644
--- a/doc/html/slurmstyles.css
+++ b/doc/html/slurmstyles.css
@@ -23,7 +23,7 @@ a:visited {
   color: #581C90;
 }
 
-a.nav:visited { 
+a.nav:visited {
   /*color: #6633FF;*/
   color: #581C90;
 }
diff --git a/doc/html/sponsors.gif b/doc/html/sponsors.gif
index 2d5bf921dd688b3eed3798e824fd9cfccecc16f9..35dfc08a698a9c725c359a0e800f37ee97b45342 100644
Binary files a/doc/html/sponsors.gif and b/doc/html/sponsors.gif differ
diff --git a/doc/html/sun_const.shtml b/doc/html/sun_const.shtml
index 5a31fe7fe86969d133fce1185923e39001a4b9cb..25830b50bfb7ddb2ca14be32ea7824a88785b919 100644
--- a/doc/html/sun_const.shtml
+++ b/doc/html/sun_const.shtml
@@ -6,108 +6,108 @@
 
 <p>This document describes the unique features of SLURM on
 Sun Constellation computers.
-You should be familiar with the SLURM's mode of operation on Linux clusters 
-before studying the relatively few differences in Sun Constellation system 
+You should be familiar with the SLURM's mode of operation on Linux clusters
+before studying the relatively few differences in Sun Constellation system
 operation described in this document.</p>
 
 <p>SLURM's primary mode of operation is designed for use on clusters with
-nodes configured in a one-dimensional space. 
-A topology plugin was developed to optimize resource allocations in 
+nodes configured in a one-dimensional space.
+A topology plugin was developed to optimize resource allocations in
 three dimensions.
-Changes were also made for hostlist parsing to support hostnames 
+Changes were also made for hostlist parsing to support hostnames
 of an appropriate format.</p>
 
 <h2>Configuration</h2>
 
-<p>Two variables must be defined in the <i>config.h</i> file: 
-<i>HAVE_SUN_CONST</i> and <i>SYSTEM_DIMENSIONS=4</i> 
+<p>Two variables must be defined in the <i>config.h</i> file:
+<i>HAVE_SUN_CONST</i> and <i>SYSTEM_DIMENSIONS=4</i>
 (more on that value later).
-This can be accomplished in several different ways depending upon how 
+This can be accomplished in several different ways depending upon how
 SLURM is being built.
 <ol>
-<li>Execute the <i>configure</i> command with the option 
+<li>Execute the <i>configure</i> command with the option
 <i>--enable-sun-const</i> <b>OR</b></li>
-<li>Execute the <i>rpmbuild</i> command with the option 
+<li>Execute the <i>rpmbuild</i> command with the option
 <i>--with sun_const</i> <b>OR</b></li>
 <li>Add <i>%with_sun_const 1</i> to your <i>~/.rpmmacros</i> file.</li>
 </ol></p>
 
-<p>Node names must have a four-digit suffix describing identifying their 
+<p>Node names must have a four-digit suffix describing identifying their
 location (this is why SYSTEM_DIMENSIONS is configured to be 4).
-The first three digits specify the node's zero-origin position in the 
+The first three digits specify the node's zero-origin position in the
 X-, Y- and Z-dimension respectively.
 This is followed by one digit specifying the node's sequence number
-at that coordinate (e.g. "tux0123" for X=0, Y=1, Z=2, sequence_number=3; 
-"tux1234" for X=1, Y=2, Z=3, sequence_number=4). 
+at that coordinate (e.g. "tux0123" for X=0, Y=1, Z=2, sequence_number=3;
+"tux1234" for X=1, Y=2, Z=3, sequence_number=4).
 The coordinate location should be zero-origin (starting at X=0, Y=0, Z=0).
 The sequence number should also start at zero and can include upper
-case letters for higher values, for up to 36 nodes at a specific coordinate 
+case letters for higher values, for up to 36 nodes at a specific coordinate
 (e.g. 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, A, B, C, D, ... Z).
-To avoid confusion, we recommend that the node name prefix consist of 
+To avoid confusion, we recommend that the node name prefix consist of
 lower case letters.
-Numerically sequential node names may specified by in SLURM 
-commands and configuration files using the system name prefix with the 
-end-points enclosed in square brackets and separated by an "-". 
-For example "tux[0000-000B]" is used to represent the twelve nodes 
+Numerically sequential node names may specified by in SLURM
+commands and configuration files using the system name prefix with the
+end-points enclosed in square brackets and separated by an "-".
+For example "tux[0000-000B]" is used to represent the twelve nodes
 with sequence numbers from 0 to B, all at coordinate X=0, Y=0 and Z=0.
-Alternately, rectangular prisms of node names can be specified using the 
-system name prefix with the end-points enclosed in square brackets and 
-separated by an "x". 
-For example "tux[0000x0111]" is used to represent the eight nodes in a 
-block with endpoints at "tux0000" and "tux0111" (tux0000, tux0001, tux0010, 
+Alternately, rectangular prisms of node names can be specified using the
+system name prefix with the end-points enclosed in square brackets and
+separated by an "x".
+For example "tux[0000x0111]" is used to represent the eight nodes in a
+block with endpoints at "tux0000" and "tux0111" (tux0000, tux0001, tux0010,
 tux0011, tux0100, tux0101, tux0110 and tux0111).
-Viewed another way, these eight nodes have sequence numbers 0 or 1 
+Viewed another way, these eight nodes have sequence numbers 0 or 1
 and have four distinct coordinates (000, 001, 010 and 011).
 While node names of this form are required for SLURM's internal use,
-it need not be the name returned by the <i>hostlist -s</i> command. 
+it need not be the name returned by the <i>hostlist -s</i> command.
 See <i>man slurm.conf</i> for details on how to use the <i>NodeName</i>,
-<i>NodeAddr</i> and <i>NodeHostName</i> configuration parameters 
+<i>NodeAddr</i> and <i>NodeHostName</i> configuration parameters
 for flexibility in this matter.</p>
 
-<p>Next you need to select from two options for the resource selection 
+<p>Next you need to select from two options for the resource selection
 plugin (the <i>SelectType</i> option in SLURM's <i>slurm.conf</i> configuration
 file):
 <ol>
-<li><b>select/cons_res</b> - Performs a best-fit algorithm based upon a 
+<li><b>select/cons_res</b> - Performs a best-fit algorithm based upon a
 one-dimensional space to allocate whole nodes, sockets, or cores to jobs
 based upon other configuration parameters.</li>
-<li><b>select/linear</b> - Performs a best-fit algorithm based upon a 
+<li><b>select/linear</b> - Performs a best-fit algorithm based upon a
 one-dimensional space to allocate whole nodes to jobs.</li>
 </ol>
 
-<p>In order for <i>select/cons_res</i> or <i>select/linear</i> to 
-allocate resources physically nearby in four-dimensional space, the 
-nodes be specified in SLURM's <i>slurm.conf</i> configuration file in 
+<p>In order for <i>select/cons_res</i> or <i>select/linear</i> to
+allocate resources physically nearby in four-dimensional space, the
+nodes be specified in SLURM's <i>slurm.conf</i> configuration file in
 such a fashion that those nearby in <i>slurm.conf</i> (managed
-internal to SLURM as a one-dimensional space) are also nearby in 
+internal to SLURM as a one-dimensional space) are also nearby in
 the physical four-dimensional space.</p>
 
-<p>SLURM can automatically perform that conversion using a 
+<p>SLURM can automatically perform that conversion using a
 <a href="http://en.wikipedia.org/wiki/Hilbert_curve">Hilbert curve</a>.
-Set <i>TopologyPlugin=topology/3d_torus</i> in SLURM's <i>slurm.conf</i> 
-configuration file for nodes to be reordered appropriately. 
+Set <i>TopologyPlugin=topology/3d_torus</i> in SLURM's <i>slurm.conf</i>
+configuration file for nodes to be reordered appropriately.
 First a three-dimensional Hilbert curve is constructed through all
 coordinates in the system such that every coordinate in the order list
-physically adjacent. 
+physically adjacent.
 The node list are then re-ordered following that Hilbert curve while
-maintaining the node's sequence number (i.e. not building a Hilbert 
+maintaining the node's sequence number (i.e. not building a Hilbert
 curve through that fourth dimension).
-If the number of nodes at each coordinate varies, it may be necessary to 
+If the number of nodes at each coordinate varies, it may be necessary to
 put a separate node definition line in the <i>slurm.conf</i> file.
 If that is the case, put them in numeric order for the <i>topology/3d_torus</i>
 plugin to function properly.<p>
 
-<p>Alternately configure <i>TopologyPlugin=topology/none</i> and 
+<p>Alternately configure <i>TopologyPlugin=topology/none</i> and
 construct your own node ordering sequence as desired in <i>slurm.conf</i>.
 Note that each node must be listed exactly once and consecutive
-nodes should be nearby in three-dimensional space. 
-The open source code used by SLURM to generate the Hilbert curve is 
+nodes should be nearby in three-dimensional space.
+The open source code used by SLURM to generate the Hilbert curve is
 included in the distribution at <i>contribs/skilling.c</i> in the event
 that you wish to experiment with it to generate your own node ordering.
 Two examples of SLURM configuration files are shown below:</p>
 
 <pre>
-# slurm.conf for Sun Constellation system of size 4x4x4 
+# slurm.conf for Sun Constellation system of size 4x4x4
 # with eight nodes at each coordinate (512 nodes total)
 
 # Configuration parameters removed here
@@ -136,19 +136,19 @@ PartitionName=debug Nodes=tux[0000-0007,0010-001B,0100-0107,0110-0115]
 
 <h2>Tools</h2>
 
-<p>The node names output by the <i>scontrol show nodes</i> command 
+<p>The node names output by the <i>scontrol show nodes</i> command
 will be ordered as defined (sequentially along the Hilbert curve)
-rather than in numeric order (e.g. "tux0010" may follow "tux1010" rather 
-than "tux0000"). 
-The output of the <i>smap</i> and <i>sview</i> commands will also display 
-nodes ordered by the Hilbert curve so that nodes appearing adjacent in the 
-display will be physically adjacent. 
-This permits the locality of a job, partition or reservation to be easily 
+rather than in numeric order (e.g. "tux0010" may follow "tux1010" rather
+than "tux0000").
+The output of the <i>smap</i> and <i>sview</i> commands will also display
+nodes ordered by the Hilbert curve so that nodes appearing adjacent in the
+display will be physically adjacent.
+This permits the locality of a job, partition or reservation to be easily
 determined.
-In order to locate specific nodes with the <i>sview</i> command, select 
-<i>Actions</i>, <i>Search</i> and <i>Node(s) Name</i> then enter the desired 
+In order to locate specific nodes with the <i>sview</i> command, select
+<i>Actions</i>, <i>Search</i> and <i>Node(s) Name</i> then enter the desired
 node names.
-The output of other SLURM commands (e.g. <i>sinfo</i> and <i>squeue</i>) 
+The output of other SLURM commands (e.g. <i>sinfo</i> and <i>squeue</i>)
 will use a SLURM hostlist expression with the node names numerically ordered).
 SLURM partitions should contain nodes which are defined sequentially
 by that ordering for optimal performance.</p>
diff --git a/doc/html/switchplugins.shtml b/doc/html/switchplugins.shtml
index 3f8f65da7b57b61fb2f6780a72fbec3f18eee042..00a0a5d774a408f3e85910f47fd694f524c68932 100644
--- a/doc/html/switchplugins.shtml
+++ b/doc/html/switchplugins.shtml
@@ -3,53 +3,53 @@
 <h1><a name="top">SLURM Switch Plugin API</a></h1>
 
 <h2> Overview</h2>
-<p> This document describes SLURM switch (interconnect) plugins and the API that defines 
-them. It is intended as a resource to programmers wishing to write their own SLURM 
+<p> This document describes SLURM switch (interconnect) plugins and the API that defines
+them. It is intended as a resource to programmers wishing to write their own SLURM
 switch plugins. This is version 0 of the API.
 Note that many of the API functions are used only by one of the daemons. For
-example the slurmctld daemon builds a job step's switch credential  
+example the slurmctld daemon builds a job step's switch credential
 (<span class="commandline">switch_p_build_jobinfo</span>) while the
-slurmd daemon enables and disables that credential for the job step's 
-tasks on a particular node(<span class="commandline">switch_p_job_init</span>, 
+slurmd daemon enables and disables that credential for the job step's
+tasks on a particular node(<span class="commandline">switch_p_job_init</span>,
 etc.). </p>
 
 <p>SLURM switch plugins are SLURM plugins that implement the SLURM switch or interconnect
-API described herein. They must conform to the SLURM Plugin API with the following 
+API described herein. They must conform to the SLURM Plugin API with the following
 specifications:</p>
 <p><span class="commandline">const char plugin_type[]</span><br>
-The major type must be &quot;switch.&quot; The minor type can be any recognizable 
+The major type must be &quot;switch.&quot; The minor type can be any recognizable
 abbreviation for the type of switch. We recommend, for example:</p>
 <ul>
-<li><b>none</b>&#151;A plugin that implements the API without providing any actual 
+<li><b>none</b>&#151;A plugin that implements the API without providing any actual
 switch service. This is the case for Ethernet and Myrinet interconnects.</li>
-<li><b>elan</b>&#151;<a href="http://www.quadrics.com/">Quadrics</a>  Elan3 or Elan4 
+<li><b>elan</b>&#151;<a href="http://www.quadrics.com/">Quadrics</a>  Elan3 or Elan4
 interconnect.</li>
 <li><b>federation</b>&#151;IBM Federation interconnects (presently under development).</li>
 </ul>
-<p>The <span class="commandline">plugin_name</span> and 
-<span class="commandline">plugin_version</span> 
-symbols required by the SLURM Plugin API require no specialization for switch support. 
+<p>The <span class="commandline">plugin_name</span> and
+<span class="commandline">plugin_version</span>
+symbols required by the SLURM Plugin API require no specialization for switch support.
 Note carefully, however, the versioning discussion below.</p>
-<p>The programmer is urged to study 
+<p>The programmer is urged to study
 <span class="commandline">src/plugins/switch/switch_elan.c</span> and
-<span class="commandline">src/plugins/switch/switch_none.c</span> 
+<span class="commandline">src/plugins/switch/switch_none.c</span>
 for sample implementations of a SLURM switch plugin.</p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Data Objects</h2>
 <p> The implementation must support two opaque data classes.
-One is used as an job's switch &quot;credential.&quot; 
-This class must encapsulate all job-specific information necessary 
-for the operation of the API specification below. 
+One is used as a job's switch &quot;credential.&quot;
+This class must encapsulate all job-specific information necessary
+for the operation of the API specification below.
 The second is a node's switch state record.
-Both data classes are referred to in SLURM code using an anonymous 
+Both data classes are referred to in SLURM code using an anonymous
 pointer (void *).</p>
 
-<p>The implementation must maintain (though not necessarily directly export) an 
-enumerated <span class="commandline">errno</span>  to allow SLURM to discover 
-as practically as possible the reason for any failed API call. Plugin-specific enumerated 
-integer values should be used when appropriate. It is desirable that these values 
-be mapped into the range ESLURM_SWITCH_MIN and ESLURM_SWITCH_MAX 
+<p>The implementation must maintain (though not necessarily directly export) an
+enumerated <span class="commandline">errno</span>  to allow SLURM to discover
+as practically as possible the reason for any failed API call. Plugin-specific enumerated
+integer values should be used when appropriate. It is desirable that these values
+be mapped into the range ESLURM_SWITCH_MIN and ESLURM_SWITCH_MAX
 as defined in <span class="commandline">slurm/slurm_errno.h</span>.
 The error number should be returned by the function
 <a href="#get_errno"><span class="commandline">switch_p_get_errno()</span></a>
@@ -57,49 +57,49 @@ and this error number can be converted to an appropriate string description usin
 <a href="#strerror"><span class="commandline">switch_p_strerror()</span></a>
 function described below.</p>
 
-<p>These values must not be used as return values in integer-valued functions 
-in the API. The proper error return value from integer-valued functions is SLURM_ERROR. 
-The implementation should endeavor to provide useful and pertinent information by 
-whatever means is practical. In some cases this means an errno for each credential, 
-since plugins must be re-entrant. If a plugin maintains a global errno in place of or in 
-addition to a per-credential errno, it is not required to enforce mutual exclusion on it. 
-Successful API calls are not required to reset any errno to a known value. However, 
-the initial value of any errno, prior to any error condition arising, should be 
+<p>These values must not be used as return values in integer-valued functions
+in the API. The proper error return value from integer-valued functions is SLURM_ERROR.
+The implementation should endeavor to provide useful and pertinent information by
+whatever means is practical. In some cases this means an errno for each credential,
+since plugins must be re-entrant. If a plugin maintains a global errno in place of or in
+addition to a per-credential errno, it is not required to enforce mutual exclusion on it.
+Successful API calls are not required to reset any errno to a known value. However,
+the initial value of any errno, prior to any error condition arising, should be
 SLURM_SUCCESS. </p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>API Functions</h2>
-<p>The following functions must appear. Functions which are not implemented should 
+<p>The following functions must appear. Functions which are not implemented should
 be stubbed.</p>
 
 <h3>Global Switch State Functions</h3>
 <p class="commandline">int switch_p_libstate_save (char *dir_name);</p>
-<p style="margin-left:.2in"><b>Description</b>: Save any global switch state to a file 
-within the specified directory. The actual file name used is plugin specific. It is recommended 
-that the global switch state contain a magic number for validation purposes. This function 
-is called by the slurmctld deamon on shutdown. Note that if the slurmctld daemon fails, 
-this function will not be called. The plugin may save state independently and/or make 
+<p style="margin-left:.2in"><b>Description</b>: Save any global switch state to a file
+within the specified directory. The actual file name used is plugin specific. It is recommended
+that the global switch state contain a magic number for validation purposes. This function
+is called by the slurmctld daemon on shutdown. Note that if the slurmctld daemon fails,
+this function will not be called. The plugin may save state independently and/or make
 use of the switch_p_job_step_allocated function to restore state.</p>
-<p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> dir_name</span>&nbsp; 
+<p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> dir_name</span>&nbsp;
 &nbsp;&nbsp;(input) fully-qualified pathname of a directory into which user SlurmUser (as defined
 in slurm.conf) can create a file and write state information into that file. Cannot be NULL.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
-the plugin should return SLURM_ERROR and set the errno to an appropriate value 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">int switch_p_libstate_restore(char *dir_name, bool recover);</p>
-<p style="margin-left:.2in"><b>Description</b>: Restore any global switch state from a file 
-within the specified directory. The actual file name used is plugin specific. It is recommended 
-that any magic number associated with the global switch state be verified. This function 
+<p style="margin-left:.2in"><b>Description</b>: Restore any global switch state from a file
+within the specified directory. The actual file name used is plugin specific. It is recommended
+that any magic number associated with the global switch state be verified. This function
 is called by the slurmctld deamon on startup.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline"> dir_name</span>&nbsp; 
-&nbsp;&nbsp;(input) fully-qualified pathname of a directory containing a state information file 
+<span class="commandline"> dir_name</span>&nbsp;
+&nbsp;&nbsp;(input) fully-qualified pathname of a directory containing a state information file
 from which user SlurmUser (as defined in slurm.conf) can read. Cannot be NULL.<br>
 <span class="commandline"><span class="commandline"> recover</span>&nbsp;
 true of restart with state preserved, false if no state recovery. </p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
-the plugin should return SLURM_ERROR and set the errno to an appropriate value 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">int switch_p_libstate_clear (void);</p>
@@ -112,22 +112,22 @@ to indicate the reason for failure.</p>
 
 <h3>Node's Switch State Monitoring Functions</h3>
 
-<p>Nodes will register with current switch state information when the slurmd daemon 
-is initiated. The slurmctld daemon will also request that slurmd supply current 
+<p>Nodes will register with current switch state information when the slurmd daemon
+is initiated. The slurmctld daemon will also request that slurmd supply current
 switch state information on a periodic basis.</p>
 
 <p class="commandline">int switch_p_clear_node_state (void);</p>
-<p style="margin-left:.2in"><b>Description</b>: Initialize node state. 
-If any switch state has previously been established for a job, it will be cleared. 
-This will be used to establish a "clean" state for the switch on the node upon 
+<p style="margin-left:.2in"><b>Description</b>: Initialize node state.
+If any switch state has previously been established for a job, it will be cleared.
+This will be used to establish a "clean" state for the switch on the node upon
 which it is executed.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">int switch_p_alloc_node_info(switch_node_info_t *switch_node);</p>
-<p style="margin-left:.2in"><b>Description</b>: Allocate storage for a node's switch 
-state record. It is recommended that the record contain a magic number for validation 
+<p style="margin-left:.2in"><b>Description</b>: Allocate storage for a node's switch
+state record. It is recommended that the record contain a magic number for validation
 purposes.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> switch_node</span>&nbsp;
 &nbsp;&nbsp;(output) location for writing location of node's switch state record.</p>
@@ -136,34 +136,34 @@ the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">int switch_p_build_node_info(switch_node_info_t switch_node);</p>
-<p style="margin-left:.2in"><b>Description</b>: Fill in a previously allocated switch state 
-record for the node on which this function is executed. 
+<p style="margin-left:.2in"><b>Description</b>: Fill in a previously allocated switch state
+record for the node on which this function is executed.
 It is recommended that the magic number be validated.
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
-<p class="commandline">int switch_p_pack_node_info (switch_node_info_t switch_node, 
+<p class="commandline">int switch_p_pack_node_info (switch_node_info_t switch_node,
 Buf buffer);</p>
-<p style="margin-left:.2in"><b>Description</b>: Pack the data associated with a 
+<p style="margin-left:.2in"><b>Description</b>: Pack the data associated with a
 node's switch state into a buffer for network transmission.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline"> switch_node</span>&nbsp; &nbsp;&nbsp;(input) an existing 
+<span class="commandline"> switch_node</span>&nbsp; &nbsp;&nbsp;(input) an existing
 node's switch state record.<br>
 <span class="commandline"> buffer</span>&nbsp; &nbsp;&nbsp;(input/output) buffer onto
 which the switch state information is appended.</p>
-<p style="margin-left:.2in"><b>Returns</b>: 
+<p style="margin-left:.2in"><b>Returns</b>:
 The number of bytes written should be returned if successful. On failure,
 the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
-<p class="commandline">int switch_p_unpack_node_info (switch_node_info_t switch_node, 
+<p class="commandline">int switch_p_unpack_node_info (switch_node_info_t switch_node,
 Buf buffer);</p>
-<p style="margin-left:.2in"><b>Description</b>: Unpack the data associated with a 
+<p style="margin-left:.2in"><b>Description</b>: Unpack the data associated with a
 node's switch state record from a buffer.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline"> switch_node</span>&nbsp; &nbsp;&nbsp;(input/output) a 
-previously allocated node switch state record to be filled in with data read from 
+<span class="commandline"> switch_node</span>&nbsp; &nbsp;&nbsp;(input/output) a
+previously allocated node switch state record to be filled in with data read from
 the buffer.<br>
 <span class="commandline"> buffer</span>&nbsp; &nbsp;&nbsp;(input/output) buffer from
 which the record's contents are read.</p>
@@ -172,21 +172,21 @@ the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">void switch_p_free_node_info (switch_node_info_t switch_node);</p>
-<p style="margin-left:.2in"><b>Description</b>: Release the storage associated with 
+<p style="margin-left:.2in"><b>Description</b>: Release the storage associated with
 a node's switch state record.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> switch_node</span>&nbsp;
 &nbsp;&nbsp;(intput/output) a previously allocated node switch state record.</p>
 <p style="margin-left:.2in"><b>Returns</b>: None</p>
 
-<p class="commandline">char * switch_p_sprintf_node_info (switch_node_info_t switch_node, 
+<p class="commandline">char * switch_p_sprintf_node_info (switch_node_info_t switch_node,
 char *buf, size_t size);</p>
-<p style="margin-left:.2in"><b>Description</b>: Print the contents of a node's switch state 
+<p style="margin-left:.2in"><b>Description</b>: Print the contents of a node's switch state
 record to a buffer.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline"> switch_node</span>&nbsp; &nbsp;&nbsp;(input) a 
+<span class="commandline"> switch_node</span>&nbsp; &nbsp;&nbsp;(input) a
 node's switch state record.<br>
 <span class="commandline"> buf</span>&nbsp; &nbsp;&nbsp;(input/output) point to
-buffer into which the switch state record is to be written.<br> 
+buffer into which the switch state record is to be written.<br>
 of buf in bytes.<br>
 <span class="commandline"> size</span>&nbsp; &nbsp;&nbsp;(input) size
 of buf in bytes.</p>
@@ -196,38 +196,38 @@ of buf in bytes.</p>
 <p class="commandline">int switch_p_alloc_jobinfo(switch_jobinfo_t *switch_job);</p>
 <p style="margin-left:.2in"><b>Description</b>: Allocate storage for a job's switch credential.
 It is recommended that the credential contain a magic number for validation purposes.</p>
-<p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> switch_job</span>&nbsp; 
+<p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> switch_job</span>&nbsp;
 &nbsp;&nbsp;(output) location for writing location of job's switch credential.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
-the plugin should return SLURM_ERROR and set the errno to an appropriate value 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
-<p class="commandline">int switch_p_build_jobinfo (switch_jobinfo_t switch_job, 
+<p class="commandline">int switch_p_build_jobinfo (switch_jobinfo_t switch_job,
 char *nodelist, int *tasks_per_node, int cyclic_alloc, char *network);</p>
 <p style="margin-left:.2in"><b>Description</b>: Build a job's switch credential.
 It is recommended that the credential's magic number be validated.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline">switch_job</span>&nbsp; &nbsp;&nbsp;(input/output) Job's 
+<span class="commandline">switch_job</span>&nbsp; &nbsp;&nbsp;(input/output) Job's
 switch credential to be updated<br>
-<span class="commandline">nodelist</span>&nbsp;&nbsp;&nbsp; (input) List of nodes 
-allocated to the job. This may contain expressions to specify node ranges (e.g. 
+<span class="commandline">nodelist</span>&nbsp;&nbsp;&nbsp; (input) List of nodes
+allocated to the job. This may contain expressions to specify node ranges (e.g.
 "linux[1-20]" or "linux[2,4,6,8]").<br>
 <span class="commandline">tasks_per_node</span>&nbsp;&nbsp;&nbsp; (input) List
 of processes per node to be initiated as part of the job.<br>
-<span class="commandline">cyclic_alloc</span>&nbsp;&nbsp;&nbsp; (input) Non-zero 
-if job's processes are to be allocated across nodes in a cyclic fashion (task 0 on node 0, 
-task 1 on node 1, etc). If zero, processes are allocated sequentially on a node before 
+<span class="commandline">cyclic_alloc</span>&nbsp;&nbsp;&nbsp; (input) Non-zero
+if job's processes are to be allocated across nodes in a cyclic fashion (task 0 on node 0,
+task 1 on node 1, etc). If zero, processes are allocated sequentially on a node before
 moving to the next node (tasks 0 and 1 on node 0, tasks 2 and 3 on node 1, etc.).<br>
 <span class="commandline">network</span>&nbsp;&nbsp;&nbsp; (input) Job's network
 specification from srun command. </p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
-the plugin should return SLURM_ERROR and set the errno to an appropriate value 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">switch_jobinfo_t switch_p_copy_jobinfo  (switch_jobinfo_t switch_job);</p>
-<p style="margin-left:.2in"><b>Description</b>: Allocate storage for a job's switch credential 
+<p style="margin-left:.2in"><b>Description</b>: Allocate storage for a job's switch credential
 and copy an existing credential to that location.</p>
-<p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> switch_job</span>&nbsp; 
+<p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> switch_job</span>&nbsp;
 &nbsp;&nbsp;(input) an existing job switch credential.</p>
 <p style="margin-left:.2in"><b>Returns</b>: A newly allocated job switch credential containing a
 copy of the function argument.</p>
@@ -235,69 +235,69 @@ copy of the function argument.</p>
 <p class="commandline">void switch_p_free_jobinfo (switch_jobinfo_t switch_job);</p>
 <p style="margin-left:.2in"><b>Description</b>: Release the storage associated with a job's
  switch credential.</p>
-<p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> switch_job</span>&nbsp; 
+<p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> switch_job</span>&nbsp;
+&nbsp;&nbsp;(input) an existing job switch credential.</p>
 <p style="margin-left:.2in"><b>Returns</b>: None</p>
 
 <p class="commandline">int switch_p_pack_jobinfo (switch_jobinfo_t switch_job, Buf buffer);</p>
-<p style="margin-left:.2in"><b>Description</b>: Pack the data associated with a job's 
+<p style="margin-left:.2in"><b>Description</b>: Pack the data associated with a job's
 switch credential into a buffer for network transmission.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline"> switch_job</span>&nbsp; &nbsp;&nbsp;(input) an existing job 
+<span class="commandline"> switch_job</span>&nbsp; &nbsp;&nbsp;(input) an existing job
 switch credential.<br>
-<span class="commandline"> buffer</span>&nbsp; &nbsp;&nbsp;(input/output) buffer onto 
+<span class="commandline"> buffer</span>&nbsp; &nbsp;&nbsp;(input/output) buffer onto
 which the credential's contents are appended.</p>
-<p style="margin-left:.2in"><b>Returns</b>: 
-The number of bytes written should be returned if successful. On failure, 
-the plugin should return SLURM_ERROR and set the errno to an appropriate value 
+<p style="margin-left:.2in"><b>Returns</b>:
+The number of bytes written should be returned if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">int switch_p_unpack_jobinfo (switch_jobinfo_t switch_job, Buf buffer);</p>
-<p style="margin-left:.2in"><b>Description</b>: Unpack the data associated with a job's 
+<p style="margin-left:.2in"><b>Description</b>: Unpack the data associated with a job's
 switch credential from a buffer.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
 <span class="commandline"> switch_job</span>&nbsp; &nbsp;&nbsp;(input/output) a previously
 allocated job switch credential to be filled in with data read from the buffer.<br>
-<span class="commandline"> buffer</span>&nbsp; &nbsp;&nbsp;(input/output) buffer from 
+<span class="commandline"> buffer</span>&nbsp; &nbsp;&nbsp;(input/output) buffer from
 which the credential's contents are read.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
-the plugin should return SLURM_ERROR and set the errno to an appropriate value 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">int switch_p_get_jobinfo (switch_jobinfo_t switch_job, int data_type, void *data);</p>
 <p style="margin-left:.2in"><b>Description</b>: Get some specific data from a job's switch credential.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
 <span class="commandline"> switch_job</span>&nbsp; &nbsp;&nbsp;(input) a job's switch credential.<br>
-<span class="commandline"> data_type</span>&nbsp; &nbsp;&nbsp;(input) identification 
+<span class="commandline"> data_type</span>&nbsp; &nbsp;&nbsp;(input) identification
 as to the type of data requested. The interpretation of this value is plugin dependent.<br>
-<span class="commandline"> data</span>&nbsp; &nbsp;&nbsp;(output) filled in with the desired 
+<span class="commandline"> data</span>&nbsp; &nbsp;&nbsp;(output) filled in with the desired
 data. The form of this data is dependent upon the value of data_type and the plugin.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
-the plugin should return SLURM_ERROR and set the errno to an appropriate value 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
-<p class="commandline">int switch_p_job_step_complete (switch_jobinfo_t switch_job, 
+<p class="commandline">int switch_p_job_step_complete (switch_jobinfo_t switch_job,
 char *nodelist);</p>
-<p style="margin-left:.2in"><b>Description</b>: Note that the job step associated 
+<p style="margin-left:.2in"><b>Description</b>: Note that the job step associated
 with the specified nodelist has completed execution.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline"> switch_job</span> &nbsp;&nbsp;&nbsp;(input) 
+<span class="commandline"> switch_job</span> &nbsp;&nbsp;&nbsp;(input)
 The completed job's switch credential.<br>
-<span class="commandline"> nodelist</span>&nbsp; &nbsp;&nbsp;(input) A list of nodes 
+<span class="commandline"> nodelist</span>&nbsp; &nbsp;&nbsp;(input) A list of nodes
 on which the job has completed. This may contain expressions to specify node ranges.
-(e.g. "linux[1-20]" or "linux[2,4,6,8]").</p> 
+(e.g. "linux[1-20]" or "linux[2,4,6,8]").</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">int switch_p_job_step_part_comp (switch_jobinfo_t switch_job,
 char *nodelist);</p>
-<p style="margin-left:.2in"><b>Description</b>: Note that the job step has completed 
+<p style="margin-left:.2in"><b>Description</b>: Note that the job step has completed
 execution on the specified node list. The job step is not necessarily completed on all
-nodes, but switch resources associated with it on the specified nodes are no longer 
+nodes, but switch resources associated with it on the specified nodes are no longer
 in use.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline"> switch_job</span> &nbsp;&nbsp;&nbsp;(input) 
+<span class="commandline"> switch_job</span> &nbsp;&nbsp;&nbsp;(input)
 The completed job's switch credential.<br>
 <span class="commandline"> nodelist</span>&nbsp; &nbsp;&nbsp;(input) A list of nodes
 on which the job step has completed. This may contain expressions to specify node ranges.
@@ -307,32 +307,32 @@ the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">bool switch_p_part_comp (void);</p>
-<p style="margin-left:.2in"><b>Description</b>: Indicate if the switch plugin should 
+<p style="margin-left:.2in"><b>Description</b>: Indicate if the switch plugin should
 process partial job step completions (i.e. switch_g_job_step_part_comp). Support
-of partition completions is compute intensive, so it should be avoided unless switch 
+of partition completions is compute intensive, so it should be avoided unless switch
 resources are in short supply (e.g. switch/federation).</p>
-<p style="margin-left:.2in"><b>Returns</b>: True if partition step completions are 
+<p style="margin-left:.2in"><b>Returns</b>: True if partition step completions are
 to be recorded. False if only full job step completions are to be noted.</p>
 
 <p class="commandline">void switch_p_print_jobinfo(FILE *fp, switch_jobinfo_t switch_job);</p>
-<p style="margin-left:.2in"><b>Description</b>: Print the contents of a job's 
+<p style="margin-left:.2in"><b>Description</b>: Print the contents of a job's
 switch credential to a file.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
 <span class="commandline"> fp</span>&nbsp; &nbsp;&nbsp;(input) pointer to an open file.<br>
-<span class="commandline"> switch_job</span>&nbsp; &nbsp;&nbsp;(input) a job's 
+<span class="commandline"> switch_job</span>&nbsp; &nbsp;&nbsp;(input) a job's
 switch credential.</p>
 <p style="margin-left:.2in"><b>Returns</b>: None.</p>
 
-<p class="commandline">char *switch_p_sprint_jobinfo(switch_jobinfo_t switch_job, 
+<p class="commandline">char *switch_p_sprint_jobinfo(switch_jobinfo_t switch_job,
 char *buf, size_t size);</p>
-<p style="margin-left:.2in"><b>Description</b>: Print the contents of a job's 
+<p style="margin-left:.2in"><b>Description</b>: Print the contents of a job's
 switch credential to a buffer.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline"> switch_job</span>&nbsp; &nbsp;&nbsp;(input) a job's 
+<span class="commandline"> switch_job</span>&nbsp; &nbsp;&nbsp;(input) a job's
 switch credential.<br>
-<span class="commandline"> buf</span>&nbsp; &nbsp;&nbsp;(input/output) pointer to 
+<span class="commandline"> buf</span>&nbsp; &nbsp;&nbsp;(input/output) pointer to
 buffer into which the job credential information is to be written.<br>
-<span class="commandline"> size</span>&nbsp; &nbsp;&nbsp;(input) size of buf in 
+<span class="commandline"> size</span>&nbsp; &nbsp;&nbsp;(input) size of buf in
 bytes</p>
 <p style="margin-left:.2in"><b>Returns</b>: location of buffer, same as <i>buf</i>.</p>
 
@@ -343,10 +343,10 @@ switch credential.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
 <span class="commandline"> switch_job</span>&nbsp; &nbsp;&nbsp;(input) a job's
 switch credential.<br>
-<span class="commandline"> key</span>&nbsp; &nbsp;&nbsp;(input) identification 
-of the type of data to be retrieved from the switch credential. NOTE: The 
+<span class="commandline"> key</span>&nbsp; &nbsp;&nbsp;(input) identification
+of the type of data to be retrieved from the switch credential. NOTE: The
 interpretation of this key is dependent upon the switch type. <br>
-<span class="commandline"> resulting_data</span>&nbsp; &nbsp;&nbsp;(input/output) 
+<span class="commandline"> resulting_data</span>&nbsp; &nbsp;&nbsp;(input/output)
 pointer to where the requested data should be stored. </p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR and set the errno to an appropriate value
@@ -356,125 +356,125 @@ to indicate the reason for failure.</p>
 
 <h3>Node Specific Switch Management Functions</h3>
 <p class="commandline">int switch_p_node_init (void);</p>
-<p style="margin-left:.2in"><b>Description</b>: This function is run from the top level slurmd 
+<p style="margin-left:.2in"><b>Description</b>: This function is run from the top level slurmd
 only once per slurmd run. It may be used, for instance, to perform some one-time
 interconnect setup or spawn an error handling thread.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> None</span></p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
-the plugin should return SLURM_ERROR and set the errno to an appropriate value 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">int switch_p_node_fini (void);</p>
-<p style="margin-left:.2in"><b>Description</b>: This function is called once as slurmd exits 
+<p style="margin-left:.2in"><b>Description</b>: This function is called once as slurmd exits
 (slurmd will wait for this function to return before continuing the exit process).</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> None</span></p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
-the plugin should return SLURM_ERROR and set the errno to an appropriate value 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h3>Job Management Functions</h3>
 <pre>
 =========================================================================
-Process 1 (root)        Process 2 (root, user)  |  Process 3 (user task) 
-                                                |                        
-switch_p_job_preinit                            |                        
-fork ------------------ switch_p_job_init       |                        
-waitpid                 setuid, chdir, etc.     |                        
-                        fork N procs -----------+--- switch_p_job_attach 
-                        wait all                |    exec mpi process    
-                        switch_p_job_fini*      |                        
-switch_p_job_postfini                           |                        
+Process 1 (root)        Process 2 (root, user)  |  Process 3 (user task)
+                                                |
+switch_p_job_preinit                            |
+fork ------------------ switch_p_job_init       |
+waitpid                 setuid, chdir, etc.     |
+                        fork N procs -----------+--- switch_p_job_attach
+                        wait all                |    exec mpi process
+                        switch_p_job_fini*      |
+switch_p_job_postfini                           |
 =========================================================================
 </pre>
 
 <p class="commandline">int switch_p_job_preinit (switch_jobinfo_t jobinfo switch_job);</p>
-<p style="margin-left:.2in"><b>Description</b>: Preinit is run as root in the first slurmd process, 
+<p style="margin-left:.2in"><b>Description</b>: Preinit is run as root in the first slurmd process,
 the so called job manager. This function can be used to perform any initialization
 that needs to be performed in the same process as switch_p_job_fini().</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
-<span class="commandline"> switch_job</span>&nbsp; &nbsp;&nbsp;(input) a job's 
+<span class="commandline"> switch_job</span>&nbsp; &nbsp;&nbsp;(input) a job's
 switch credential.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
-the plugin should return SLURM_ERROR and set the errno to an appropriate value 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">int switch_p_job_init (switch_jobinfo_t jobinfo switch_job, uid_t uid);</p>
-<p style="margin-left:.2in"><b>Description</b>: Initialize interconnect on node for a job. 
-This function is run from the second slurmd process (some interconnect implementations 
+<p style="margin-left:.2in"><b>Description</b>: Initialize interconnect on node for a job.
+This function is run from the second slurmd process (some interconnect implementations
 may require the switch_p_job_init functions to be executed from a separate process
 than the process executing switch_p_job_fini() [e.g. Quadrics Elan]).</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline"> switch_job</span>&nbsp; &nbsp;&nbsp;(input) a job's 
+<span class="commandline"> switch_job</span>&nbsp; &nbsp;&nbsp;(input) a job's
 switch credential.<br>
-<span class="commandline"> uid</span>&nbsp; &nbsp;&nbsp;(input) the user id 
+<span class="commandline"> uid</span>&nbsp; &nbsp;&nbsp;(input) the user id
 to execute a job.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
-the plugin should return SLURM_ERROR and set the errno to an appropriate value 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
-<p class="commandline">int switch_p_job_attach ( switch_jobinfo_t switch_job, char ***env, 
+<p class="commandline">int switch_p_job_attach ( switch_jobinfo_t switch_job, char ***env,
 uint32_t nodeid, uint32_t procid, uint32_t nnodes, uint32_t nprocs, uint32_t rank );</p>
 <p style="margin-left:.2in"><b>Description</b>: Attach process to interconnect
-(Called from within the process, so it is appropriate to set interconnect specific 
+(Called from within the process, so it is appropriate to set interconnect specific
 environment variables here).</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline"> switch_job</span>&nbsp; &nbsp;&nbsp;(input) a job's 
+<span class="commandline"> switch_job</span>&nbsp; &nbsp;&nbsp;(input) a job's
 switch credential.<br>
-<span class="commandline"> env</span>&nbsp; &nbsp;&nbsp;(input/output) the 
-environment variables to be set upon job initiation. Switch specific environment 
+<span class="commandline"> env</span>&nbsp; &nbsp;&nbsp;(input/output) the
+environment variables to be set upon job initiation. Switch specific environment
 variables are added as needed.<br>
 <span class="commandline"> nodeid</span>&nbsp; &nbsp;&nbsp;(input) zero-origin
 id of this node.<br>
 <span class="commandline"> procid</span>&nbsp; &nbsp;&nbsp;(input) zero-origin
 process id local to slurmd and <b>not</b> equivalent to the global task id or MPI rank.<br>
-<span class="commandline"> nnodes</span>&nbsp; &nbsp;&nbsp;(input) count of 
+<span class="commandline"> nnodes</span>&nbsp; &nbsp;&nbsp;(input) count of
 nodes allocated to this job.<br>
-<span class="commandline"> nprocs</span>&nbsp; &nbsp;&nbsp;(input) total count of 
+<span class="commandline"> nprocs</span>&nbsp; &nbsp;&nbsp;(input) total count of
 processes or tasks to be initiated for this job.<br>
 <span class="commandline"> rank</span>&nbsp; &nbsp;&nbsp;(input) zero-origin
 id of this task.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
-the plugin should return SLURM_ERROR and set the errno to an appropriate value 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">int switch_p_job_fini (switch_jobinfo_t jobinfo switch_job);</p>
-<p style="margin-left:.2in"><b>Description</b>: This function is run from the same process 
+<p style="margin-left:.2in"><b>Description</b>: This function is run from the same process
 as switch_p_job_init() after all job tasks have exited. It is *not* run as root, because
 the process in question has already setuid to the job owner.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
-<span class="commandline"> switch_job</span>&nbsp; &nbsp;&nbsp;(input) a job's 
+<span class="commandline"> switch_job</span>&nbsp; &nbsp;&nbsp;(input) a job's
 switch credential.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
-the plugin should return SLURM_ERROR and set the errno to an appropriate value 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
-<p class="commandline">int switch_p_job_postfini ( switch_jobinfo_t switch_job, 
+<p class="commandline">int switch_p_job_postfini ( switch_jobinfo_t switch_job,
 uid_t pgid, uint32_t job_id, uint32_t step_id );</p>
-<p style="margin-left:.2in"><b>Description</b>: This function is run from the initial slurmd 
+<p style="margin-left:.2in"><b>Description</b>: This function is run from the initial slurmd
 process (same process as switch_p_job_preinit()), and is run as root. Any cleanup routines
 that need to be run with root privileges should be run from this function.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline"> switch_job</span>&nbsp; &nbsp;&nbsp;(input) a job's 
+<span class="commandline"> switch_job</span>&nbsp; &nbsp;&nbsp;(input) a job's
 switch credential.<br>
-<span class="commandline"> pgid</span>&nbsp; &nbsp;&nbsp;(input) The process 
+<span class="commandline"> pgid</span>&nbsp; &nbsp;&nbsp;(input) The process
 group id associated with this task.<br>
-<span class="commandline"> job_id</span>&nbsp; &nbsp;&nbsp;(input) the 
+<span class="commandline"> job_id</span>&nbsp; &nbsp;&nbsp;(input) the
 associated SLURM job id.<br>
-<span class="commandline"> step_id</span>&nbsp; &nbsp;&nbsp;(input) the 
+<span class="commandline"> step_id</span>&nbsp; &nbsp;&nbsp;(input) the
 associated SLURM job step id.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
-the plugin should return SLURM_ERROR and set the errno to an appropriate value 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
-<p class="commandline">int switch_p_job_step_allocated (switch_jobinfo_t 
+<p class="commandline">int switch_p_job_step_allocated (switch_jobinfo_t
 jobinfo switch_job, char *nodelist);</p>
-<p style="margin-left:.2in"><b>Description</b>: Note that the identified 
-job step is active at restart time. This function can be used to 
-restore global switch state information based upon job steps known to be 
-active at restart time. Use of this function is preferred over switch state 
-saved and restored by the switch plugin. Direct use of job step switch 
-information eliminates the possibility of inconsistent state information 
+<p style="margin-left:.2in"><b>Description</b>: Note that the identified
+job step is active at restart time. This function can be used to
+restore global switch state information based upon job steps known to be
+active at restart time. Use of this function is preferred over switch state
+saved and restored by the switch plugin. Direct use of job step switch
+information eliminates the possibility of inconsistent state information
 between the switch and job steps.
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
 <span class="commandline"> switch_job</span>&nbsp; &nbsp;&nbsp;(input) a job's
@@ -495,21 +495,21 @@ specific error.</p>
 the switch plugin.</p>
 
 <p class="commandline"><a name="strerror">char *switch_p_strerror(int errnum);</a></p>
-<p style="margin-left:.2in"><b>Description</b>: Return a string description of a switch 
+<p style="margin-left:.2in"><b>Description</b>: Return a string description of a switch
 specific error code.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
-<span class="commandline"> errnum</span>&nbsp; &nbsp;&nbsp;(input) a switch 
+<span class="commandline"> errnum</span>&nbsp; &nbsp;&nbsp;(input) a switch
 specific error code.</p>
-<p style="margin-left:.2in"><b>Returns</b>: Pointer to string describing the error 
+<p style="margin-left:.2in"><b>Returns</b>: Pointer to string describing the error
 or NULL if no description found in this plugin.</p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Versioning</h2>
-<p> This document describes version 0 of the SLURM Switch API. Future 
-releases of SLURM may revise this API. A switch plugin conveys its ability 
-to implement a particular API version using the mechanism outlined for SLURM plugins. 
-In addition, the credential is transmitted along with the version number of the 
-plugin that transmitted it. It is at the discretion of the plugin author whether 
+<p> This document describes version 0 of the SLURM Switch API. Future
+releases of SLURM may revise this API. A switch plugin conveys its ability
+to implement a particular API version using the mechanism outlined for SLURM plugins.
+In addition, the credential is transmitted along with the version number of the
+plugin that transmitted it. It is at the discretion of the plugin author whether
 to maintain data format compatibility across different versions of the plugin.</p>
 <p class="footer"><a href="#top">top</a></p>
 
diff --git a/doc/html/taskplugins.shtml b/doc/html/taskplugins.shtml
index f476e2e94384c3c09cafcbc7bf412f9989ece3f3..de4735ac48bea30ab0fff7cabef19c5157ac745c 100644
--- a/doc/html/taskplugins.shtml
+++ b/doc/html/taskplugins.shtml
@@ -3,17 +3,17 @@
 <h1><a name="top">SLURM Task Plugin Programmer Guide</a></h1>
 
 <h2> Overview</h2>
-<p> This document describes SLURM task management plugins and the API 
-that defines them. It is intended as a resource to programmers wishing 
+<p> This document describes SLURM task management plugins and the API
+that defines them. It is intended as a resource to programmers wishing
 to write their own SLURM scheduler plugins. This is version 1 of the API.</p>
 
-<p>SLURM task management plugins are SLURM plugins that implement the 
-SLURM task management API described herein. They would typically be 
-used to control task affinity (i.e. binding tasks to processors). 
-They must conform to the SLURM Plugin API with the following 
+<p>SLURM task management plugins are SLURM plugins that implement the
+SLURM task management API described herein. They would typically be
+used to control task affinity (i.e. binding tasks to processors).
+They must conform to the SLURM Plugin API with the following
 specifications:</p>
 <p><span class="commandline">const char plugin_type[]</span><br>
-The major type must be &quot;task.&quot; The minor type can be any recognizable 
+The major type must be &quot;task.&quot; The minor type can be any recognizable
 abbreviation for the type of task management. We recommend, for example:</p>
 <ul>
 <li><b>affinity</b>&#151;A plugin that implements task binding to processors.
@@ -25,10 +25,10 @@ and the value of the <b>TaskPluginParam</b> as defined in the <b>slurm.conf</b>
 services. This is the default behavior and provides no task binding.</li>
 </ul>
 
-<p>The <span class="commandline">plugin_name</span> and 
-<span class="commandline">plugin_version</span> 
-symbols required by the SLURM Plugin API require no specialization 
-for task support. 
+<p>The <span class="commandline">plugin_name</span> and
+<span class="commandline">plugin_version</span>
+symbols required by the SLURM Plugin API require no specialization
+for task support.
 Note carefully, however, the versioning discussion below.</p>
 
 <p class="footer"><a href="#top">top</a></p>
@@ -38,37 +38,37 @@ Note carefully, however, the versioning discussion below.</p>
 enumerated <span class="commandline">errno</span>  to allow SLURM to discover
 as practically as possible the reason for any failed API call.
 These values must not be used as return values in integer-valued functions
-in the API. The proper error return value from integer-valued functions is 
+in the API. The proper error return value from integer-valued functions is
 SLURM_ERROR.</p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>API Functions</h2>
-<p>The following functions must appear. Functions which are not implemented should 
+<p>The following functions must appear. Functions which are not implemented should
 be stubbed.</p>
 
-<p class="commandline">int task_slurmd_batch_request (uint32_t job_id, 
+<p class="commandline">int task_slurmd_batch_request (uint32_t job_id,
 batch_job_launch_msg_t *req);</p>
 <p style="margin-left:.2in"><b>Description</b>: Prepare to launch a batch job.
 Establish node, socket, and core resource availability for it.
 Executed by the <b>slurmd</b> daemon as user root.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline">job_id</span>&nbsp;&nbsp;&nbsp;(input) 
+<span class="commandline">job_id</span>&nbsp;&nbsp;&nbsp;(input)
 ID of the job to be started.<br>
 <span class="commandline">req</span>&nbsp;&nbsp;&nbsp;(input/output)
 Batch job launch request specification.
 See <b>src/common/slurm_protocol_defs.h</b> for the
 data structure definition.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. 
-On failure, the plugin should return SLURM_ERROR and set the errno to an 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful.
+On failure, the plugin should return SLURM_ERROR and set the errno to an
 appropriate value to indicate the reason for failure.</p>
 
-<p class="commandline">int task_slurmd_launch_request (uint32_t job_id, 
+<p class="commandline">int task_slurmd_launch_request (uint32_t job_id,
 launch_tasks_request_msg_t *req, uint32_t node_id);</p>
 <p style="margin-left:.2in"><b>Description</b>: Prepare to launch a job.
 Establish node, socket, and core resource availability for it.
 Executed by the <b>slurmd</b> daemon as user root.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline">job_id</span>&nbsp;&nbsp;&nbsp;(input) 
+<span class="commandline">job_id</span>&nbsp;&nbsp;&nbsp;(input)
 ID of the job to be started.<br>
 <span class="commandline">req</span>&nbsp;&nbsp;&nbsp;(input/output)
 Task launch request specification including node, socket, and
@@ -77,34 +77,34 @@ See <b>src/common/slurm_protocol_defs.h</b> for the
 data structure definition.<br>
 <span class="commandline">node_id</span>&nbsp;&nbsp;&nbsp;(input)
 ID of the node on which resources are being acquired (zero origin).</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. 
-On failure, the plugin should return SLURM_ERROR and set the errno to an 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful.
+On failure, the plugin should return SLURM_ERROR and set the errno to an
 appropriate value to indicate the reason for failure.</p>
 
 <p class="commandline">int task_slurmd_reserve_resources (uint32_t job_id,
 launch_tasks_request_msg_t *req, uint32_t node_id);</p>
-<p style="margin-left:.2in"><b>Description</b>: Reserve resources for 
+<p style="margin-left:.2in"><b>Description</b>: Reserve resources for
 the initiation of a job. Executed by the <b>slurmd</b> daemon as user root.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
 <span class="commandline">job_id</span>&nbsp;&nbsp;&nbsp;(input)
 ID of the job being started.<br>
 <span class="commandline">req</span>&nbsp;&nbsp;&nbsp;(input)
-Task launch request specification including node, socket, and 
+Task launch request specification including node, socket, and
 core specifications.
 See <b>src/common/slurm_protocol_defs.h</b> for the
 data structure definition.<br>
 <span class="commandline">node_id</span>&nbsp;&nbsp;&nbsp;(input)
-ID of the node on which the resources are being acquired 
+ID of the node on which the resources are being acquired
 (zero origin).</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful.
 On failure, the plugin should return SLURM_ERROR and set the errno to an
 appropriate value to indicate the reason for failure.</p>
 
 <p class="commandline">int task_slurmd_suspend_job (uint32_t job_id);</p>
-<p style="margin-left:.2in"><b>Description</b>: Temporarily release resources 
-previously reserved for a job. 
+<p style="margin-left:.2in"><b>Description</b>: Temporarily release resources
+previously reserved for a job.
 Executed by the <b>slurmd</b> daemon as user root.</p>
-<p style="margin-left:.2in"><b>Arguments</b>: 
+<p style="margin-left:.2in"><b>Arguments</b>:
 <span class="commandline">job_id</span>&nbsp;&nbsp;&nbsp;(input)
 ID of the job which is being suspended.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful.
@@ -112,10 +112,10 @@ On failure, the plugin should return SLURM_ERROR and set the errno to an
 appropriate value to indicate the reason for failure.</p>
 
 <p class="commandline">int task_slurmd_resume_job (uint32_t job_id);</p>
-<p style="margin-left:.2in"><b>Description</b>: Reclaim resources which 
+<p style="margin-left:.2in"><b>Description</b>: Reclaim resources which
 were previously released using the task_slurmd_suspend_job function.
 Executed by the <b>slurmd</b> daemon as user root.</p>
-<p style="margin-left:.2in"><b>Arguments</b>: 
+<p style="margin-left:.2in"><b>Arguments</b>:
 <span class="commandline">job_id</span>&nbsp;&nbsp;&nbsp;(input)
 ID of the job which is being resumed.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful.
@@ -123,9 +123,9 @@ On failure, the plugin should return SLURM_ERROR and set the errno to an
 appropriate value to indicate the reason for failure.</p>
 
 <p class="commandline">int task_slurmd_release_resources (uint32_t job_id);</p>
-<p style="margin-left:.2in"><b>Description</b>: Release resources previously 
+<p style="margin-left:.2in"><b>Description</b>: Release resources previously
 reserved for a job. Executed by the <b>slurmd</b> daemon as user root.</p>
-<p style="margin-left:.2in"><b>Arguments</b>: 
+<p style="margin-left:.2in"><b>Arguments</b>:
 <span class="commandline">job_id</span>&nbsp;&nbsp;&nbsp;(input)
 ID of the job which has completed.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful.
@@ -133,10 +133,10 @@ On failure, the plugin should return SLURM_ERROR and set the errno to an
 appropriate value to indicate the reason for failure.</p>
 
 <p class="commandline">int task_pre_setuid (slurmd_job_t *job);</p>
-<p style="margin-left:.2in"><b>Description</b>: task_pre_setuid() is called 
-before setting the UID for the user to launch his jobs. 
+<p style="margin-left:.2in"><b>Description</b>: task_pre_setuid() is called
+before setting the UID for the user to launch his jobs.
 Executed by the <b>slurmstepd</b> program as user root.</p>
-<p style="margin-left:.2in"><b>Arguments</b>: 
+<p style="margin-left:.2in"><b>Arguments</b>:
 <span class="commandline">job</span>&nbsp;&nbsp;&nbsp;(input)
 pointer to the job to be initiated.
 See <b>src/slurmd/slurmstepd/slurmstepd_job.h</b> for the
@@ -146,10 +146,10 @@ On failure, the plugin should return SLURM_ERROR and set the errno to an
 appropriate value to indicate the reason for failure.</p>
 
 <p class="commandline">int task_pre_launch (slurmd_job_t *job);</p>
-<p style="margin-left:.2in"><b>Description</b>: task_pre_launch() is called 
+<p style="margin-left:.2in"><b>Description</b>: task_pre_launch() is called
 prior to exec of application task.
 Executed by the <b>slurmstepd</b> program as the job's owner.
-It is followed by <b>TaskProlog</b> program (as configured in <b>slurm.conf</b>) 
+It is followed by <b>TaskProlog</b> program (as configured in <b>slurm.conf</b>)
 and <b>--task-prolog</b> (from <b>srun</b> command line).</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
 <span class="commandline">job</span>&nbsp;&nbsp;&nbsp;(input)
@@ -161,7 +161,7 @@ On failure, the plugin should return SLURM_ERROR and set the errno to an
 appropriate value to indicate the reason for failure.</p>
 
 <a name="get_errno"><p class="commandline">int task_post_term (slurmd_job_t *job);</p></a>
-<p style="margin-left:.2in"><b>Description</b>: task_term() is called 
+<p style="margin-left:.2in"><b>Description</b>: task_term() is called
 after termination of job step.
 Executed by the <b>slurmstepd</b> program as the job's owner.
 It is preceded by <b>--task-epilog</b> (from <b>srun</b> command line)
@@ -169,7 +169,7 @@ followed by <b>TaskEpilog</b> program (as configured in <b>slurm.conf</b>).</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
 <span class="commandline">job</span>&nbsp;&nbsp;&nbsp;(input)
 pointer to the job which has terminated.
-See <b>src/slurmd/slurmstepd/slurmstepd_job.h</b> for the 
+See <b>src/slurmd/slurmstepd/slurmstepd_job.h</b> for the
 data structure definition.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful.
 On failure, the plugin should return SLURM_ERROR and set the errno to an
diff --git a/doc/html/team.shtml b/doc/html/team.shtml
index ecb47e6bc2a99dd7bac4ae6327beaf79f690bc28..8a8780219f237fb78167873cf4ebf1f6b154082b 100644
--- a/doc/html/team.shtml
+++ b/doc/html/team.shtml
@@ -1,7 +1,7 @@
 <!--#include virtual="header.txt"-->
 
 <h1>SLURM Team</h1>
-<p>SLURM development has been a joint effort of 
+<p>SLURM development has been a joint effort of
 <a href="https://www.llnl.gov/">Lawrence Livermore National Laboratory</a> (LLNL),
 <a href="http://www.hp.com/">HP</a>,
 <a href="http://www.bull.com/">Bull</a>,
@@ -54,9 +54,9 @@ Linux NetworX and many other contributors.
 <li>Bj&oslash;rn-Helge Mevik (University of Oslo, Norway)</li>
 <li>Chris Morrone (LLNL)</li>
 <li>Pere Munt (Barcelona Supercomputer Center, Spain)</li>
-<li>Michal Novotny (Masaryk University, Czech Republic)</li> 
+<li>Michal Novotny (Masaryk University, Czech Republic)</li>
 <li>Bryan O'Sullivan (Pathscale)</li>
-<li>Gennaro Oliva (Institute of High Performance Computing and 
+<li>Gennaro Oliva (Institute of High Performance Computing and
     Networking, Italy)</li>
 <li>Daniel Palermo (HP)</li>
 <li>Dan Phung (LLNL/Columbia University)</li>
diff --git a/doc/html/testimonials.shtml b/doc/html/testimonials.shtml
index 886ad00dc67f786cad93420d0f75b3e7491a7ef5..c839de9782edd3619abafe77dd84fa59fe585cd1 100644
--- a/doc/html/testimonials.shtml
+++ b/doc/html/testimonials.shtml
@@ -3,17 +3,17 @@
 <h1>Customer Testimonials</h1>
 
 <i>
-"Today our largest IBM computers, BlueGene/L and Purple, ranked #1 and #3 
+"Today our largest IBM computers, BlueGene/L and Purple, ranked #1 and #3
 respectively on the November 2005 Top500 list, use SLURM.
 This decision reduces large job launch times from tens of minutes to seconds.
-This effectively provides 
-us with millions of dollars with of additional compute resources without 
-additional cost.  It also allows our computational scientists to use their 
-time more effectively.  SLURM is scalable to very large numbers of processors, 
-another essential ingredient for use at LLNL. This means larger computer 
-systems can be used than otherwise possible with a commensurate increase in 
-the scale of problems that can be solved. SLURM's scalability has eliminated 
-resource management from being a concern for computers of any foreseeable 
+This effectively provides
+us with millions of dollars worth of additional compute resources without
+additional cost.  It also allows our computational scientists to use their
+time more effectively.  SLURM is scalable to very large numbers of processors,
+another essential ingredient for use at LLNL. This means larger computer
+systems can be used than otherwise possible with a commensurate increase in
+the scale of problems that can be solved. SLURM's scalability has eliminated
+resource management from being a concern for computers of any foreseeable
 size. It is one of the best things to happen to massively parallel computing."
 <br><br>
 Dona Crawford, Associate Directory Lawrence Livermore National Laboratory
@@ -23,9 +23,9 @@ Dona Crawford, Associate Directory Lawrence Livermore National Laboratory
 <i>
 "Thank you for SLURM! It is one of the nicest pieces of free software
 for managing HPC clusters we have come across in a long time.
-Both of our Blue Genes are running SLURM and it works fantastically 
+Both of our Blue Genes are running SLURM and it works fantastically
 well.
-It's the most flexible, useful scheduling tool I've ever run 
+It's the most flexible, useful scheduling tool I've ever run
 across."<br><br>
 Adam Todorski, Computational Center for Nanotechnology Inovations,
 Rensselaer Polytechnic Institute
@@ -41,8 +41,8 @@ Aaron Knister, Environmental Protection Agency
 <HR SIZE=4>
 
 <i>
-"We are extremely pleased with SLURM and strongly recommend it to others 
-because it is mature, the developers are highly responsive and 
+"We are extremely pleased with SLURM and strongly recommend it to others
+because it is mature, the developers are highly responsive and
 it just works."<br><br>
 Jeffrey M. Squyres, Pervasive Technology Labs at Indiana University
 </i>
@@ -77,10 +77,10 @@ Oak Ridge National Laboratory
 <HR SIZE=4>
 
 <i>
-"SLURM is under active development, is easy to use, works quite well, 
-and most important to your harried author, it hasn't been a nightmare 
-to configure or manage. (Strong praise, that.) I would range SLURM as 
-the best of the three open source batching systems available, by rather 
+"SLURM is under active development, is easy to use, works quite well,
+and most important to your harried author, it hasn't been a nightmare
+to configure or manage. (Strong praise, that.) I would rank SLURM as
+the best of the three open source batching systems available, by rather
 a large margin." <br><br>
 Bryan O'Sullivan, Pathscale
 </i>
@@ -88,22 +88,22 @@ Bryan O'Sullivan, Pathscale
 
 <i>
 "SLURM scales perfectly to the size of MareNostrum without noticeable
-performance degradation; the daemons running on the compute nodes are 
-light enough to not interfere with the applications' processes and the 
-status reports are accurate and concise, allowing us to spot possible 
+performance degradation; the daemons running on the compute nodes are
+light enough to not interfere with the applications' processes and the
+status reports are accurate and concise, allowing us to spot possible
 anomalies in a single sight." <br><br>
 Erest Artiaga, Barcelona Supercomputing Center
 </i>
 <HR SIZE=4>
 
 <i>
-"SLURM was a great help for us in implementing our own very concise 
-job management system on top of it which could be tailored precisely 
-to our needs, and which at the same time is very simple to use for 
-our customers. 
-In general, we are impressed with the stability, scalability, and performance 
-of SLURM. Furthermore, SLURM is very easy to configure and use. The fact that 
-SLURM is open-source software with a free license is also advantageous for us 
+"SLURM was a great help for us in implementing our own very concise
+job management system on top of it which could be tailored precisely
+to our needs, and which at the same time is very simple to use for
+our customers.
+In general, we are impressed with the stability, scalability, and performance
+of SLURM. Furthermore, SLURM is very easy to configure and use. The fact that
+SLURM is open-source software with a free license is also advantageous for us
 in terms of cost-benefit considerations." <br><br>
 Dr. Wilfried Juling, Direktor, Scientific Supercomputing Center,
 University of Karlsruhe
@@ -113,20 +113,20 @@ University of Karlsruhe
 <i>
 "I had missed SLURM initially when looking for software for a cluster and
 ended up installing Torque. When I found out about SLURM later, it took
-me only a couple of days to go from knowing nothing about it to having a 
+me only a couple of days to go from knowing nothing about it to having a
 SLURM cluster than ran better than the Torque one. I just wanted to say that
-your focus on more "secondary" stuff in cluster software, like security, 
+your focus on more "secondary" stuff in cluster software, like security,
 usability and ease of getting started is *really* appreciated." <br><br>
 Christian Hudson, ApSTAT Technologies
 </i>
 <HR SIZE=4>
 
 <i>
-"SLURM has been adopted as the parallel allocation infrastructure used 
-in HP's premier cluster stack, XC System Software. SLURM has permitted 
+"SLURM has been adopted as the parallel allocation infrastructure used
+in HP's premier cluster stack, XC System Software. SLURM has permitted
 easy scaling of parallel applications on cluster systems with thousands
-of processors, and has also proven itself to be highly portable and 
-efficient between interconnects including Quadrics, QsNet, Myrinet, 
+of processors, and has also proven itself to be highly portable and
+efficient between interconnects including Quadrics, QsNet, Myrinet,
 Infiniband and Gigabit Ethernet."
 <br><br>
 Bill Celmaster, XC Program Manager, Hewlett-Packard Company
diff --git a/doc/html/topo_ex2.gif b/doc/html/topo_ex2.gif
index a0f0e12fada8e99f588809a015d82f27338ee9fd..786d777a3cd593b987c26a83bad082047c32aebb 100644
Binary files a/doc/html/topo_ex2.gif and b/doc/html/topo_ex2.gif differ
diff --git a/doc/html/topology.shtml b/doc/html/topology.shtml
index 58b65811faa4274a4f4c1804b4016e338eb87a0b..8c06e2d753ee6abe18947a41bcc1b65710cf6065 100644
--- a/doc/html/topology.shtml
+++ b/doc/html/topology.shtml
@@ -9,7 +9,7 @@ systems with a three-dimensional torus interconnect and another for
 a hierarchical interconnect.</p>
 
 <p>SLURM's native mode of resource selection is to consider the nodes
-as a one-dimensional array. 
+as a one-dimensional array.
 Jobs are allocated resources on a best-fit basis.
 For larger jobs, this minimizes the number of sets of consecutive nodes
 allocated to the job.</p>
@@ -18,18 +18,18 @@ allocated to the job.</p>
 
 <p>Some larger computers rely upon a three-dimensional torus interconnect.
 The IBM BlueGene computers is one example of this which has highly
-constrained resource allocation scheme, essentially requiring that 
+constrained resource allocation scheme, essentially requiring that
 jobs be allocated a set of nodes logically having a rectangular shape.
 SLURM has a plugin specifically written for BlueGene to select appropriate
 nodes for jobs, change network switch routing, boot nodes, etc as described
 in the <a href="bluegene.html">BlueGene User and Administrator Guide</a>.</p>
 
-<p>The Sun Constellation and Cray XT systems also have three-dimensional 
+<p>The Sun Constellation and Cray XT systems also have three-dimensional
 torus interconnects, but do not require that jobs execute in adjacent nodes.
-On those systems, SLURM only needs to allocate resources to a job which 
-are nearby on the network. 
-SLURM accomplishes this using a 
-<a href="http://en.wikipedia.org/wiki/Hilbert_curve">Hilbert curve</a> 
+On those systems, SLURM only needs to allocate resources to a job which
+are nearby on the network.
+SLURM accomplishes this using a
+<a href="http://en.wikipedia.org/wiki/Hilbert_curve">Hilbert curve</a>
 to map the nodes from a three-dimensional space into a one-dimensional
 space.
 SLURM's native best-fit algorithm is thus able to achieve a high degree
@@ -40,21 +40,21 @@ For more information, see SLURM's documentation for
 
 <h2>Hierarchical Networks</h2>
 
-<p>SLURM can also be configured to allocate resources to jobs on a 
+<p>SLURM can also be configured to allocate resources to jobs on a
 hierarchical network to minimize network contention.
 The basic algorithm is to identify the lowest level switch in the
 hierarchy that can satisfy a job's request and then allocate resources
 on its underlying leaf switches using a best-fit algorithm.
-Use of this logic requires a configuration setting of 
+Use of this logic requires a configuration setting of
 <i>TopologyPlugin=topology/tree</i>.</p>
 
-<p>At some point in the future SLURM code may be provided to 
+<p>At some point in the future SLURM code may be provided to
 gather network topology information directly.
-Now the network topology information must be included 
-in a <i>topology.conf</i> configuration file as shown in the 
+Now the network topology information must be included
+in a <i>topology.conf</i> configuration file as shown in the
 examples below.
-The first example describes a three level switch in which 
-each switch has two children. 
+The first example describes a three level switch in which
+each switch has two children.
 Note that the <i>SwitchName</i> values are arbitrary and only
 used to bookkeeping purposes, but a name must be specified on
 each line.
@@ -68,14 +68,14 @@ names need not be consecutive (e.g. "Nodes=tux[0-3,12,18-20]"
 and "Switches=s[0-2,4-8,12]" will parse fine).
 </p>
 
-<p>An optional LinkSpeed option can be used to indicate the 
-relative performance of the link. 
+<p>An optional LinkSpeed option can be used to indicate the
+relative performance of the link.
 The units used are arbitrary and this information is currently not used.
 It may be used in the future to optimize resource allocations.</p>
 
 <p>The first example shows what a topology would look like for an
 eight node cluster in which all switches have only two children as
-shown in the diagram (not a very realistic configuration, but 
+shown in the diagram (not a very realistic configuration, but
 useful for an example).</p>
 
 <pre>
@@ -91,7 +91,7 @@ SwitchName=s6 Switches=s[4-5]
 </pre>
 <img src=topo_ex1.gif width=600>
 
-<p>The next example is for a network with two levels and 
+<p>The next example is for a network with two levels and
 each switch has four connections.</p>
 <pre>
 # topology.conf
@@ -107,8 +107,8 @@ SwitchName=s7 Switches=s[0-3]  LinkSpeed=1800
 </pre>
 <img src=topo_ex2.gif width=600>
 
-<p>As a practical matter, listing every switch connection 
-definitely results in a slower scheduling algorithm for SLURM 
+<p>As a practical matter, listing every switch connection
+definitely results in a slower scheduling algorithm for SLURM
 to optimize job placement.
 The application performance may achieve little benefit from such optimization.
 Listing the leaf switches with their nodes plus one top level switch
diff --git a/doc/html/topology_plugin.shtml b/doc/html/topology_plugin.shtml
index 305a8ca7c42d148105db32f29a1a31633e1328c0..0cf8ce1a725e4f09c7c91c8576d4eba25c634be5 100644
--- a/doc/html/topology_plugin.shtml
+++ b/doc/html/topology_plugin.shtml
@@ -3,21 +3,21 @@
 <h1><a name="top">SLURM Topology Plugin Programmer Guide</a></h1>
 
 <h2> Overview</h2>
-<p> This document describes SLURM topology plugin and the API that 
-defines them. 
-It is intended as a resource to programmers wishing to write their own 
-SLURM topology plugin. 
+<p> This document describes SLURM topology plugins and the API that
+defines them.
+It is intended as a resource to programmers wishing to write their own
+SLURM topology plugin.
 This is version 100 of the API.</p>
 
-<p>SLURM topology plugins are SLURM plugins that implement 
-convey system topology information so that SLURM is able to 
-optimize resource allocations and minimize communication overhead. 
-The plugins must conform to the SLURM Plugin API with the following 
+<p>SLURM topology plugins are SLURM plugins that convey
+system topology information so that SLURM is able to
+optimize resource allocations and minimize communication overhead.
+The plugins must conform to the SLURM Plugin API with the following
 specifications:</p>
 
 <p><span class="commandline">const char plugin_type[]</span><br>
-The major type must be &quot;topology.&quot; 
-The minor type specifies the type of topology mechanism. 
+The major type must be &quot;topology.&quot;
+The minor type specifies the type of topology mechanism.
 We recommend, for example:</p>
 <ul>
 <li><b>3d_torus</b>&#151;Optimize placement for a three dimensional torus.</li>
@@ -26,10 +26,10 @@ We recommend, for example:</p>
 switches.</li>
 </ul></p>
 
-<p>The <span class="commandline">plugin_name</span> and 
-<span class="commandline">plugin_version</span> 
-symbols required by the SLURM Plugin API require no specialization for 
-topology support. 
+<p>The <span class="commandline">plugin_name</span> and
+<span class="commandline">plugin_version</span>
+symbols required by the SLURM Plugin API require no specialization for
+topology support.
 The actions performed by these plugins vary widely.
 In the case of <b>3d_torus</b>, the nodes in configuration file
 are re-ordered so that nodes which are nearby in the one-dimensional
@@ -40,28 +40,28 @@ to optimize placement.
 Note carefully, however, the versioning discussion below.</p>
 
 <h2>Data Objects</h2>
-<p>The implementation must maintain (though not necessarily directly export) an 
-enumerated <span class="commandline">errno</span> to allow SLURM to discover 
-as practically as possible the reason for any failed API call. 
+<p>The implementation must maintain (though not necessarily directly export) an
+enumerated <span class="commandline">errno</span> to allow SLURM to discover
+as practically as possible the reason for any failed API call.
 Plugin-specific enumerated integer values may be used when appropriate.
 
-<p>These values must not be used as return values in integer-valued 
-functions in the API. 
-The proper error return value from integer-valued functions is SLURM_ERROR. 
-The implementation should endeavor to provide useful and pertinent 
-information by whatever means is practical. 
-Successful API calls are not required to reset any errno to a known value. 
-However, the initial value of any errno, prior to any error condition 
+<p>These values must not be used as return values in integer-valued
+functions in the API.
+The proper error return value from integer-valued functions is SLURM_ERROR.
+The implementation should endeavor to provide useful and pertinent
+information by whatever means is practical.
+Successful API calls are not required to reset any errno to a known value.
+However, the initial value of any errno, prior to any error condition
 arising, should be SLURM_SUCCESS. </p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>API Functions</h2>
-<p>The following functions must appear. 
+<p>The following functions must appear.
 Functions which are not implemented should be stubbed.</p>
 
 <p class="commandline">int topo_build_config(void);</p>
 <p style="margin-left:.2in"><b>Description</b>: Generate topology information.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS or 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS or
 SLURM_ERROR on failure.</p>
 
 <p class="commandline">int topo_get_node_addr(char* node_name, char** paddr, char** ppatt);</p>
@@ -69,18 +69,18 @@ SLURM_ERROR on failure.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
 <b>node_name</b> (input) name of the targeted node<br>
 <b>paddr</b> (output) returns the topology address of the node and connected
-switches. If there are multiple switches at some level in the hierarchy, they 
-will be represented using SLURM's hostlist expression (e.g. "s0" and "s1" are 
-reported as "s[0-1]").  Each level in the hiearchy is separated by a period. 
+switches. If there are multiple switches at some level in the hierarchy, they
+will be represented using SLURM's hostlist expression (e.g. "s0" and "s1" are
+reported as "s[0-1]").  Each level in the hierarchy is separated by a period.
 The last element will always be the node's name (i.e. "s0.s10.nodename")<br>
-<b>ppatt</b> (output) returns the pattern of the topology address. Each level 
-in the hierarchy is separated by a period. The final element will always be 
+<b>ppatt</b> (output) returns the pattern of the topology address. Each level
+in the hierarchy is separated by a period. The final element will always be
 "node" (i.e. "switch.switch.node")<br>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS or 
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS or
 SLURM_ERROR on failure.</p>
 
 <h2>Versioning</h2>
-<p> This document describes version 100 of the SLURM topology API. 
+<p> This document describes version 100 of the SLURM topology API.
 Future releases of SLURM may revise this API.</p>
 <p class="footer"><a href="#top">top</a></p>
 
diff --git a/doc/html/troubleshoot.shtml b/doc/html/troubleshoot.shtml
index 6f60aed98cf942410e8f07f27f573e1ba61f72ba..676d79204b1ea9fbafdb267e44be151fefd50a0f 100644
--- a/doc/html/troubleshoot.shtml
+++ b/doc/html/troubleshoot.shtml
@@ -4,7 +4,7 @@
 
 <p>This guide is meant as a tool to help system administrators
 or operators troubleshoot SLURM failures and restore services.
-The <a href="faq.html">Frequently Asked Questions</a> document 
+The <a href="faq.html">Frequently Asked Questions</a> document
 may also prove useful.</p>
 
 <ul>
@@ -27,7 +27,7 @@ state manually</a></li>
 <li><a href="#bluegene-error-state2">How to set a sub base partition
 which doesn't have a block already created in an error
 state manually</a></li>
-<li><a href="#bluegene-block-create">How to make a <i>bluegene.conf</i> 
+<li><a href="#bluegene-block-create">How to make a <i>bluegene.conf</i>
 file that will load in SLURM</a></li>
 </ul>
 </ul>
@@ -36,40 +36,40 @@ file that will load in SLURM</a></li>
 <h2><a name="resp">SLURM is not responding</a></h2>
 
 <ol>
-<li>Execute "<i>scontrol ping</i>" to determine if the primary 
+<li>Execute "<i>scontrol ping</i>" to determine if the primary
 and backup controllers are responding.
 
-<li>If it responds for you, this could be a <a href="#network">networking 
-or configuration problem</a> specific to some user or node in the 
-cluster.</li> 
+<li>If it responds for you, this could be a <a href="#network">networking
+or configuration problem</a> specific to some user or node in the
+cluster.</li>
 
 <li>If not responding, directly login to the machine and try again
 to rule out <a href="#network">network and configuration problems</a>.</li>
 
-<li>If still not responding, check if there is an active slurmctld 
+<li>If still not responding, check if there is an active slurmctld
 dameon by executing "<i>ps -el | grep slurmctld</i>".</li>
 
 <li>If slurmctld is not running, restart it (typically as user root
 using the command "<i>/etc/init.d/slurm start</i>").
-You should check the log file (<i>SlurmctldLog</i> in the 
+You should check the log file (<i>SlurmctldLog</i> in the
 <i>slurm.conf</i> file) for an indication of why it failed.
 If it keeps failing, you should contact the slurm team for help at
 <a href="mailto:slurm-dev@lists.llnl.gov">slurm-dev@lists.llnl.gov</a>.</li>
 
-<li>If slurmctld is running but not responding (a very rare situation), 
-then kill and restart it (typically as user root using the commands 
+<li>If slurmctld is running but not responding (a very rare situation),
+then kill and restart it (typically as user root using the commands
 "<i>/etc/init.d/slurm stop</i>" and then "<i>/etc/init.d/slurm start</i>").</li>
 
 <li>If it hangs again, increase the verbosity of debug messages
-(increase <i>SlurmctldDebug</i> in the <i>slurm.conf</i> file) 
-and restart.  
+(increase <i>SlurmctldDebug</i> in the <i>slurm.conf</i> file)
+and restart.
 Again check the log file for an indication of why it failed.
 At this point, you should contact the slurm team for help at
 <a href="mailto:slurm-dev@lists.llnl.gov">slurm-dev@lists.llnl.gov</a>.</li>
 
-<li>If it continues to fail without an indication as to the failure 
-mode, restart without preserving state (typically as user root 
-using the commands "<i>/etc/init.d/slurm stop</i>" 
+<li>If it continues to fail without an indication as to the failure
+mode, restart without preserving state (typically as user root
+using the commands "<i>/etc/init.d/slurm stop</i>"
 and then "<i>/etc/init.d/slurm startclean</i>").
 Note: All running jobs and other state information will be lost.</li>
 </ol>
@@ -78,34 +78,34 @@ Note: All running jobs and other state information will be lost.</li>
 
 <h2><a name="sched">Jobs are not getting scheduled</a></h2>
 
-<p>This is dependent upon the scheduler used by SLURM. 
+<p>This is dependent upon the scheduler used by SLURM.
 Executing the command "<i>scontrol show config | grep SchedulerType</i>"
 to determine this.
 For any scheduler, you can check priorities of jobs using the
 command "<i>scontrol show job</i>".</p>
 
 <ul>
-<li>If the scheduler type is <i>builtin</i>, then jobs will be executed 
+<li>If the scheduler type is <i>builtin</i>, then jobs will be executed
 in the order of submission for a given partition.
-Even if resources are available to initiate jobs immediately, 
+Even if resources are available to initiate jobs immediately,
 it will be deferred until no previously submitted job is pending.</li>
 
-<li>If the scheduler type is <i>backfill</i>, then jobs will generally 
-be executed in the order of submission for a given partition with one 
-exception: later submitted jobs will be initiated early if doing so 
-does not delay the expected execution time of an earlier submitted job. 
-In order for backfill scheduling to be effective, users jobs should 
-specify reasonable time limits. 
-If jobs do not specify time limits, then all jobs will receive the 
-same time limit (that associated with the partition), and the ability 
+<li>If the scheduler type is <i>backfill</i>, then jobs will generally
+be executed in the order of submission for a given partition with one
+exception: later submitted jobs will be initiated early if doing so
+does not delay the expected execution time of an earlier submitted job.
+In order for backfill scheduling to be effective, users' jobs should
+specify reasonable time limits.
+If jobs do not specify time limits, then all jobs will receive the
+same time limit (that associated with the partition), and the ability
 to backfill schedule jobs will be limited.
-The backfill scheduler does not alter job specifications of required 
-or excluded nodes, so jobs which specify nodes will substantially 
+The backfill scheduler does not alter job specifications of required
+or excluded nodes, so jobs which specify nodes will substantially
 reduce the effectiveness of backfill scheduling.
 See the <a href="faq.html#backfill">backfill documentation</a>
 for more details.</li>
 
-<li>If the scheduler type is <i>wiki</i>, this represents 
+<li>If the scheduler type is <i>wiki</i>, this represents
 <a href="http://www.clusterresources.com/pages/products/maui-cluster-scheduler.php">
 The Maui Scheduler</a> or
 <a href="http://www.clusterresources.com/pages/products/moab-cluster-suite.php">
@@ -118,18 +118,18 @@ Please refer to its documentation for help.</li>
 <h2><a name="completing">Jobs and nodes are stuck in COMPLETING state</a></h2>
 
 <p>This is typically due to non-killable processes associated with the job.
-SLURM will continue to attempt terminating the processes with SIGKILL, but 
-some jobs may stuck performing I/O and non-killable. 
-This is typically due to a file system problem and may be addressed in 
+SLURM will continue to attempt terminating the processes with SIGKILL, but
+some jobs may be stuck performing I/O and non-killable.
+This is typically due to a file system problem and may be addressed in
 a couple of ways.</p>
 <ol>
-<li>Fix the file system and/or reboot the node. <b>-OR-</b></li> 
-<li>Set the node to a DOWN state and then return it to service 
-("<i>scontrol update NodeName=&lt;node&gt; State=down Reason=hung_proc</i>" 
-and "<i>scontrol update NodeName=&lt;node&gt; State=resume</i>"). 
-This permits other jobs to use the node, but leaves the non-killable 
+<li>Fix the file system and/or reboot the node. <b>-OR-</b></li>
+<li>Set the node to a DOWN state and then return it to service
+("<i>scontrol update NodeName=&lt;node&gt; State=down Reason=hung_proc</i>"
+and "<i>scontrol update NodeName=&lt;node&gt; State=resume</i>").
+This permits other jobs to use the node, but leaves the non-killable
 process in place.
-If the process should ever complete the I/O, the pending SIGKILL 
+If the process should ever complete the I/O, the pending SIGKILL
 should terminate it immediately.</li>
 </ol>
 
@@ -140,30 +140,30 @@ should terminate it immediately.</li>
 <ol>
 <li>Check the reason why the node is down using the command
 "<i>scontrol show node &lt;name&gt;</i>".
-This will show the reason why the node was set down and the 
-time when it happened. 
-If there is insufficient disk space, memory space, etc. compared 
-to the parameters specified in the <i>slurm.conf</i> file then 
+This will show the reason why the node was set down and the
+time when it happened.
+If there is insufficient disk space, memory space, etc. compared
+to the parameters specified in the <i>slurm.conf</i> file then
 either fix the node or change <i>slurm.conf</i>.</li>
 
-<li>If the reason is "Not responding", then check communications 
+<li>If the reason is "Not responding", then check communications
 between the control machine and the DOWN node using the command
-"<i>ping &lt;address&gt;</i>" being sure to specify the 
-NodeAddr values configured in <i>slurm.conf</i>. 
+"<i>ping &lt;address&gt;</i>" being sure to specify the
+NodeAddr values configured in <i>slurm.conf</i>.
 If ping fails, then fix the network or addresses in <i>slurm.conf</i>.</li>
 
-<li>Next, login to a node that SLURM considers to be in a DOWN 
+<li>Next, login to a node that SLURM considers to be in a DOWN
 state and check if the slurmd daemon is running with the command
 "<i>ps -el | grep slurmd</i>".
 If slurmd is not running, restart it (typically as user root
 using the command "<i>/etc/init.d/slurm start</i>").
 You should check the log file (<i>SlurmdLog</i> in the
 <i>slurm.conf</i> file) for an indication of why it failed.
-You can get the status of the running slurmd daemon by 
-executing the command "<i>scontrol show slurmd</i>" on 
+You can get the status of the running slurmd daemon by
+executing the command "<i>scontrol show slurmd</i>" on
 the node of interest.
-Check the value of "Last slurmctld msg time" to determine 
-if the slurmctld is able to communicate with the slurmd. 
+Check the value of "Last slurmctld msg time" to determine
+if the slurmctld is able to communicate with the slurmd.
 If it keeps failing, you should contact the slurm team for help at
 <a href="mailto:slurm-dev@lists.llnl.gov">slurm-dev@lists.llnl.gov</a>.</li>
 
@@ -171,7 +171,7 @@ If it keeps failing, you should contact the slurm team for help at
 then kill and restart it (typically as user root using the commands
 "<i>/etc/init.d/slurm stop</i>" and then "<i>/etc/init.d/slurm start</i>").</li>
 
-<li>If still not responding, try again to rule out 
+<li>If still not responding, try again to rule out
 <a href="#network">network and configuration problems</a>.</li>
 
 <li>If still not responding, increase the verbosity of debug messages
@@ -193,27 +193,27 @@ Note: All jobs and other state information on that node will be lost.</li>
 
 <ol>
 <li>Check the controller and/or slurmd log files (<i>SlurmctldLog</i>
-and <i>SlurmdLog</i> in the <i>slurm.conf</i> file) for an indication 
+and <i>SlurmdLog</i> in the <i>slurm.conf</i> file) for an indication
 of why it is failing.</li>
 
 <li>Check for consistent <i>slurm.conf</i> and credential files on
 the node(s) experiencing problems.</li>
 
-<li>If this is user-specific problem, check that the user is 
-configured on the controller computer(s) as well as the 
+<li>If this is a user-specific problem, check that the user is
+configured on the controller computer(s) as well as the
 compute nodes.
-The user doesn't need to be able to login, but his user ID 
+The user doesn't need to be able to login, but his user ID
 must exist.</li>
 
-<li>Check that a consistent version of SLURM exists on all of 
+<li>Check that a consistent version of SLURM exists on all of
 the nodes (execute "<i>sinfo -V</i>" or "<i>rpm -qa | grep slurm</i>").
-If the first two digits of the version number match it should 
-work fine, but version 1.1 commands will not work with 
-version 1.2 daemons or vise-versa.</li> 
+If the first two digits of the version number match it should
+work fine, but version 1.1 commands will not work with
+version 1.2 daemons or vice-versa.</li>
 </ol>
 <p class="footer"><a href="#top">top</a></p>
 
-<h2><a name="bluegene-block-error">Bluegene: 
+<h2><a name="bluegene-block-error">Bluegene:
 Why is a block in an error state</a></h2>
 
 <ol>
@@ -221,12 +221,12 @@ Why is a block in an error state</a></h2>
 file) for an indication of why it is failing. (grep for update_block:)</li>
 <li>If the reason was something that happened to the system like a
 failed boot or a nodecard going bad or something like that you will
-need to fix the problem and then 
+need to fix the problem and then
 <a href="#bluegene-block-free">manually set the block to free</a>.</li>
 </ol>
 <p class="footer"><a href="#top">top</a></p>
 
-<h2><a name="bluegene-block-norun">Bluegene: How to make it so no jobs 
+<h2><a name="bluegene-block-norun">Bluegene: How to make it so no jobs
 will run on a block</a></h2>
 
 <ol>
@@ -237,7 +237,7 @@ href="#bluegene-block-free">manually set the block to free</a>.</li>
 </ol>
 <p class="footer"><a href="#top">top</a></p>
 
-<h2><a name="bluegene-block-check">Bluegene: Static blocks in 
+<h2><a name="bluegene-block-check">Bluegene: Static blocks in
 <i>bluegene.conf</i> file not loading</a></h2>
 
 <ol>
@@ -253,7 +253,7 @@ href="#bluegene-block-create">click here</a></li>
 </ol>
 <p class="footer"><a href="#top">top</a></p>
 
-<h2><a name="bluegene-block-free">Bluegene: How to free a block(s) 
+<h2><a name="bluegene-block-free">Bluegene: How to free a block(s)
 manually</a></h2>
 <ul>
 <li><b>Using sfree</b></li>
@@ -268,7 +268,7 @@ manually</a></h2>
 </ul>
 <p class="footer"><a href="#top">top</a></p>
 
-<h2><a name="bluegene-error-state">Bluegene: How to set a block in 
+<h2><a name="bluegene-error-state">Bluegene: How to set a block in
 an error state manually</a></h2>
 
 <ol>
@@ -284,11 +284,11 @@ state manually</a></h2>
 <li>Run "<i>scontrol update state=ERROR subBPName=IONODE_LIST</i>".</li>
 IONODE_LIST is a list of the ionodes you want to down in a certain base
 partition i.e. bg000[0-3] will down the first 4 ionodes in base
-partition 000. 
+partition 000.
 </ol>
 <p class="footer"><a href="#top">top</a></p>
 
-<h2><a name="bluegene-block-create">Bluegene: How to make a 
+<h2><a name="bluegene-block-create">Bluegene: How to make a
 <i>bluegene.conf</i> file that will load in SLURM</a></h2>
 
 <ol>
diff --git a/doc/jsspp/Makefile b/doc/jsspp/Makefile
index e3cf1e90cbd2119cd89532a50387691cf5a06c0f..54f8f6d0b2266451acacd57ca90e9bd6508a63d2 100644
--- a/doc/jsspp/Makefile
+++ b/doc/jsspp/Makefile
@@ -10,7 +10,7 @@
 
 REPORT = jsspp
 
-TEX = ../common/llnlCoverPage.tex $(REPORT).tex 
+TEX = ../common/llnlCoverPage.tex $(REPORT).tex
 
 FIGDIR = ../figures
 FIGS = $(FIGDIR)/allocate-init.eps \
@@ -21,12 +21,12 @@ FIGS = $(FIGDIR)/allocate-init.eps \
        $(FIGDIR)/queued-job-init.eps \
        $(FIGDIR)/slurm-arch.eps
 
-PLOTS = $(FIGDIR)/times.eps 
+PLOTS = $(FIGDIR)/times.eps
 
 BIB = ../common/project.bib references.bib
 
 %.eps: %.dia
-	dia --nosplash -e $@ $< 
+	dia --nosplash -e $@ $<
 %.eps: %.gpl
 	gnuplot $<
 %.eps: %.fig
@@ -36,9 +36,9 @@ BIB = ../common/project.bib references.bib
 %.ps: %.dvi
 	dvips -K -t letter -o $(@F) $(<F)
 %.pdf: %.dvi
-	dvipdf $< $@ 
+	dvipdf $< $@
 
-all: $(REPORT).ps 
+all: $(REPORT).ps
 
 
 $(REPORT).dvi: $(TEX) $(FIGS) $(PLOTS) $(BIB)
@@ -51,6 +51,6 @@ $(REPORT).dvi: $(TEX) $(FIGS) $(PLOTS) $(BIB)
 view: $(REPORT).ps
 	ghostview $(REPORT) &
 
-clean: 
+clean:
 	rm -f *~ *% *.dvi *.log *.aux $(REPORT).ps *.blg *.bbl #*.eps #*.gif *.ps
-	      
+
diff --git a/doc/jsspp/abstract.tex b/doc/jsspp/abstract.tex
index 4a9c2f961bde1f7410a4f5bd9de6987892ebe90d..8c6b0941c9b62901bb868bf01874a3200250fbf3 100644
--- a/doc/jsspp/abstract.tex
+++ b/doc/jsspp/abstract.tex
@@ -1,12 +1,12 @@
 \begin{abstract}
-A new cluster resource management system called 
-Simple Linux Utility Resource Management (SLURM) is described 
-in this paper. SLURM, initially developed for large 
-Linux clusters at the Lawrence Livermore National Laboratory (LLNL), 
-is a simple cluster manager that can scale to thousands of processors. 
-SLURM is designed to be flexible and fault-tolerant and can be ported to 
-other clusters of different size and architecture with minimal effort. 
-We are certain that SLURM will benefit both users and system architects 
-by providing them with a simple, robust, and highly scalable parallel 
+A new cluster resource management system called
+Simple Linux Utility Resource Management (SLURM) is described
+in this paper. SLURM, initially developed for large
+Linux clusters at the Lawrence Livermore National Laboratory (LLNL),
+is a simple cluster manager that can scale to thousands of processors.
+SLURM is designed to be flexible and fault-tolerant and can be ported to
+other clusters of different size and architecture with minimal effort.
+We are certain that SLURM will benefit both users and system architects
+by providing them with a simple, robust, and highly scalable parallel
 job execution environment for their cluster system.
 \end{abstract}
diff --git a/doc/jsspp/architecture.tex b/doc/jsspp/architecture.tex
index 56eea0efea70a31bfe2ab71a550f90ab12ae2080..c14c4b00b853771fd0ff41a0956d7036155c7a48 100644
--- a/doc/jsspp/architecture.tex
+++ b/doc/jsspp/architecture.tex
@@ -1,31 +1,31 @@
 \section{SLURM Architecture}
 
 As a cluster resource manager, SLURM has three key functions.  First,
-it allocates exclusive and/or non-exclusive access to resources to users for 
-some duration of time so they can perform work.  Second, it provides 
-a framework for starting, executing, and monitoring work  
-on the set of allocated nodes.  Finally, it arbitrates 
+it allocates exclusive and/or non-exclusive access to resources to users for
+some duration of time so they can perform work.  Second, it provides
+a framework for starting, executing, and monitoring work
+on the set of allocated nodes.  Finally, it arbitrates
 conflicting requests for resources by managing a queue of pending work.
 Users and system administrators interact with SLURM using simple commands.
 
-%Users interact with SLURM through four command line utilities: 
+%Users interact with SLURM through four command line utilities:
 %\srun\ for submitting a job for execution and optionally controlling it
-%interactively, 
-%\scancel\ for early termination of a pending or running job, 
-%\squeue\ for monitoring job queues, and 
+%interactively,
+%\scancel\ for early termination of a pending or running job,
+%\squeue\ for monitoring job queues, and
 %\sinfo\ for monitoring partition and overall system state.
 %System administrators perform privileged operations through an additional
 %command line utility: {\tt scontrol}.
 %
-%The central controller daemon, {\tt slurmctld}, maintains the global state 
+%The central controller daemon, {\tt slurmctld}, maintains the global state
 %and directs operations.
-%Compute nodes simply run a \slurmd\ daemon (similar to a remote shell 
-%daemon) to export control to SLURM.  
+%Compute nodes simply run a \slurmd\ daemon (similar to a remote shell
+%daemon) to export control to SLURM.
 %
-%SLURM is not a sophisticated batch system.  
-%In fact, it was expressly designed to provide high-performance 
-%parallel job management while leaving scheduling decisions to an 
-%external entity as will be described later. 
+%SLURM is not a sophisticated batch system.
+%In fact, it was expressly designed to provide high-performance
+%parallel job management while leaving scheduling decisions to an
+%external entity as will be described later.
 
 \begin{figure}[tb]
 \centerline{\epsfig{file=../figures/arch.eps,scale=0.40}}
@@ -38,19 +38,19 @@ SLURM consists of a \slurmd\ daemon
 running on each compute node, a central \slurmctld\ daemon running on
 a management node (with optional fail-over twin), and five command line
 utilities,
-% {\tt srun}, {\tt scancel}, {\tt sinfo}, {\tt squeue}, and {\tt scontrol}, 
-which can run anywhere in the cluster.  
+% {\tt srun}, {\tt scancel}, {\tt sinfo}, {\tt squeue}, and {\tt scontrol},
+which can run anywhere in the cluster.
 
 The entities managed by these SLURM daemons include nodes, the
 compute resource in SLURM, partitions, which group nodes into
 logical disjoint sets, jobs, or allocations of resources assigned
 to a user for a specified amount of time, and job steps, which are
-sets of tasks within a job.  
-Each job is allocated nodes within a single partition. 
+sets of tasks within a job.
+Each job is allocated nodes within a single partition.
 Once a job is assigned a set of nodes, the user is able to initiate
 parallel work in the form of job steps in any configuration within the
 allocation. For instance a single job step may be started which utilizes
-all nodes allocated to the job, or several job steps may independently 
+all nodes allocated to the job, or several job steps may independently
 use a portion of the allocation.
 
 %\begin{figure}[tcb]
@@ -65,7 +65,7 @@ use a portion of the allocation.
 %job, with one job step utilizing the full allocation of that job.
 %The job in Partition 2 has only one job step using half of the original
 %job allocation.
-%That job might initiate additional job step(s) to utilize 
+%That job might initiate additional job step(s) to utilize
 %the remaining nodes of its allocation.
 
 \begin{figure}[tb]
@@ -81,19 +81,19 @@ are explained in more detail below.
 \subsection{SLURM Local Daemon (Slurmd)}
 
 The \slurmd\ is a multi-threaded daemon running on each compute node.
-It reads the common SLURM configuration file and recovers any 
-previously saved state information, 
-notifies the controller that it is active, waits for work, 
-executes the work, returns status, and waits for more work.  
+It reads the common SLURM configuration file and recovers any
+previously saved state information,
+notifies the controller that it is active, waits for work,
+executes the work, returns status, and waits for more work.
 Since it initiates jobs for other users, it must run with root privilege.
-%It also asynchronously exchanges node and job status information with {\tt slurmctld}.  
-The only job information it has at any given time pertains to its 
+%It also asynchronously exchanges node and job status information with {\tt slurmctld}.
+The only job information it has at any given time pertains to its
 currently executing jobs.
 The \slurmd\ performs five major tasks.
 
 \begin{itemize}
-\item {\em Machine and Job Status Services}:  Respond to controller 
-requests for machine and job state information, and send asynchronous 
+\item {\em Machine and Job Status Services}:  Respond to controller
+requests for machine and job state information, and send asynchronous
 reports of some state changes (e.g. \slurmd\ startup) to the controller.
 
 \item {\em Remote Execution}: Start, monitor, and clean up after a set
@@ -121,19 +121,19 @@ termination requests to any set of locally managed processes.
 \subsection{SLURM Central Daemon (Slurmctld)}
 
 Most SLURM state information is maintained by the controller, {\tt slurmctld}.
-The \slurmctld\ is multi-threaded with independent read and write locks 
-for the various data structures to enhance scalability. 
-When \slurmctld\ starts, it reads the SLURM configuration file.  
+The \slurmctld\ is multi-threaded with independent read and write locks
+for the various data structures to enhance scalability.
+When \slurmctld\ starts, it reads the SLURM configuration file.
 It can also read additional state information
 from a checkpoint file generated by a previous execution of {\tt slurmctld}.
-Full controller state information is written to 
+Full controller state information is written to
 disk periodically with incremental changes written to disk immediately
-for fault-tolerance.  
+for fault-tolerance.
 The \slurmctld\ runs in either master or standby mode, depending on the
 state of its fail-over twin, if any.
 The \slurmctld\ need not execute with root privilege.
-%In fact, it is recommended that a unique user entry be created for 
-%executing \slurmctld\ and that user must be identified in the SLURM 
+%In fact, it is recommended that a unique user entry be created for
+%executing \slurmctld\ and that user must be identified in the SLURM
 %configuration file as {\tt SlurmUser}.
 The \slurmctld\ consists of three major components:
 
@@ -141,7 +141,7 @@ The \slurmctld\ consists of three major components:
 \item {\em Node Manager}: Monitors the state of each node in
 the cluster.  It polls {\tt slurmd}'s for status periodically and
 receives state change notifications from \slurmd\ daemons asynchronously.
-It ensures that nodes have the prescribed configuration before being 
+It ensures that nodes have the prescribed configuration before being
 considered available for use.
 
 \item {\em Partition Manager}: Groups nodes into non-overlapping sets called
@@ -151,19 +151,19 @@ to jobs based upon node and partition states and configurations. Requests
 to initiate jobs come from the Job Manager.  The \scontrol\ may be used
 to administratively alter node and partition configurations.
 
-\item {\em Job Manager}: Accepts user job requests and places pending 
-jobs in a priority ordered queue. 
+\item {\em Job Manager}: Accepts user job requests and places pending
+jobs in a priority ordered queue.
 The Job Manager is awakened on a periodic basis and whenever there
 is a change in state that might permit a job to begin running, such
 as job completion, job submission, partition-up transition,
 node-up transition, etc.  The Job Manager then makes a pass
-through the priority-ordered job queue. The highest priority jobs 
-for each partition are allocated resources as possible. As soon as an 
-allocation failure occurs for any partition, no lower-priority jobs for 
-that partition are considered for initiation. 
+through the priority-ordered job queue. The highest priority jobs
+for each partition are allocated resources as possible. As soon as an
+allocation failure occurs for any partition, no lower-priority jobs for
+that partition are considered for initiation.
 After completing the scheduling cycle, the Job Manager's scheduling
 thread sleeps.  Once a job has been allocated resources, the Job Manager
-transfers necessary state information to those nodes, permitting it 
+transfers necessary state information to those nodes, permitting it
 to commence execution.  When the Job Manager detects that
 all nodes associated with a job have completed their work, it initiates
 clean-up and performs another scheduling cycle as described above.
diff --git a/doc/jsspp/conclusions.tex b/doc/jsspp/conclusions.tex
index 33f6f492ebf20bd2e9af2dc5917624fc4da648d3..69c946c47ba26d0e99da07dd292ccfde19792e20 100644
--- a/doc/jsspp/conclusions.tex
+++ b/doc/jsspp/conclusions.tex
@@ -1,22 +1,22 @@
 \section{Conclusion and Future Plans}
 
-We have presented in this paper an overview of SLURM, a simple, highly scalable, robust, 
+We have presented in this paper an overview of SLURM, a simple, highly scalable, robust,
 and portable cluster resource management system.
 The contribution of this work is that we have provided a immediately-available
-and open-source tool that virtually anybody can use to efficiently manage clusters of 
+and open-source tool that virtually anybody can use to efficiently manage clusters of
 different sizes and architecture.
-%We expect SLURM to begin production use on LLNL Linux clusters 
-%starting in March 2003 and be available for distribution shortly 
-%thereafter. 
+%We expect SLURM to begin production use on LLNL Linux clusters
+%starting in March 2003 and be available for distribution shortly
+%thereafter.
 
-Looking ahead, we anticipate adding support for additional 
+Looking ahead, we anticipate adding support for additional
 operating systems.
 % (IA64 and x86-64) and interconnects (InfiniBand
-%and the IBM Blue Gene\cite{BlueGene2002} system\footnote{Blue Gene 
-%has a different interconnect than any supported by SLURM and 
-%a 3-D topography with restrictive allocation constraints.}). 
-We anticipate adding a job preempt/resume capability, which will 
-provide an external scheduler the infrastructure 
+%and the IBM Blue Gene\cite{BlueGene2002} system\footnote{Blue Gene
+%has a different interconnect than any supported by SLURM and
+%a 3-D topography with restrictive allocation constraints.}).
+We anticipate adding a job preempt/resume capability, which will
+provide an external scheduler the infrastructure
 required to perform gang scheduling, and a checkpoint/restart capability.
 We also plan to use the SLURM for IBM's Blue Gene/L platform~\cite{BGL} by incorporating a capability
 to manage jobs on a three-dimensional torus machine into the SLURM.
diff --git a/doc/jsspp/interaction.tex b/doc/jsspp/interaction.tex
index 9f0246293d54b29b6efd4f9c810d67110df171c2..d2c2a3473a0baff6f0e86fdb9658568a094938f9 100644
--- a/doc/jsspp/interaction.tex
+++ b/doc/jsspp/interaction.tex
@@ -1,24 +1,24 @@
 \section{Scheduling Infrastructure}
 
-Scheduling parallel computers is a very complex matter.  
-Several good public domain schedulers exist with the most 
-popular being the Maui Scheduler\cite{Jackson2001,Maui2002}. 
-The scheduler used at our site, DPCS\cite{DPCS2002}, is quite 
-sophisticated and has over 150,000 lines of code. 
-We felt no need to address scheduling issues within SLURM, but 
-have instead developed a resource manager with a rich set of 
-application programming interfaces (APIs) and the flexibility 
-to satisfy the needs of others working on scheduling issues.  
-SLURM's default scheduler implements First-In First-Out (FIFO). 
-An external entity can establish a job's initial priority 
+Scheduling parallel computers is a very complex matter.
+Several good public domain schedulers exist with the most
+popular being the Maui Scheduler\cite{Jackson2001,Maui2002}.
+The scheduler used at our site, DPCS\cite{DPCS2002}, is quite
+sophisticated and has over 150,000 lines of code.
+We felt no need to address scheduling issues within SLURM, but
+have instead developed a resource manager with a rich set of
+application programming interfaces (APIs) and the flexibility
+to satisfy the needs of others working on scheduling issues.
+SLURM's default scheduler implements First-In First-Out (FIFO).
+An external entity can establish a job's initial priority
 through a plugin.
-An external scheduler may also submit, signal, hold, reorder and 
+An external scheduler may also submit, signal, hold, reorder and
 terminate jobs via the API.
 
 \subsection{Resource Specification}
 
-The \srun\ command and corresponding API have a wide of resource 
-specifications available. The \srun\ resource specification options 
+The \srun\ command and corresponding API have a wide range of resource
+specifications available. The \srun\ resource specification options
 are described below.
 
 \subsubsection{Geometry Specification}
@@ -27,64 +27,64 @@ These options describe how many nodes and tasks are needed as
 well as describing the distribution of tasks across the nodes.
 
 \begin{itemize}
-\item {\tt cpus-per-task=<number>}: 
-Specifies the number of processors cpus) required for each task 
-(or process) to run. 
-This may be useful if the job is multithreaded and requires more 
-than one cpu per task for optimal performance. 
+\item {\tt cpus-per-task=<number>}:
+Specifies the number of processors (cpus) required for each task
+(or process) to run.
+This may be useful if the job is multithreaded and requires more
+than one cpu per task for optimal performance.
 The default is one cpu per process.
 
-\item {\tt nodes=<number>[-<number>]}: 
-Specifies the number of nodes required by this job. 
-The node count may be either a specific value or a minimum and maximum 
-node count separated by a hyphen. 
-The partition's node limits supersede those of the job. 
-If a job's node limits are completely outside of the range permitted 
-for it's associated partition, the job will be left in a PENDING state. 
-The default is to allocate one cpu per process, such that nodes with 
+\item {\tt nodes=<number>[-<number>]}:
+Specifies the number of nodes required by this job.
+The node count may be either a specific value or a minimum and maximum
+node count separated by a hyphen.
+The partition's node limits supersede those of the job.
+If a job's node limits are completely outside of the range permitted
+for its associated partition, the job will be left in a PENDING state.
+The default is to allocate one cpu per process, such that nodes with
 one cpu will run one task, nodes with 2 cpus will run two tasks, etc.
-The distribution of processes across nodes may be controlled using 
+The distribution of processes across nodes may be controlled using
 this option along with the {\tt nproc} and {\tt cpus-per-task} options.
 
-\item {\tt nprocs=<number>}: 
-Specifies the number of processes to run. 
-Specification of the number of processes per node may be achieved 
-with the {\tt cpus-per-task} and {\tt nodes} options. 
-The default is one process per node unless {\tt cpus-per-task} 
+\item {\tt nprocs=<number>}:
+Specifies the number of processes to run.
+Specification of the number of processes per node may be achieved
+with the {\tt cpus-per-task} and {\tt nodes} options.
+The default is one process per node unless {\tt cpus-per-task}
 explicitly specifies otherwise.
 
 \end{itemize}
 
 \subsubsection{Constraint Specification}
 
-These options describe what configuration requirements of the nodes 
+These options describe the configuration requirements of the nodes
 which can be used.
 
 \begin{itemize}
 
-\item {\tt constraint=list}: 
+\item {\tt constraint=list}:
 Specify a list of constraints. The list of constraints is
 a comma separated list of features that have been assigned to the
 nodes by the slurm administrator. If no nodes have the requested
 feature, then the job will be rejected.
 
-\item {\tt contiguous=[yes|no]}: 
-demand a contiguous range of nodes. The default is "yes". 
+\item {\tt contiguous=[yes|no]}:
+demand a contiguous range of nodes. The default is "yes".
 
-\item {\tt mem=<number>}: 
+\item {\tt mem=<number>}:
 Specify a minimum amount of real memory per node (in megabytes).
 
-\item {\tt mincpus=<number>}: 
+\item {\tt mincpus=<number>}:
 Specify minimum number of cpus per node.
 
-\item {\tt partition=name}: 
-Specifies the partition to be used. 
+\item {\tt partition=name}:
+Specifies the partition to be used.
 There will be a default partition specified in the SLURM configuration file.
 
-\item {\tt tmp=<number>}: 
+\item {\tt tmp=<number>}:
 Specify a minimum amount of temporary disk space per node (in megabytes).
 
-\item {\tt vmem=<number>}: 
+\item {\tt vmem=<number>}:
 Specify a minimum amount of virtual memory per node (in megabytes).
 
 \end{itemize}
@@ -93,139 +93,139 @@ Specify a minimum amount of virtual memory per node (in megabytes).
 
 \begin{itemize}
 
-\item {\tt batch}: 
-Submit in "batch mode." 
+\item {\tt batch}:
+Submit in "batch mode."
 srun will make a copy of the executable file (a script) and submit therequest for execution when resouces are available.
-srun will terminate after the request has been submitted. 
-The executable file will run on the first node allocated to the 
+srun will terminate after the request has been submitted.
+The executable file will run on the first node allocated to the
 job and must contain srun commands to initiate parallel tasks.
 
-\item {\tt exclude=[filename|node\_list]}: 
-Request that a specific list of hosts not be included in the resources 
-allocated to this job. The host list will be assumed to be a filename 
-if it contains a "/"character. If some nodes are suspect, this option 
+\item {\tt exclude=[filename|node\_list]}:
+Request that a specific list of hosts not be included in the resources
+allocated to this job. The host list will be assumed to be a filename
+if it contains a "/"character. If some nodes are suspect, this option
 may be used to avoid using them.
 
-\item {\tt immediate}: 
-Exit if resources are not immediately available. 
+\item {\tt immediate}:
+Exit if resources are not immediately available.
 By default, the request will block until resources become available.
 
-\item {\tt nodelist=[filename|node\_list]}: 
+\item {\tt nodelist=[filename|node\_list]}:
 Request a specific list of hosts. The job will contain at least
 these hosts. The list may be specified as a comma-separated list of
 hosts, a range of hosts (host[1-5,7,...] for example), or a filename.
 The host list will be assumed to be a filename if it contains a "/"
 character.
 
-\item {\tt overcommit}: 
-Overcommit resources. 
-Normally the job will not be allocated more than one process per cpu. 
+\item {\tt overcommit}:
+Overcommit resources.
+Normally the job will not be allocated more than one process per cpu.
 By specifying this option, you are explicitly allowing more than one process
-per cpu. 
+per cpu.
 
-\item {\tt share}: 
-The job can share nodes with other running jobs. This may result in faster job 
+\item {\tt share}:
+The job can share nodes with other running jobs. This may result in faster job
 initiation and higher system utilization, but lower application performance.
 
-\item {\tt time=<number>}: 
-Establish a time limit to terminate the job after the specified number of 
-minutes. If the job's time limit exceed's the partition's time limit, the 
-job will be left in a PENDING state. The default value is the partition's 
-time limit. When the time limit is reached, the job's processes are sent 
+\item {\tt time=<number>}:
+Establish a time limit to terminate the job after the specified number of
+minutes. If the job's time limit exceeds the partition's time limit, the
+job will be left in a PENDING state. The default value is the partition's
+time limit. When the time limit is reached, the job's processes are sent
 SIGXCPU followed by SIGKILL. The interval between signals is configurable.
 
 \end{itemize}
 
-All parameters may be specified using single letter abbreviations 
-("-n" instead of "--nprocs=4"). 
-Environment variable can also be used to specify many parameters. 
-Environment variable will be set to the actual number of nodes and 
+All parameters may be specified using single letter abbreviations
+("-n" instead of "--nprocs=4").
+Environment variables can also be used to specify many parameters.
+Environment variables will be set to the actual number of nodes and
 processors allocated
-In the event that the node count specification is a range, the 
-application could inspect the environment variables to scale the 
+In the event that the node count specification is a range, the
+application could inspect the environment variables to scale the
 problem appropriately.
-To request four processes with one cpu per task the command line would 
+To request four processes with one cpu per task the command line would
 look like this: {\em srun --nprocs=4 --cpus-per-task=1 hostname}.
-Note that if multiple resource specifications are provided, resources 
-will be allocated so as to satisfy the all specifications. 
-For example a request with the specification {\tt nodelist=dev[0-1]} 
+Note that if multiple resource specifications are provided, resources
+will be allocated so as to satisfy all the specifications.
+For example a request with the specification {\tt nodelist=dev[0-1]}
 and {\tt nodes=4} may be satisfied with nodes {\tt dev[0-3]}.
 
 \subsection{The Maui Scheduler and SLURM}
 
-{\em The integration of the Maui Scheduler with SLURM was 
-just beginning at the time this paper was written. Full 
+{\em The integration of the Maui Scheduler with SLURM was
+just beginning at the time this paper was written. Full
 integration is anticipated by the time of the conference.
-This section will be modified as needed based upon that 
+This section will be modified as needed based upon that
 experience.}
 
-The Maui Scheduler is integrated with SLURM through the 
-previously described plugin mechanism. 
-The previously described SLURM commands are used for 
-all job submissions and interactions. 
-When a job is submitted to SLURM, a Maui Scheduler module 
-is called to establish its initial priority. 
-Another Maui Scheduler module is called at the beginning 
-of each SLURM scheduling cycle. 
-Maui can use this opportunity to change priorities of 
+The Maui Scheduler is integrated with SLURM through the
+previously described plugin mechanism.
+The previously described SLURM commands are used for
+all job submissions and interactions.
+When a job is submitted to SLURM, a Maui Scheduler module
+is called to establish its initial priority.
+Another Maui Scheduler module is called at the beginning
+of each SLURM scheduling cycle.
+Maui can use this opportunity to change priorities of
 pending jobs or take other actions.
 
 \subsection{DPCS and SLURM}
 
-DPCS is a meta-batch system designed for use within a single 
-administrative domain (all computers have a common user ID 
-space and exist behind a firewall). 
-DPCS presents users with a uniform set of commands for a wide 
-variety of computers and underlying resource managers (e.g. 
-LoadLeveler on IBM SP systems, SLURM on Linux clusters, NQS, 
-etc.). 
-It was developed in 1991 and has been in production use since 
-1992. 
-While Globus\cite{Globus2002} has the ability to span administrative 
+DPCS is a meta-batch system designed for use within a single
+administrative domain (all computers have a common user ID
+space and exist behind a firewall).
+DPCS presents users with a uniform set of commands for a wide
+variety of computers and underlying resource managers (e.g.
+LoadLeveler on IBM SP systems, SLURM on Linux clusters, NQS,
+etc.).
+It was developed in 1991 and has been in production use since
+1992.
+While Globus\cite{Globus2002} has the ability to span administrative
 domains, both systems could interface with SLURM in a similar fashion.
 
 Users submit jobs directly to DPCS.
-The job consists of a script and an assortment of constraints. 
-Unless specified by constraints, the script can execute on 
-a variety of different computers with various architectures 
-and resource managers. 
-DPCS monitors the state of these computers and performs backfill 
-scheduling across the computers with jobs under its management. 
-When DPCS decides that resources are available to immediately 
-initiate some job of its choice, it takes the following 
+The job consists of a script and an assortment of constraints.
+Unless specified by constraints, the script can execute on
+a variety of different computers with various architectures
+and resource managers.
+DPCS monitors the state of these computers and performs backfill
+scheduling across the computers with jobs under its management.
+When DPCS decides that resources are available to immediately
+initiate some job of its choice, it takes the following
 actions:
 \begin{itemize}
-\item Transfers the job script and assorted state information to 
+\item Transfers the job script and assorted state information to
 the computer upon which the job is to execute.
 
-\item Allocates resources for the job. 
-The resource allocation is performed as user {\em root} and SLURM 
-is configured to restrict resource allocations in the relevent 
+\item Allocates resources for the job.
+The resource allocation is performed as user {\em root} and SLURM
+is configured to restrict resource allocations in the relevant
 partitions to user {\em root}.
 This prevents user resource allocations to that partition
-except through DPCS, which has complete control over job 
+except through DPCS, which has complete control over job
 scheduling there.
-The allocation request specifies the target user ID, job ID 
+The allocation request specifies the target user ID, job ID
 (to match DPCS' own numbering scheme) and specific nodes to use.
 
 \item Spawns the job script as the desired user.
-This script may contain multiple instantiations of \srun\ 
-to initiate multiple job steps. 
+This script may contain multiple instantiations of \srun\
+to initiate multiple job steps.
 
-\item Monitor the job's state and resource consumption. 
-This is performed using DPCS daemons on each compute node 
-recording CPU time, real memory and virtual memory consumed. 
+\item Monitor the job's state and resource consumption.
+This is performed using DPCS daemons on each compute node
+recording CPU time, real memory and virtual memory consumed.
 
-\item Cancel the job as needed when it has reached its time limit. 
-The SLURM job is initiated with an infinite time limit. 
-DPCS mechanisms are used exclusively to manage job time limits. 
+\item Cancel the job as needed when it has reached its time limit.
+The SLURM job is initiated with an infinite time limit.
+DPCS mechanisms are used exclusively to manage job time limits.
 
 \end{itemize}
 
-Much of the SLURM functionality is left unused in the DPCS 
-controlled environment. 
-It should be noted that DPCS is typically configured to not 
-control all partitions. 
-A small (debug) partition is typically configured for smaller 
-jobs and users may directly use SLURM commands to access that 
+Much of the SLURM functionality is left unused in the DPCS
+controlled environment.
+It should be noted that DPCS is typically configured to not
+control all partitions.
+A small (debug) partition is typically configured for smaller
+jobs and users may directly use SLURM commands to access that
 partition.
diff --git a/doc/jsspp/intro.tex b/doc/jsspp/intro.tex
index 113f481b8e97c9e2e9132db67eda7e59c4ee46fd..32ed13b1b57efdfb5a414e0834877ddcdd63a1f6 100644
--- a/doc/jsspp/intro.tex
+++ b/doc/jsspp/intro.tex
@@ -8,101 +8,101 @@ The continuous decrease in the price of the COTS parts in conjunction with
 the good scalability of the cluster architecture has now made it feasible to economically
 build large-scale clusters with thousands of processors~\cite{MCRWeb,PCRWeb}.
 
-An essential component that is needed to harness such a computer is a 
+An essential component that is needed to harness such a computer is a
 resource management system.
 A resource management system (or resource manager) performs such crucial tasks as
 scheduling user jobs, monitoring machine and job status, launching user applications, and
 managing machine configuration,
-An ideal resource manager should be simple, efficient, scalable, fault-tolerant, 
+An ideal resource manager should be simple, efficient, scalable, fault-tolerant,
 and portable.
 
-Unfortunately there are no open-source resource management systems currently available 
+Unfortunately there are no open-source resource management systems currently available
 which satisfy these requirements.
-A survey~\cite{Jette02} has revealed that many existing resource managers have poor scalability and fault-tolerance rendering them unsuitable for large clusters having 
+A survey~\cite{Jette02} has revealed that many existing resource managers have poor scalability and fault-tolerance rendering them unsuitable for large clusters having
 thousands of processors~\cite{LoadLevelerWeb,LoadLevelerManual}.
-While some proprietary cluster managers are suitable for large clusters, 
-they are typically designed for particular computer systems and/or 
-interconnects~\cite{RMS,LoadLevelerWeb,LoadLevelerManual}. 
-Proprietary systems can also be expensive and unavailable in source-code form. 
-Furthermore, proprietary cluster management functionality is usually provided as a 
-part of a specific job scheduling system package. 
-This mandates the use of the given scheduler just to manage a cluster, 
+While some proprietary cluster managers are suitable for large clusters,
+they are typically designed for particular computer systems and/or
+interconnects~\cite{RMS,LoadLevelerWeb,LoadLevelerManual}.
+Proprietary systems can also be expensive and unavailable in source-code form.
+Furthermore, proprietary cluster management functionality is usually provided as a
+part of a specific job scheduling system package.
+This mandates the use of the given scheduler just to manage a cluster,
 even though the scheduler does not necessarily meet the need of organization that hosts the cluster.
 Clear separation of the cluster management functionality from scheduling policy is desired.
 
-This observation led us to set out to design a simple, highly scalable, and 
-portable resource management system. 
-The result of this effort is Simple Linux Utility Resource Management 
+This observation led us to set out to design a simple, highly scalable, and
+portable resource management system.
+The result of this effort is Simple Linux Utility Resource Management
 (SLURM\footnote{A tip of the hat to Matt Groening and creators of {\em Futurama},
-where Slurm is the most popular carbonated beverage in the universe.}). 
+where Slurm is the most popular carbonated beverage in the universe.}).
 SLURM was developed with the following design goals:
 
 \begin{itemize}
 \item {\em Simplicity}: SLURM is simple enough to allow motivated end-users
-to understand its source code and add functionality.  The authors will 
-avoid the temptation to add features unless they are of general appeal. 
+to understand its source code and add functionality.  The authors will
+avoid the temptation to add features unless they are of general appeal.
 
-\item {\em Open Source}: SLURM is available to everyone and will remain free. 
-Its source code is distributed under the GNU General Public 
+\item {\em Open Source}: SLURM is available to everyone and will remain free.
+Its source code is distributed under the GNU General Public
 License~\cite{GPLWeb}.
 
-\item {\em Portability}: SLURM is written in the C language, with a GNU 
-{\em autoconf} configuration engine.  
-While initially written for Linux, other UNIX-like operating systems 
+\item {\em Portability}: SLURM is written in the C language, with a GNU
+{\em autoconf} configuration engine.
+While initially written for Linux, other UNIX-like operating systems
 should be easy porting targets.
-SLURM also supports a general purpose {\em plugin} mechanism, which 
-permits a variety of different infrastructures to be easily supported. 
-The SLURM configuration file specifies which set of plugin modules 
-should be used. 
+SLURM also supports a general purpose {\em plugin} mechanism, which
+permits a variety of different infrastructures to be easily supported.
+The SLURM configuration file specifies which set of plugin modules
+should be used.
 
 \item {\em Interconnect independence}: SLURM supports UDP/IP based
-communication as well as the Quadrics Elan3 and Myrinet interconnects.  
-Adding support for other interconnects is straightforward and utilizes 
+communication as well as the Quadrics Elan3 and Myrinet interconnects.
+Adding support for other interconnects is straightforward and utilizes
 the plugin mechanism described above.
 
 \item {\em Scalability}: SLURM is designed for scalability to clusters of
-thousands of nodes. 
-Jobs may specify their resource requirements in a variety of ways 
-including requirements options and ranges, potentially permitting 
+thousands of nodes.
+Jobs may specify their resource requirements in a variety of ways
+including requirements options and ranges, potentially permitting
 faster initiation than otherwise possible.
 
 \item {\em Robustness}: SLURM can handle a variety of failure modes
-without terminating workloads, including crashes of the node running 
-the SLURM controller. 
-User jobs may be configured to continue execution despite the failure 
-of one or more nodes on which they are executing. 
-Nodes allocated to a job are available for reuse as soon as the job(s) 
-allocated to that node terminate. 
-If some nodes fail to complete job termination 
-in a timely fashion due to hardware of software problems, only the 
+without terminating workloads, including crashes of the node running
+the SLURM controller.
+User jobs may be configured to continue execution despite the failure
+of one or more nodes on which they are executing.
+Nodes allocated to a job are available for reuse as soon as the job(s)
+allocated to that node terminate.
+If some nodes fail to complete job termination
+in a timely fashion due to hardware or software problems, only the
 scheduling of those tardy nodes will be effected.
 
-\item {\em Secure}: SLURM employs crypto technology to authenticate 
-users to services and services to each other with a variety of options 
-available through the plugin mechanism.  
-SLURM does not assume that its networks are physically secure, 
-but does assume that the entire cluster is within a single 
-administrative domain with a common user base across the 
+\item {\em Secure}: SLURM employs crypto technology to authenticate
+users to services and services to each other with a variety of options
+available through the plugin mechanism.
+SLURM does not assume that its networks are physically secure,
+but does assume that the entire cluster is within a single
+administrative domain with a common user base across the
 entire cluster.
 
-\item {\em System administrator friendly}: SLURM is configured a 
-simple configuration file and minimizes distributed state.  
-Its configuration may be changed at any time without impacting running jobs. 
+\item {\em System administrator friendly}: SLURM is configured via a
+simple configuration file and minimizes distributed state.
+Its configuration may be changed at any time without impacting running jobs.
 Heterogeneous nodes within a cluster may be easily managed.
-SLURM interfaces are usable by scripts and its behavior is highly 
+SLURM interfaces are usable by scripts and its behavior is highly
 deterministic.
 
 \end{itemize}
 
-The main contribution of our work is that we have provided a readily available 
-tool that anybody can use to efficiently manage clusters of different size and architecture. 
-SLURM is highly scalable\footnote{It was observed that it took less than five seconds for SLURM to launch a 1900-task job over 950 nodes on recently installed cluster at Lawrence Livermore National Laboratory.}. 
-The SLURM can be easily ported to any cluster system with minimal effort with its plugin 
+The main contribution of our work is that we have provided a readily available
+tool that anybody can use to efficiently manage clusters of different size and architecture.
+SLURM is highly scalable\footnote{It was observed that it took less than five seconds for SLURM to launch a 1900-task job over 950 nodes on a recently installed cluster at Lawrence Livermore National Laboratory.}.
+SLURM can be easily ported to any cluster system with minimal effort using its plugin
 capability and can be used with any meta-batch scheduler or a Grid resource broker~\cite{Gridbook}
 with its well-defined interfaces.
 
-The rest of the paper is organized as follows. 
-Section 2 describes the architecture of SLURM in detail. Section 3 discusses the services provided by SLURM followed by performance study of 
-SLURM in Section 4. Brief survey of existing cluster management systems is presented in Section 5. 
+The rest of the paper is organized as follows.
+Section 2 describes the architecture of SLURM in detail. Section 3 discusses the services provided by SLURM followed by performance study of
+SLURM in Section 4. A brief survey of existing cluster management systems is presented in Section 5.
 %Section 6 describes how the SLURM can be used with more sphisticated external schedulers.
 Concluding remarks and future development plan of SLURM is given in Section 6.
diff --git a/doc/jsspp/jsspp.tex b/doc/jsspp/jsspp.tex
index 6c6fef824a450c7162de4fdf70c34a3c48a88e48..ceca2b865b4d96e3b0b413ff1770d9fe3500cfcb 100644
--- a/doc/jsspp/jsspp.tex
+++ b/doc/jsspp/jsspp.tex
@@ -41,7 +41,7 @@ Laboratory under Contract No. W-7405-Eng-48. Document UCRL-JC-147996.}}
 
 \author{Morris A. Jette \and Andy B. Yoo \and Mark Grondona}
 
-% We cheat here to easily get the desired allignment 
+% We cheat here to easily get the desired alignment
 %\date{\{jette1,mgrondona\}@llnl.gov}
 \date{Lawrence Livermore National Laboratory\\
 Livermore, CA 94551\\
@@ -63,7 +63,7 @@ Livermore, CA 94551\\
 
 \section*{Acknowledgments}
 
-Additional programmers are responsible for the development of 
+Additional programmers responsible for the development of
 SLURM include: Chris Dunlap, Joey Ekstrom, Jim Garlick, Kevin Tew
 and Jay Windley.
 
diff --git a/doc/jsspp/perf.tex b/doc/jsspp/perf.tex
index 9a6596ef247ea9fabaaf63e990aa502768b55ae4..12cadd938626380300e4a33714a4d348ea6fd6be 100644
--- a/doc/jsspp/perf.tex
+++ b/doc/jsspp/perf.tex
@@ -7,10 +7,10 @@
 \end{figure}
 
 We were able to perform some SLURM tests on a 1000 node cluster at LLNL.
-Some development was still underway at that time and 
-tuning had not been performed. The results for executing simple 'hostname' program 
-on two tasks per node and various node counts is show 
-in Figure~\ref{timing}. We found SLURM performance to be comparable 
-to the Quadrics Resource Management System (RMS)~\cite{RMS} 
-for all job sizes and about 80 times faster than IBM 
+Some development was still underway at that time and
+tuning had not been performed. The results for executing a simple 'hostname' program
+on two tasks per node and various node counts is shown
+in Figure~\ref{timing}. We found SLURM performance to be comparable
+to the Quadrics Resource Management System (RMS)~\cite{RMS}
+for all job sizes and about 80 times faster than IBM
 LoadLeveler~\cite{LoadLevelerWeb,LoadLevelerManual} at tested job sizes.
diff --git a/doc/jsspp/references.bib b/doc/jsspp/references.bib
index 6e4bcd9a11259ac346ba24670eddf343ec7c783d..c93e62ae4dd783adfeddd4bce97514a493b254fd 100644
--- a/doc/jsspp/references.bib
+++ b/doc/jsspp/references.bib
@@ -24,7 +24,7 @@
 	number = "N/A"}
 @techreport{Suzuoka95,
 	author = "T. Suzuoka and J. Subhlok and T. Gross",
-	title = "{E}valuating {J}ob {S}cheduling {T}echniques for {H}ighly {P}arallel {C}omputers", 
+	title = "{E}valuating {J}ob {S}cheduling {T}echniques for {H}ighly {P}arallel {C}omputers",
 	institution = "{S}chool of {C}omputer {S}cience, {C}arbegie {M}ellon {U}niversity",
 	year = 1995,
 	number = "CMU-CS-95-149"}
@@ -375,18 +375,18 @@ ogrammed {M}ultiprocessors",
         title = {The GRID: Blueprint for a New Computing Onfrastructure},
         publisher = {Morgan Kaufmann Publishers, Inc.},
         year = 1999}
-@conference{STORM01, 
+@conference{STORM01,
     author = "Eitan Frachtenberg and Fabrizio Petrini and others",
     title  = "STORM: Lightning-Fast Resource Management",
     booktitle = "Proceedings of SuperComputing",
     year      = 2002,
 }
-@misc{Authd02, 
+@misc{Authd02,
     author = "Authd home page",
     title  = "http://www.theether.org/authd/",
 }
 
-@misc{Quadrics02, 
+@misc{Quadrics02,
     AUTHOR = "Quadrics Resource Management System",
     TITLE  = "http://www.quadrics.com/",
 }
diff --git a/doc/jsspp/services.tex b/doc/jsspp/services.tex
index 0189f0acffb5ae2e09ad44f83c128f88a8412f9f..7bd51cf7158d739d1dae72228f0bc8be1340f07b 100644
--- a/doc/jsspp/services.tex
+++ b/doc/jsspp/services.tex
@@ -2,160 +2,160 @@
 \subsection{Command Line Utilities}
 
 The command line utilities are the user interface to SLURM functionality.
-They offer users access to remote execution and job control. They also 
-permit administrators to dynamically change the system configuration. 
-These commands all use SLURM APIs which are directly available for 
+They offer users access to remote execution and job control. They also
+permit administrators to dynamically change the system configuration.
+These commands all use SLURM APIs which are directly available for
 more sophisticated applications.
 
 \begin{itemize}
-\item {\tt scancel}: Cancel a running or a pending job or job step, 
-subject to authentication and authorization. This command can also 
-be used to send an arbitrary signal to all processes on all nodes 
+\item {\tt scancel}: Cancel a running or a pending job or job step,
+subject to authentication and authorization. This command can also
+be used to send an arbitrary signal to all processes on all nodes
 associated with a job or job step.
 
 \item {\tt scontrol}: Perform privileged administrative commands
-such as draining a node or partition in preparation for maintenance. 
+such as draining a node or partition in preparation for maintenance.
 Many \scontrol\ functions can only be executed by privileged users.
 
 \item {\tt sinfo}: Display a summary of partition and node information.
 A assortment of filtering and output format options are available.
 
-\item {\tt squeue}: Display the queue of running and waiting jobs 
-and/or job steps. A wide assortment of filtering, sorting, and output 
+\item {\tt squeue}: Display the queue of running and waiting jobs
+and/or job steps. A wide assortment of filtering, sorting, and output
 format options are available.
 
 \item {\tt srun}: Allocate resources, submit jobs to the SLURM queue,
-and initiate parallel tasks (job steps). 
-Every set of executing parallel tasks has an associated \srun\ which 
-initiated it and, if the \srun\ persists, managing it. 
-Jobs may be submitted for batch execution, in which case 
-\srun\ terminates after job submission. 
-Jobs may also be submitted for interactive execution, where \srun\ keeps 
-running to shepherd the running job. In this case, 
-\srun\ negotiates connections with remote {\tt slurmd}'s 
+and initiate parallel tasks (job steps).
+Every set of executing parallel tasks has an associated \srun\ which
+initiated it and, if the \srun\ persists, manages it.
+Jobs may be submitted for batch execution, in which case
+\srun\ terminates after job submission.
+Jobs may also be submitted for interactive execution, where \srun\ keeps
+running to shepherd the running job. In this case,
+\srun\ negotiates connections with remote {\tt slurmd}'s
 for job initiation and to
 get stdout and stderr, forward stdin, and respond to signals from the user.
 The \srun\ may also be instructed to allocate a set of resources and
 spawn a shell with access to those resources.
-\srun\ has a total of 13 parameters to control where and when the job 
+\srun\ has a total of 13 parameters to control where and when the job
 is initiated.
 
 \end{itemize}
 
 \subsection{Plugins}
 
-In order to make the use of different infrastructures possible, 
-SLURM uses a general purpose plugin mechanism. 
-A SLURM plugin is a dynamically linked code object which is 
-loaded explicitly at run time by the SLURM libraries. 
+In order to make the use of different infrastructures possible,
+SLURM uses a general purpose plugin mechanism.
+A SLURM plugin is a dynamically linked code object which is
+loaded explicitly at run time by the SLURM libraries.
 A plugin provides a customized implemenation of a well-defined
-API connected to tasks such as authentication, interconnect fabric, 
+API connected to tasks such as authentication, interconnect fabric,
 task scheduling.
-A common set of functions is defined for use by all of the different 
-infrastructures of a particular variety. 
-For example, the authentication plugin must define functions 
-such as: 
+A common set of functions is defined for use by all of the different
+infrastructures of a particular variety.
+For example, the authentication plugin must define functions
+such as:
 {\tt slurm\_auth\_activate} to create a credential,
-{\tt slurm\_auth\_verify} to verify a credential to 
-approve or deny authentication, 
-{\tt slurm\_auth\_get\_uid} to get the user ID associated with 
+{\tt slurm\_auth\_verify} to verify a credential to
+approve or deny authentication,
+{\tt slurm\_auth\_get\_uid} to get the user ID associated with
 a specific credential, etc.
-It also must define the data structure used, a plugin type, 
+It also must define the data structure used, a plugin type,
 a plugin version number.
 The available plugins are defined in the configuration file.
-%When a slurm daemon is initiated, it reads the configuration 
-%file to determine which of the available plugins should be used. 
-%For example {\em AuthType=auth/authd} says to use the plugin for 
-%authd based authentication and {\em PluginDir=/usr/local/lib} 
+%When a slurm daemon is initiated, it reads the configuration
+%file to determine which of the available plugins should be used.
+%For example {\em AuthType=auth/authd} says to use the plugin for
+%authd based authentication and {\em PluginDir=/usr/local/lib}
 %identifies the directory in which to find the plugin.
 
 \subsection{Communications Layer}
 
-SLURM presently uses Berkeley sockets for communications. 
-However, we anticipate using the plugin mechanism to easily 
-permit use of other communications layers. 
-At LLNL we are using an Ethernet for SLURM communications and 
-the Quadrics Elan switch exclusively for user applications. 
-The SLURM configuration file permits the identification of each 
-node's hostname as well as its name to be used for communications. 
-%In the case of a control machine known as {\em mcri} to be 
-%communicated with using the name {\em emcri} (say to indicate 
-%an ethernet communications path), this is represented in the 
+SLURM presently uses Berkeley sockets for communications.
+However, we anticipate using the plugin mechanism to easily
+permit use of other communications layers.
+At LLNL we are using an Ethernet for SLURM communications and
+the Quadrics Elan switch exclusively for user applications.
+The SLURM configuration file permits the identification of each
+node's hostname as well as its name to be used for communications.
+%In the case of a control machine known as {\em mcri} to be
+%communicated with using the name {\em emcri} (say to indicate
+%an ethernet communications path), this is represented in the
 %configuration file as {\em ControlMachine=mcri ControlAddr=emcri}.
-%The name used for communication is the same as the hostname unless 
+%The name used for communication is the same as the hostname unless
 %%otherwise specified.
 
-While SLURM is able to manage 1000 nodes without difficulty using 
-sockets and Ethernet, we are reviewing other communication 
-mechanisms which may offer improved scalability. 
-One possible alternative is STORM\cite{STORM01}. 
-STORM uses the cluster interconnect and Network Interface Cards to 
-provide high-speed communications including a broadcast capability. 
-STORM only supports the Quadrics Elan interconnnect at present, 
-but does offer the promise of improved performance and scalability. 
+While SLURM is able to manage 1000 nodes without difficulty using
+sockets and Ethernet, we are reviewing other communication
+mechanisms which may offer improved scalability.
+One possible alternative is STORM\cite{STORM01}.
+STORM uses the cluster interconnect and Network Interface Cards to
+provide high-speed communications including a broadcast capability.
+STORM only supports the Quadrics Elan interconnect at present,
+but does offer the promise of improved performance and scalability.
 
 \subsection{Security}
 
-SLURM has a simple security model: 
+SLURM has a simple security model:
 Any user of the cluster may submit parallel jobs to execute and cancel
 his own jobs.  Any user may view SLURM configuration and state
-information.  
+information.
 Only privileged users may modify the SLURM configuration,
-cancel any jobs, or perform other restricted activities.  
-Privileged users in SLURM include the users {\em root} 
-and {\tt SlurmUser} (as defined in the SLURM configuration file). 
-If permission to modify SLURM configuration is 
+cancel any jobs, or perform other restricted activities.
+Privileged users in SLURM include the users {\em root}
+and {\tt SlurmUser} (as defined in the SLURM configuration file).
+If permission to modify SLURM configuration is
 required by others, set-uid programs may be used to grant specific
 permissions to specific users.
 
-We presently support three authentication mechanisms via plugins: 
-{\tt authd}\cite{Authd02}, {\tt munged} and {\tt none}. 
-A plugin can easily be developed for Kerberos or authentication 
+We presently support three authentication mechanisms via plugins:
+{\tt authd}\cite{Authd02}, {\tt munged} and {\tt none}.
+A plugin can easily be developed for Kerberos or other authentication
 mechanisms as desired.
 The \munged\ implementation is described below.
-A \munged\ daemon running as user {\em root} on each node confirms the 
-identify of the user making the request using the {\tt getpeername} 
-function and generates a credential. 
-The credential contains a user ID, 
-group ID, time-stamp, lifetime, some pseudo-random information, and 
-any user supplied information. The \munged\ uses a private key to 
+A \munged\ daemon running as user {\em root} on each node confirms the
+identity of the user making the request using the {\tt getpeername}
+function and generates a credential.
+The credential contains a user ID,
+group ID, time-stamp, lifetime, some pseudo-random information, and
+any user supplied information. The \munged\ uses a private key to
 generate a Message Authentication Code (MAC) for the credential.
-The \munged\ then uses a public key to symmetrically encrypt 
-the credential including the MAC. 
-SLURM daemons and programs transmit this encrypted 
-credential with communications. The SLURM daemon receiving the message 
-sends the credential to \munged\ on that node. 
-The \munged\ decrypts the credential using its private key, validates it 
-and returns the user ID and group ID of the user originating the 
+The \munged\ then uses a public key to symmetrically encrypt
+the credential including the MAC.
+SLURM daemons and programs transmit this encrypted
+credential with communications. The SLURM daemon receiving the message
+sends the credential to \munged\ on that node.
+The \munged\ decrypts the credential using its private key, validates it
+and returns the user ID and group ID of the user originating the
 credential.
-The \munged\ prevents replay of a credential on any single node 
+The \munged\ prevents replay of a credential on any single node
 by recording credentials that have already been authenticated.
-In SLURM's case, the user supplied information includes node 
-identification information to prevent a credential from being 
+In SLURM's case, the user supplied information includes node
+identification information to prevent a credential from being
 used on nodes it is not destined for.
 
-When resources are allocated to a user by the controller, a 
-{\em job step credential} is generated by combining the user ID, job ID, 
+When resources are allocated to a user by the controller, a
+{\em job step credential} is generated by combining the user ID, job ID,
 step ID, the list of resources allocated (nodes), and the credential
-lifetime. This job step credential is encrypted with 
-a \slurmctld\ private key. This credential 
+lifetime. This job step credential is encrypted with
+a \slurmctld\ private key. This credential
 is returned to the requesting agent ({\tt srun}) along with the
-allocation response, and must be forwarded to the remote {\tt slurmd}'s 
+allocation response, and must be forwarded to the remote {\tt slurmd}'s
 upon job step initiation. \slurmd\ decrypts this credential with the
 \slurmctld 's public key to verify that the user may access
-resources on the local node. \slurmd\ also uses this job step credential 
-to authenticate standard input, output, and error communication streams. 
-
-%Access to partitions may be restricted via a {\em RootOnly} flag.  
-%If this flag is set, job submit or allocation requests to this 
-%partition are only accepted if the effective user ID originating 
-%the request is a privileged user. 
-%The request from such a user may submit a job as any other user. 
+resources on the local node. \slurmd\ also uses this job step credential
+to authenticate standard input, output, and error communication streams.
+
+%Access to partitions may be restricted via a {\em RootOnly} flag.
+%If this flag is set, job submit or allocation requests to this
+%partition are only accepted if the effective user ID originating
+%the request is a privileged user.
+%The request from such a user may submit a job as any other user.
 %This may be used, for example, to provide specific external schedulers
-%with exclusive access to partitions.  Individual users will not be 
-%permitted to directly submit jobs to such a partition, which would 
-%prevent the external scheduler from effectively managing it.  
-%Access to partitions may also be restricted to users who are 
+%with exclusive access to partitions.  Individual users will not be
+%permitted to directly submit jobs to such a partition, which would
+%prevent the external scheduler from effectively managing it.
+%Access to partitions may also be restricted to users who are
 %members of specific Unix groups using a {\em AllowGroups} specification.
 
 \subsection{Job Initiation}
diff --git a/doc/jsspp/survey.tex b/doc/jsspp/survey.tex
index 8d68fdda639935cad2d667d9eeed1c22ed3c9045..9426262413becfe0ccfe7dbfeef5a7f99b8144d0 100644
--- a/doc/jsspp/survey.tex
+++ b/doc/jsspp/survey.tex
@@ -2,42 +2,42 @@
 \subsection*{Portable Batch System (PBS)}
 
 The Portable Batch System (PBS)~\cite{PBS}
-is a flexible batch queuing and 
-workload management system originally developed by Veridian Systems 
-for NASA.  It operates on networked, multi-platform UNIX environments, 
-including heterogeneous clusters of workstations, supercomputers, and 
-massively parallel systems. PBS was developed as a replacement for 
+is a flexible batch queuing and
+workload management system originally developed by Veridian Systems
+for NASA.  It operates on networked, multi-platform UNIX environments,
+including heterogeneous clusters of workstations, supercomputers, and
+massively parallel systems. PBS was developed as a replacement for
 NQS (Network Queuing System) by many of the same people.
 
-PBS supports sophisticated scheduling logic (via the Maui 
-Scheduler). 
-PBS spawn's daemons on each 
+PBS supports sophisticated scheduling logic (via the Maui
+Scheduler).
+PBS spawns daemons on each
 machine to shepherd the job's tasks.
-It provides an interface for administrators to easily 
-interface their own scheduling modules.  PBS can support 
-long delays in file staging with retry.  Host 
-authentication is provided by checking port numbers (low ports numbers are only 
-accessible to user root).  Credential service is used for user authentication. 
+It provides an interface for administrators to easily
+interface their own scheduling modules.  PBS can support
+long delays in file staging with retry.  Host
+authentication is provided by checking port numbers (low ports numbers are only
+accessible to user root).  Credential service is used for user authentication.
 It has the job prolog and epilog feature.
+PBS supports a
-high priority queue for smaller "interactive" jobs.  Signal to daemons 
-causes current log file to be closed, renamed with 
+PBS Supports
+high priority queue for smaller "interactive" jobs.  Signal to daemons
+causes current log file to be closed, renamed with
 time-stamp, and a new log file created.
 
 Although the PBS is portable and has a broad user base, it has significant drawbacks.
-PBS is single threaded and hence exhibits poor performance on large clusters. 
-This is particularly problematic when a compute node in the system fails: 
-PBS tries to contact down node while other activities must wait. 
-PBS also has a weak mechanism for starting and cleaning up parallel jobs. 
-%Specific complaints about PBS from members of the OSCAR group (Jeremy Enos, 
+PBS is single threaded and hence exhibits poor performance on large clusters.
+This is particularly problematic when a compute node in the system fails:
+PBS tries to contact the down node while other activities must wait.
+PBS also has a weak mechanism for starting and cleaning up parallel jobs.
+%Specific complaints about PBS from members of the OSCAR group (Jeremy Enos,
 %Jeff Squyres, Tim Mattson):
 %\begin{itemize}
-%\item Sensitivity to hostname configuration on the server; improper 
-%      configuration results in hard to diagnose failure modes.  Once 
+%\item Sensitivity to hostname configuration on the server; improper
+%      configuration results in hard to diagnose failure modes.  Once
 %      configuration is correct, this issue disappears.
-%\item When a compute node in the system dies, everything slows down.  
+%\item When a compute node in the system dies, everything slows down.
 %      PBS is single-threaded and continues to try to contact down nodes,
-%      while other activities like scheduling jobs, answering qsub/qstat 
+%      while other activities like scheduling jobs, answering qsub/qstat
 %      requests, etc., have to wait for a complete timeout cycle before being
 %      processed.
 %\item Default scheduler is just FIFO, but Maui can be plugged in so this
@@ -45,17 +45,17 @@ PBS also has a weak mechanism for starting and cleaning up parallel jobs.
 %\item Weak mechanism for starting/cleaning up parallel jobs (pbsdsh).
 %      When a job is killed, pbsdsh kills the processes it started, but
 %      if the process doesn't die on the first shot it may continue on.
-%\item PBS server continues to mark specific nodes offline, even though they 
+%\item PBS server continues to mark specific nodes offline, even though they
 %      are healthy.  Restarting the server fixes this.
-%\item Lingering jobs.  Jobs assigned to nodes, and then bounced back to the 
-%      queue for any reason, maintain their assignment to those nodes, even 
-%      if another job had already started on them.  This is a poor clean up 
+%\item Lingering jobs.  Jobs assigned to nodes, and then bounced back to the
+%      queue for any reason, maintain their assignment to those nodes, even
+%      if another job had already started on them.  This is a poor clean up
 %      issue.
 %\item When the PBS server process is restarted, it puts running jobs at risk.
-%\item Poor diagnostic messages.  This problem can be as serious as ANY other 
-%      problem.  This problem makes small, simple problems turn into huge 
-%      turmoil occasionally.  For example, the variety of symptoms that arise 
-%      from improper hostname configuration.  All the symptoms that result are 
+%\item Poor diagnostic messages.  This problem can be as serious as ANY other
+%      problem.  This problem makes small, simple problems turn into huge
+%      turmoil occasionally.  For example, the variety of symptoms that arise
+%      from improper hostname configuration.  All the symptoms that result are
 %      very misleading to the real problem.
 %\item Rumored to have problems when the number of jobs in the queues gets
 %      large.
@@ -69,7 +69,7 @@ PBS also has a weak mechanism for starting and cleaning up parallel jobs.
 %
 %PBS is owned by Veridian and is released as three separate products with
 %different licenses: {\em PBS Pro} is a commercial product sold by Veridian;
-%{\em OpenPBS} is an pseudo open source version of PBS that requires 
+%{\em OpenPBS} is an pseudo open source version of PBS that requires
 %registration; and
 %{\em PBS} is a GPL-like, true open source version of PBS.
 %
@@ -83,148 +83,148 @@ PBS also has a weak mechanism for starting and cleaning up parallel jobs.
 \subsection{Quadrics RMS}
 
 Quadrics RMS\cite{Quadrics02}
-(Resource Management System) is for 
-Unix systems having Quadrics Elan interconnects. 
-RMS functionality and performance is excellent. 
-It major limitation is the requirement for a Quadrics interconnect. 
-The proprietary code and cost may also pose difficulties under some 
+(Resource Management System) is for
+Unix systems having Quadrics Elan interconnects.
+RMS functionality and performance is excellent.
+It major limitation is the requirement for a Quadrics interconnect.
+The proprietary code and cost may also pose difficulties under some
 circumstances.
 
 
 \subsection*{Maui Scheduler}
 
-Maui Scheduler~\cite{Maui} is an advanced reservation HPC batch scheduler 
-for use with SP, O2K, and UNIX/Linux clusters. 
-It is widely used to extend the functionality of PBS and LoadLeveler, 
+Maui Scheduler~\cite{Maui} is an advanced reservation HPC batch scheduler
+for use with SP, O2K, and UNIX/Linux clusters.
+It is widely used to extend the functionality of PBS and LoadLeveler,
 which Maui requires to perform the parallel job initiation and management.
 
 \subsection*{Distributed Production Control System (DPCS)}
 
 The Distributed Production Control System (DPCS)~\cite{DPCS}
-is a scheduler developed at Lawrence Livermore National Laboratory (LLNL). 
+is a scheduler developed at Lawrence Livermore National Laboratory (LLNL).
 The DPCS provides basic data collection and reporting
 mechanisms for prject-level, near real-time accounting and resource allocation
 to customers with established limits per customers' organization budgets,
 In addition, the DPCS evenly distributes workload across available computers
 and supports dynamic reconfiguration and graceful degradation of service to prevent
 overuse of a computer where not authorized.
-%DPCS is (or will soon be) open source, although its use is presently 
-%confined to LLNL. The development of DPCS began in 1990 and it has 
-%evolved into a highly scalable and fault-tolerant meta-scheduler 
-%operating on top of LoadLeveler, RMS, and NQS. DPCS provides: 
+%DPCS is (or will soon be) open source, although its use is presently
+%confined to LLNL. The development of DPCS began in 1990 and it has
+%evolved into a highly scalable and fault-tolerant meta-scheduler
+%operating on top of LoadLeveler, RMS, and NQS. DPCS provides:
 %\begin{itemize}
-%\item Basic data collection and reporting mechanisms for project-level, 
+%\item Basic data collection and reporting mechanisms for project-level,
 %      near real-time accounting.
-%\item Resource allocation to customers with established limits per 
-%      customers' organizational budgets. 
-%\item Proactive delivery of services to organizations that are relatively 
+%\item Resource allocation to customers with established limits per
+%      customers' organizational budgets.
+%\item Proactive delivery of services to organizations that are relatively
 %      underserviced using a fair-share resource allocation scheme.
-%\item Automated, highly flexible system with feedback for proactive delivery 
+%\item Automated, highly flexible system with feedback for proactive delivery
 %      of resources.
 %\item Even distribution of the workload across available computers.
 %\item Flexible prioritization of production workload, including "run on demand."
 %\item Dynamic reconfiguration and re-tuning.
-%\item Graceful degradation in service to prevent overuse of a computer where 
+%\item Graceful degradation in service to prevent overuse of a computer where
 %      not authorized.
 %\end{itemize}
 
-DPCS supports only a 
-limited number of computer systems: IBM RS/6000 and SP, Linux, 
-Sun Solaris, and Compaq Alpha. 
-Like the Maui Scheduler, DPCS requires an underlying infrastructure for 
+DPCS supports only a
+limited number of computer systems: IBM RS/6000 and SP, Linux,
+Sun Solaris, and Compaq Alpha.
+Like the Maui Scheduler, DPCS requires an underlying infrastructure for
 parallel job initiation and management (LoadLeveler, NQS, RMS or SLURM).
 
 \subsection*{LoadLeveler}
 
-LoadLeveler~\cite{LoadLevelerManual,LoadLevelerWeb} 
-is a proprietary batch system and parallel job manager by 
-IBM. LoadLeveler supports few non-IBM systems. Very primitive 
-scheduling software exists and other software is required for reasonable 
-performance, such as Maui Scheduler or DPCS. 
-The LoadLeveler has a  simple and very flexible queue and job class structure available 
-operating in "matrix" fashion. 
-The biggest problem of the LoadLeveler is its poor scalability. 
+LoadLeveler~\cite{LoadLevelerManual,LoadLevelerWeb}
+is a proprietary batch system and parallel job manager by
+IBM. LoadLeveler supports few non-IBM systems. Very primitive
+scheduling software exists and other software is required for reasonable
+performance, such as Maui Scheduler or DPCS.
+The LoadLeveler has a  simple and very flexible queue and job class structure available
+operating in "matrix" fashion.
+The biggest problem of the LoadLeveler is its poor scalability.
 It typically requires 20 minutes to execute even a trivial 500-node, 8000-task
 on the IBM SP computers at LLNL.
 %In addition, all jobs must be initiated through the LoadLeveler, and a special version of
-%MPI is requested to run a parallel job. 
+%MPI is requested to run a parallel job.
 %[So do RMS, SLURM, etc. for interconnect set-up - Moe]%
 %
-%Many configuration files exist with signals to 
-%daemons used to update configuration (like LSF, good). All jobs must 
-%be initiated through LoadLeveler (no real "interactive" jobs, just 
-%high priority queue for smaller jobs). Job accounting is only available 
-%on termination (very bad for long-running jobs). Good status 
-%information on nodes and LoadLeveler daemons is available. LoadLeveler 
+%Many configuration files exist with signals to
+%daemons used to update configuration (like LSF, good). All jobs must
+%be initiated through LoadLeveler (no real "interactive" jobs, just
+%high priority queue for smaller jobs). Job accounting is only available
+%on termination (very bad for long-running jobs). Good status
+%information on nodes and LoadLeveler daemons is available. LoadLeveler
 %allocates jobs either entire nodes or shared nodes ,depending upon configuration.
 %
-%A special version of MPI is required. LoadLeveler allocates 
-%interconnect resources, spawns the user's processes, and manages the 
-%job afterwards. Daemons also monitor the switch and node health using 
-%a "heart-beat monitor." One fundamental problem is that when the 
-%"Central Manager" restarts, it forgets about all nodes and jobs. They 
-%appear in the database only after checking in via the heartbeat. It 
-%needs to periodically write state to disk instead of doing 
-%"cold-starts" after the daemon fails, which is rare. It has the job 
-%prolog and epilog feature, which permits us to enable/disable logins 
+%A special version of MPI is required. LoadLeveler allocates
+%interconnect resources, spawns the user's processes, and manages the
+%job afterwards. Daemons also monitor the switch and node health using
+%a "heart-beat monitor." One fundamental problem is that when the
+%"Central Manager" restarts, it forgets about all nodes and jobs. They
+%appear in the database only after checking in via the heartbeat. It
+%needs to periodically write state to disk instead of doing
+%"cold-starts" after the daemon fails, which is rare. It has the job
+%prolog and epilog feature, which permits us to enable/disable logins
 %and remove stray processes.
 %
-%LoadLeveler evolved from Condor, or what was Condor a decade ago. 
-%While I am less familiar with LSF and Condor than LoadLeveler, they 
-%all appear very similar with LSF having the far more sophisticated 
-%scheduler. We should carefully review their data structures and 
+%LoadLeveler evolved from Condor, or what was Condor a decade ago.
+%While I am less familiar with LSF and Condor than LoadLeveler, they
+%all appear very similar with LSF having the far more sophisticated
+%scheduler. We should carefully review their data structures and
 %daemons before designing our own.
 %
 \subsection*{Load Sharing Facility (LSF)}
 
 LSF~\cite{LSF}
-is a proprietary batch system and parallel job manager by 
-Platform Computing. Widely deployed on a wide variety of computer 
-architectures, it has sophisticated scheduling software including 
-fair-share, backfill, consumable resources, an job preemption and 
+is a proprietary batch system and parallel job manager by
+Platform Computing. Widely deployed on a wide variety of computer
+architectures, it has sophisticated scheduling software including
+fair-share, backfill, consumable resources, an job preemption and
 very flexible queue structure.
 It also provides good status information on nodes and LSF daemons.
-While LSF is quite powerful, it is not open-source and can be costly on 
+While LSF is quite powerful, it is not open-source and can be costly on
 larger clusters.
 %The LSF share many of its shortcomings with the LoadLeveler: job initiation only
 %through LSF, requirement of a spwcial MPI library, etc.
-%Limits are available on both a per process bs per-job  
-%basis. Time limits include CPU time and wall-clock time. Many 
-%configuration files with signals to daemons used to update 
-%configuration (like LoadLeveler, good). All jobs must be initiated 
-%through LSF to be accounted for and managed by LSF ("interactive" 
-%jobs can be executed through a high priority queue for 
-%smaller jobs). Job accounting only available in near real-time (important 
-%for long-running jobs). Jobs initiated from same directory as 
-%submitted from (not good for computer centers with diverse systems 
-%under LSF control). Good status information on nodes and LSF daemons. 
-%Allocates jobs either entire nodes or shared nodes depending upon 
+%Limits are available on both a per process bs per-job
+%basis. Time limits include CPU time and wall-clock time. Many
+%configuration files with signals to daemons used to update
+%configuration (like LoadLeveler, good). All jobs must be initiated
+%through LSF to be accounted for and managed by LSF ("interactive"
+%jobs can be executed through a high priority queue for
+%smaller jobs). Job accounting only available in near real-time (important
+%for long-running jobs). Jobs initiated from same directory as
+%submitted from (not good for computer centers with diverse systems
+%under LSF control). Good status information on nodes and LSF daemons.
+%Allocates jobs either entire nodes or shared nodes depending upon
 %configuration.
 %
-%A special version of MPI is required. LSF allocates interconnect 
-%resources, spawns the user's processes, and manages the job 
-%afterwards. While I am less familiar with LSF than LoadLeveler, they 
-%appear very similar with LSF having the far more sophisticated 
-%scheduler. We should carefully review their data structures and 
+%A special version of MPI is required. LSF allocates interconnect
+%resources, spawns the user's processes, and manages the job
+%afterwards. While I am less familiar with LSF than LoadLeveler, they
+%appear very similar with LSF having the far more sophisticated
+%scheduler. We should carefully review their data structures and
 %daemons before designing our own.
 
 
 \subsection*{Condor}
 
 Condor~\cite{Condor,Litzkow88,Basney97}
-is a batch system and parallel job manager 
-developed by the University of Wisconsin. 
-Condor was the basis for IBM's LoadLeveler and both share very similar 
-underlying infrastructure. Condor has a very sophisticated checkpoint/restart 
-service that does not rely upon kernel changes, but a variety of 
-library changes (which prevent it from being completely general). The 
-Condor checkpoint/restart service has been integrated into LSF, 
-Codine, and DPCS. Condor is designed to operate across a 
-heterogeneous environment, mostly to harness the compute resources of 
-workstations and PCs. It has an interesting "advertising" service. 
-Servers advertise their available resources and consumers advertise 
-their requirements for a broker to perform matches. The checkpoint 
-mechanism is used to relocate work on demand (when the "owner" of a 
+is a batch system and parallel job manager
+developed by the University of Wisconsin.
+Condor was the basis for IBM's LoadLeveler and both share very similar
+underlying infrastructure. Condor has a very sophisticated checkpoint/restart
+service that does not rely upon kernel changes, but a variety of
+library changes (which prevent it from being completely general). The
+Condor checkpoint/restart service has been integrated into LSF,
+Codine, and DPCS. Condor is designed to operate across a
+heterogeneous environment, mostly to harness the compute resources of
+workstations and PCs. It has an interesting "advertising" service.
+Servers advertise their available resources and consumers advertise
+their requirements for a broker to perform matches. The checkpoint
+mechanism is used to relocate work on demand (when the "owner" of a
 desktop machine wants to resume work).
 
 %
@@ -241,7 +241,7 @@ desktop machine wants to resume work).
 
 \subsection*{Beowulf Distributed Process Space (BPROC)}
 
-The Beowulf Distributed Process Space 
+The Beowulf Distributed Process Space
 (BPROC)
 is set of kernel
 modifications, utilities and libraries which allow a user to start
@@ -250,13 +250,13 @@ processes started with this mechanism appear in the process table
 of the front end machine in a cluster. This allows remote process
 management using the normal UNIX process control facilities. Signals
 are transparently forwarded to remote processes and exit status is
-received using the usual wait() mechanisms. This tight coupling of 
-a cluster's nodes is convenient, but high scalability can be difficult 
+received using the usual wait() mechanisms. This tight coupling of
+a cluster's nodes is convenient, but high scalability can be difficult
 to achieve.
 
 %\subsection{xcat}
 %
-%Presumably IBM's suite of cluster management software 
+%Presumably IBM's suite of cluster management software
 %(xcat\footnote{http://publib-b.boulder.ibm.com/Redbooks.nsf/RedbookAbstracts/sg246041.html})
 %includes a batch system.  Look into this.
 %
@@ -266,16 +266,16 @@ to achieve.
 %Parallel Job Launcher, Compute Node Daemon Process,
 %Compute Node Allocator, Compute Node Status Tool.
 %
-%\subsection{NQS} 
+%\subsection{NQS}
 %
-%NQS\footnote{http://umbc7.umbc.edu/nqs/nqsmain.html}, 
+%NQS\footnote{http://umbc7.umbc.edu/nqs/nqsmain.html},
 %the Network Queueing System, is a serial batch system.
 %
 %\subsection*{LAM / MPI}
 %
 %Local Area Multicomputer (LAM)~\cite{LAM}
-%is an MPI programming environment and development system for heterogeneous 
-%computers on a network. 
+%is an MPI programming environment and development system for heterogeneous
+%computers on a network.
 %With LAM, a dedicated cluster or an existing network
 %computing infrastructure can act as one parallel computer solving
 %one problem.  LAM features extensive debugging support in the
@@ -296,11 +296,11 @@ to achieve.
 %
 %\subsection{SCIDAC}
 %
-%The Scientific Discovery through Advanced Computing (SciDAC) 
+%The Scientific Discovery through Advanced Computing (SciDAC)
 %project\footnote{http://www.scidac.org/ScalableSystems}
 %has a Resource Management and Accounting working group
-%and a white paper\cite{Res2000}. Deployment of a system with 
-%the required fault-tolerance and scalability is scheduled 
+%and a white paper\cite{Res2000}. Deployment of a system with
+%the required fault-tolerance and scalability is scheduled
 %for June 2006.
 %
 %\subsection{GNU Queue}
@@ -312,5 +312,5 @@ to achieve.
 %Separate queueing system?
 %
 %\subsection{SQMX}
-%Part of the SCE Project\footnote{http://www.opensce.org/}, 
+%Part of the SCE Project\footnote{http://www.opensce.org/},
 %SQMX\footnote{http://www.beowulf.org/pipermail/beowulf-announce/2001-January/000086.html} is worth taking a look at.
diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
index 3ee8ff7965911739361d38d68e3d86324170a74d..e33fbe2b389ae913b710d51519a228e75845aa4b 100644
--- a/doc/man/Makefile.am
+++ b/doc/man/Makefile.am
@@ -1,5 +1,5 @@
 
-EXTRA_DIST = man1 man3 man5 man8 
+EXTRA_DIST = man1 man3 man5 man8
 
 man1_MANS =            \
 	man1/sacct.1 \
diff --git a/doc/man/Makefile.in b/doc/man/Makefile.in
index 0184288c2b1cab2702411d86b3c3529d24e32700..c4f6507a0e9bd0dc50e96f387440ce53da1db2fd 100644
--- a/doc/man/Makefile.in
+++ b/doc/man/Makefile.in
@@ -292,7 +292,7 @@ target_vendor = @target_vendor@
 top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
-EXTRA_DIST = man1 man3 man5 man8 
+EXTRA_DIST = man1 man3 man5 man8
 man1_MANS = \
 	man1/sacct.1 \
 	man1/sacctmgr.1 \
diff --git a/doc/man/man1/sacct.1 b/doc/man/man1/sacct.1
index 149ba08ccaf9b0dd19039cf6d0564fe69aa0bcc8..f2904adacb0943f22cb2915a32bcdb5befb6b9ed 100644
--- a/doc/man/man1/sacct.1
+++ b/doc/man/man1/sacct.1
@@ -12,33 +12,33 @@ SLURM job accounting log or SLURM database
 Accounting information for jobs invoked with SLURM are either logged
 in the job accounting log file or saved to the SLURM database.
 .PP
-The 
+The
 .BR "sacct "
-command displays job accounting data stored in the job accounting log 
+command displays job accounting data stored in the job accounting log
 file or SLURM database in a variety of forms for your analysis.
-The 
+The
 .BR "sacct "
-command displays information on jobs, job steps, status, and exitcodes by 
+command displays information on jobs, job steps, status, and exitcodes by
 default.
-You can tailor the output with the use of the 
-\f3\-\-fields=\fP 
+You can tailor the output with the use of the
+\f3\-\-fields=\fP
 option to specify the fields to be shown.
 .PP
-For the root user, the 
+For the root user, the
 .BR "sacct "
-command displays job accounting data for all users, although there are 
-options to filter the output to report only the jobs from a specified 
+command displays job accounting data for all users, although there are
+options to filter the output to report only the jobs from a specified
 user or group.
 .PP
-For the non\-root user, the 
+For the non\-root user, the
 .BR "sacct "
-command limits the display of job accounting data to jobs that were 
+command limits the display of job accounting data to jobs that were
 launched with their own user identifier (UID) by default.
-Data for other users can be displayed with the 
+Data for other users can be displayed with the
 \f3\-\-all\fP,
 \f3\-\-user\fP,
-or 
-\f3\-\-uid\fP 
+or
+\f3\-\-uid\fP
 options.
 .TP "10"
 \f3Note: \fP\c
@@ -50,7 +50,7 @@ gather and report incomplete information for these calls;
 actually available on your system.
 .IP
 If \-\-dump is specified, the field selection options (\-\-brief,
-\-\-format, ...) have no effect. 
+\-\-format, ...) have no effect.
 .IP
 Elapsed time fields are presented as 2 fields, integral seconds and integral microseconds
 .IP
@@ -65,35 +65,35 @@ parameter in slurm.conf.
 .TP "10"
 \f3\-a \fP\f3,\fP \f3\-\-allusers\fP
 Displays the current user's jobs. Displays all users jobs when run by root.
-.IP 
+.IP
 
 .TP
 \f3\-A \fP\f2account_list\fP \fP\f3,\fP \f3\-\-accounts\fP\f3=\fP\f2account_list\fP
 Displays jobs when a comma separated list of accounts are given as the
 argument.
-.IP 
+.IP
 
 
-.TP 
+.TP
 \f3\-b \fP\f3,\fP \f3\-\-brief\fP
 Displays a brief listing, which includes the following data:
-.RS 
+.RS
 .TP "3"
 \(bu
-\f3jobid\fP 
+\f3jobid\fP
 .TP "3"
 \(bu
-\f3status\fP 
+\f3status\fP
 .TP "3"
 \(bu
-\f3exitcode\fP 
-.RE 
-.IP 
-This option has no effect when the 
-\f3\-\-\-dump\fP 
+\f3exitcode\fP
+.RE
+.IP
+This option has no effect when the
+\f3\-\-\-dump\fP
 option is also specified.
 
-.TP 
+.TP
 \f3\-C \fP\f2cluster_list\fP\f3,\fP  \f3\-\-cluster\fP\f3=\fP\f2cluster_list\fP
 Displays the statistics only for the jobs started on the clusters specified by
 the \f2cluster_list\fP operand, which is a comma\-separated list of clusters.
@@ -104,25 +104,25 @@ command on\&.
 .TP
 \f3\-c \fP\f3,\fP \f3\-\-completion\fP
 Use job completion instead of job accounting.
-.IP 
+.IP
 
 
-.TP 
+.TP
 \f3\-d \fP\f3,\fP \f3\-\-dump\fP
 Dumps the raw data records.
-.IP 
+.IP
 
-The section titled "INTERPRETING THE \-\-dump OPTION OUTPUT" describes the 
+The section titled "INTERPRETING THE \-\-dump OPTION OUTPUT" describes the
 data output when this option is used.
 
-.TP 
+.TP
 \f3\-\-duplicates\fP
 If SLURM job ids are reset, but the job accounting log file isn't
 reset at the same time (with \-e, for example), some job numbers will
 probably appear more than once in the accounting log file to refer to
 different jobs; such jobs can be distinguished by the "submit" time
 stamp in the data records.
-.IP 
+.IP
 When data for specific jobs are requested with the \-\-jobs option, we
 assume that the user wants to see only the most recent job with that
 number. This behavior can be overridden by specifying \-\-duplicates, in
@@ -133,31 +133,31 @@ which case all records that match the selection criteria will be returned.
 .IP
 Print a list of fields that can be specified with the \f3\-\-format\fP option.
 .IP
-.RS 
+.RS
 .PP
-.nf 
+.nf
 .ft 3
 Fields available:
 
-AllocCPUS     Account       AssocID       AveCPU       
-AvePages      AveRSS        AveVMSize     BlockID      
-Cluster       CPUTime       CPUTimeRAW    Elapsed      
-Eligible      End           ExitCode      GID          
-Group         JobID         JobName       Layout       
-MaxPages      MaxPagesNode  MaxPagesTask  MaxRSS       
+AllocCPUS     Account       AssocID       AveCPU
+AvePages      AveRSS        AveVMSize     BlockID
+Cluster       CPUTime       CPUTimeRAW    Elapsed
+Eligible      End           ExitCode      GID
+Group         JobID         JobName       Layout
+MaxPages      MaxPagesNode  MaxPagesTask  MaxRSS
 MaxRSSNode    MaxRSSTask    MaxVMSize     MaxVMSizeNode
-MaxVMSizeTask MinCPU        MinCPUNode    MinCPUTask   
-NCPUS         NNodes        NodeList      NTasks       
-Priority      Partition     QOS           QOSRAW       
-ReqCPUS       Reserved      ResvCPU       ResvCPURAW   
-Start         State         Submit        Suspended    
-SystemCPU     Timelimit     TotalCPU      UID          
-User          UserCPU       WCKey         WCKeyID      
+MaxVMSizeTask MinCPU        MinCPUNode    MinCPUTask
+NCPUS         NNodes        NodeList      NTasks
+Priority      Partition     QOS           QOSRAW
+ReqCPUS       Reserved      ResvCPU       ResvCPURAW
+Start         State         Submit        Suspended
+SystemCPU     Timelimit     TotalCPU      UID
+User          UserCPU       WCKey         WCKeyID
 
 .ft 1
-.fi 
-.RE 
-.IP 
+.fi
+.RE
+.IP
 The section titled "Job Accounting Fields" describes these fields.
 
 .TP
@@ -169,89 +169,89 @@ option return jobs in this state before this period.
 Valid time formats are...
 HH:MM[:SS] [AM|PM]
 MMDD[YY] or MM/DD[/YY] or MM.DD[.YY]
-MM/DD[/YY]-HH:MM[:SS]         
+MM/DD[/YY]-HH:MM[:SS]
 YYYY-MM-DD[THH:MM[:SS]]
 .IP
 
 
-.TP 
+.TP
 \f3\-f \fP\f2file\fP\f3,\fP  \f3\-\-file\fP\f3=\fP\f2file\fP
-Causes the 
+Causes the
 .BR "sacct "
-command to read job accounting data from the named 
-\f2file\fP 
+command to read job accounting data from the named
+\f2file\fP
 instead of the current SLURM job accounting log file. Only applicable
 when running the filetxt plugin.
 
-.TP 
+.TP
 \f3\-g \fP\f2gid_list\fP\f3,\fP  \f3\-\-gid\fP\f3=\fP\f2gid_list\fP \f3\-\-group\fP\f3=\fP\f2group_list\fP
 Displays the statistics only for the jobs started with the GID
 or the GROUP specified by the \f2gid_list\fP or the\f2group_list\fP operand, which is a comma\-separated
-list.  Space characters are not allowed. 
-Default is no restrictions.\&. 
+list.  Space characters are not allowed.
+Default is no restrictions.\&.
 
-.TP 
+.TP
 \f3\-h \fP\f3,\fP \f3\-\-help\fP
 Displays a general help message.
-.TP 
+.TP
 
 
-.TP 
+.TP
 \f3\-j \fP\f2job(.step)\fP \f3,\fP  \f3\-\-jobs\fP\f3=\fP\f2job(.step)\fP
 Displays information about the specified job(.step) or list of job(.step)s.
-.IP 
-The 
-\f2job(.step)\fP 
+.IP
+The
+\f2job(.step)\fP
 parameter is a comma\-separated list of jobs.
 Space characters are not permitted in this list.
-.IP 
+.IP
 The default is to display information on all jobs.
 
-.TP 
+.TP
 \f3\-l\fP\f3,\fP \f3\-\-long\fP
 Equivalent to specifying:
 .IP
 .na
-\'\-\-fields=jobid,jobname,partition,maxvsize,maxvsizenode,maxvsizetask,avevsize,maxrss,maxrssnode,maxrsstask,averss,maxpages,maxpagesnode,maxpagestask,avepages,mincpu,mincpunode,mincputask,avecpu,ntasks,alloccpus,elapsed,state,exitcode\' 
+\'\-\-fields=jobid,jobname,partition,maxvsize,maxvsizenode,maxvsizetask,avevsize,maxrss,maxrssnode,maxrsstask,averss,maxpages,maxpagesnode,maxpagestask,avepages,mincpu,mincpunode,mincputask,avecpu,ntasks,alloccpus,elapsed,state,exitcode\'
 .ad
 
 
-.TP 
+.TP
 \f3\-L\fP\f3,\fP \f3\-\-allclusters\fP
 Display jobs ran on all clusters. By default, only jobs ran on the
 cluster from where sacct is called are displayed.
 
-.TP 
+.TP
 \f3\-n\fP\f3,\fP \f3\-\-noheader\fP
 No heading will be added to the output. The default action is to
 display a header.
-.IP 
-This option has no effect when used with the 
-\f3\-\-dump\fP 
+.IP
+This option has no effect when used with the
+\f3\-\-dump\fP
 option.
 
-.TP 
+.TP
 \f3\-N\fP\f3,\fP \f3\-\-nodelist\fP
 Display jobs that ran on any of these nodes, can be one or more using
 a ranged string.
 .IP
 
-.TP 
+.TP
 \f3\-o \fP\f3,\fP \f3\-\-format\fP
 Comma seperated list of fields. (use "\-\-helpformat" for a list of
-available fields). 
+available fields).
 
 NOTE: When using the format option for listing various fields you can put a
 %NUMBER afterwards to specify how many characters should be printed.
 
 i.e. format=name%30 will print 30 characters of field name right
-justified.  A \-30 will print 30 characters left justified.  
-.IP 
+justified.  A \-30 will print 30 characters left justified.
+.IP
 
-.TP 
+.TP
 \f3\-O \fP\f3,\fP \f3\-\-formatted_dump\fP
 Dumps accounting records in an easy\-to\-read format.
-.IP 
+.IP
 This option is provided for debugging.
 
 .TP
@@ -268,49 +268,49 @@ output will be '|' delimited without a '|' at the end
 Comma seperated list of partitions to select jobs and job steps
 from. The default is all partitions.
 
-.TP 
+.TP
 \f3\-s \fP\f2state_list\fP \f3,\fP  \f3\-\-state\fP\f3=\fP\f2state_list\fP
 Selects jobs based on their current state or the state they were in
-during the time period given, which can be designated with 
+during the time period given, which can be designated with
 the following state designators:
-.RS 
+.RS
 .TP "10"
-\f3r\fP 
+\f3r\fP
 running
 .TP
 \f3s\fP
 suspended
-.TP 
-\f3ca\fP 
+.TP
+\f3ca\fP
 cancelled
-.TP 
-\f3cd\fP 
+.TP
+\f3cd\fP
 completed
-.TP 
-\f3pd\fP 
+.TP
+\f3pd\fP
 pending
-.TP 
-\f3f\fP 
+.TP
+\f3f\fP
 failed
-.TP 
-\f3to\fP 
+.TP
+\f3to\fP
 timed out
-.TP 
-\f3nf\fP 
+.TP
+\f3nf\fP
 node_fail
-.RE 
-.IP 
-The 
-\f2state_list\fP 
+.RE
+.IP
+The
+\f2state_list\fP
 operand is a comma\-separated list of these state designators.
-Space characters are not allowed in the 
+Space characters are not allowed in the
 \f2state_list\fP\c
 
 NOTE: When specifying states and no start time is given the default
 starttime is 'now'.
 \&.
 
-.TP 
+.TP
 \f3\-S \fP\f3,\fP \f3\-\-starttime\fP
 Select jobs eligible after the specified time. Default is midnight of
 current day.  If states are given with the \-s option then return jobs
@@ -319,32 +319,32 @@ in this state at this time, 'now' is also used as the default time.
 Valid time formats are...
 HH:MM[:SS] [AM|PM]
 MMDD[YY] or MM/DD[/YY] or MM.DD[.YY]
-MM/DD[/YY]-HH:MM[:SS]         
+MM/DD[/YY]-HH:MM[:SS]
 YYYY-MM-DD[THH:MM[:SS]]
 
-.TP 
+.TP
 \f3\-T \fP\f3,\fP \f3\-\-truncate\fP
 Truncate time.  So if a job started before \-\-starttime the start time
 would be truncated to \-\-starttime.  The same for end time and \-\-endtime.
 
-.TP 
+.TP
 \f3\-u \fP\f2uid_list\fP\f3,\fP  \f3\-\-uid\fP\f3=\fP\f2uid_list\fP \f3\-\-user\fP\f3=\fP\f2user_list\fP
 Use this comma seperated list of uids or user names to select jobs to display.  By default, the running
 user's uid is used.
 
-.TP 
+.TP
 \f3\-\-usage\fP
 Displays a help message.
 
-.TP 
+.TP
 \f3\-v \fP\f3,\fP \f3\-\-verbose\fP
 Primarily for debug use reports the state of certain variables during processing.
 
-.TP 
+.TP
 \f3\-V \fP\f3,\fP \f3\-\-version\fP
 Print version.
 
-.TP 
+.TP
 \f3\-W \fP\f2wckey_list\fP\f3,\fP  \f3\-\-wckeys\fP\f3=\fP\f2wckey_list\fP
 Displays the statistics only for the jobs started on the wckeys specified by
 the \f2wckey_list\fP operand, which is a comma\-separated list of
@@ -352,13 +352,13 @@ wckey names.
 Space characters are not allowed in the \f2wckey_list\fP. Default is
 all wckeys\&.
 
-.TP 
+.TP
 \f3\-X \fP\f3,\fP \f3\-\-allocations\fP
 Only show cumulative statistics for each job, not the intermediate steps.
 
 .SS "Job Accounting Fields"
 The following describes each job accounting field:
-.RS 
+.RS
 .TP "10"
 \f3alloccpus\fP
 Count of allocated processors.
@@ -392,48 +392,48 @@ Average Virtual Memory size of a process.
 Block ID, applicable to BlueGene computers only.
 
 .TP
-\f3cluster\fP 
+\f3cluster\fP
 Cluster name.
 
 .TP
 \f3cputime\fP
-Formatted number of cpu seconds a process was allocated. 
+Formatted number of cpu seconds a process was allocated.
 
 .TP
 \f3cputimeraw\fP
 How much cpu time process was allocated in second format, not formatted
 like above.
 
-.TP 
-\f3elapsed\fP 
+.TP
+\f3elapsed\fP
 The jobs elapsed time.
-.IP 
+.IP
 The format of this fields output is as follows:
-.RS 
+.RS
 .PD "0"
-.HP 
-\f2[DD\-[hh:]]mm:ss\fP 
-.PD 
-.RE 
-.IP 
+.HP
+\f2[DD\-[hh:]]mm:ss\fP
+.PD
+.RE
+.IP
 as defined by the following:
-.RS 
+.RS
 .TP "10"
-\f2DD\fP 
+\f2DD\fP
 days
-.TP 
-\f2hh\fP 
+.TP
+\f2hh\fP
 hours
-.TP 
-\f2mm\fP 
+.TP
+\f2mm\fP
 minutes
-.TP 
-\f2ss\fP 
+.TP
+\f2ss\fP
 seconds
-.RE 
+.RE
 
-.TP 
-\f3eligible\fP 
+.TP
+\f3eligible\fP
 When the job became eligible to run.
 
 .TP
@@ -465,22 +465,22 @@ minutes
 seconds
 .RE
 
-.TP 
-\f3exitcode\fP 
+.TP
+\f3exitcode\fP
 The first non\-zero error code returned by any job step.
 
-.TP 
-\f3gid\fP 
+.TP
+\f3gid\fP
 The group identifier of the user who ran the job.
 
 .TP
 \f3group\fP
 The group name of the user who ran the job.
 
-.TP 
-\f3jobid\fP 
+.TP
+\f3jobid\fP
 The number of the job or job step.
-It is in the form: 
+It is in the form:
 \f2job.jobstep\fP\c
 \&.
 
@@ -488,7 +488,7 @@ It is in the form:
 \f3jobname\fP
 The name of the job or job step.
 
-.TP 
+.TP
 \f3layout\fP
 What the layout of a step was when it was running.  This can be used
 to give you an idea of which node ran which rank in your job.
@@ -541,8 +541,8 @@ The node where the mincpu occured.
 \f3mincputask\fP
 The task on mincpunode where the mincpu occured.
 
-.TP 
-\f3ncpus\fP 
+.TP
+\f3ncpus\fP
 Total number of CPUs allocated to the job.
 
 .TP
@@ -553,15 +553,15 @@ List of nodes in job/step.
 \f3nnodes\fP
 Number of nodes in a job or step.
 
-.TP 
-\f3ntasks\fP 
+.TP
+\f3ntasks\fP
 Total number of tasks in a job or step.
 
 .TP
 \f3priority\fP
 Slurm priority.
 
-.TP 
+.TP
 \f3partition\fP
 Identifies the partition on which the job ran.
 
@@ -570,15 +570,15 @@ Identifies the partition on which the job ran.
 Name of Quality of Service.
 
 .TP
-\f3qosraw\fP 
+\f3qosraw\fP
 Id of Quality of Service.
 
-.TP 
-\f3reqcpus\fP 
+.TP
+\f3reqcpus\fP
 Required CPUs.
 
-.TP 
-\f3reserved\fP 
+.TP
+\f3reserved\fP
 How much wall clock time was used as reserved time for this job.  This is
 derived from how long a job was waiting from eligible time to when it
 actually started.
@@ -587,8 +587,8 @@ actually started.
 \f3resvcpu\fP
 Formatted time for how long (cpu secs) a job was reserved for.
 
-.TP 
-\f3resvcpuraw\fP 
+.TP
+\f3resvcpuraw\fP
 Reserved CPUs in second format, not formatted.
 
 .TP
@@ -600,7 +600,7 @@ Initiation time of the job in the same format as \f3end\fP.
 Displays the job status, or state.
 
 Output can be RUNNING, SUSPENDED, COMPLETED,  CANCELLED, FAILED,
-TIMEOUT, or NODE_FAIL. 
+TIMEOUT, or NODE_FAIL.
 
 .TP
 \f3submit\fP
@@ -616,8 +616,8 @@ How long the job was suspended for.
 The amount of system CPU time. (If job was running on multiple cpus this
 is a combination of all the times so this number could be much larger
 than the elapsed time.)
-The format of the output is identical to that of the 
-\f3elapsed\fP 
+The format of the output is identical to that of the
+\f3elapsed\fP
 field.
 
 .TP
@@ -655,38 +655,38 @@ Workload  Characterization  Key.   Arbitrary  string for grouping orthogonal acc
 \f3wckeyid\fP
 Reference to the wckey.
 
-.RE 
+.RE
 .SH "INTERPRETING THE \-DUMP OPTION OUTPUT"
-The 
+The
 .BR "sacct "
-commands 
-\f3\-\-dump\fP 
-option displays data in a horizontal list of fields depending on the 
+commands
+\f3\-\-dump\fP
+option displays data in a horizontal list of fields depending on the
 record type;
-there are three record types: 
+there are three record types:
 \f3JOB_START\fP\c
-\&, 
+\&,
 \f3JOB_STEP\fP\c
-\&, and 
+\&, and
 \f3JOB_TERMINATED\fP\c
 \&.
 There is a subsection that describes the output for each record type.
 .PP
-When the data output is a job accounting field, as described in the 
-section titled "Job Accounting Fields", only the name of the job 
+When the data output is a job accounting field, as described in the
+section titled "Job Accounting Fields", only the name of the job
 accounting field is listed.
 Otherwise, additional information is provided.
 .TP "10"
 \f3Note: \fP\c
-The output for the 
-\f3JOB_STEP\fP 
-and 
-\f3JOB_TERMINATED\fP 
+The output for the
+\f3JOB_STEP\fP
+and
+\f3JOB_TERMINATED\fP
 record types present a pair of fields for the following data:
 Total CPU time, Total User CPU time, and Total System CPU time.
-The first field of each pair is the time in seconds expressed as an 
+The first field of each pair is the time in seconds expressed as an
 integer.
-The second field of each pair is the fractional number of seconds 
+The second field of each pair is the fractional number of seconds
 multiplied by one million.
 Thus, a pair of fields output as "\c
 \f31 024315\fP\c
@@ -694,386 +694,386 @@ Thus, a pair of fields output as "\c
 The least significant digits in the second field are truncated in
 formatted displays.
 .SS "Output for the JOB_START Record Type"
-The following describes the horizontal fields output by the 
+The following describes the horizontal fields output by the
 .BR "sacct "
-\f3\-\-dump\fP 
-option for the 
-\f3JOB_START\fP 
+\f3\-\-dump\fP
+option for the
+\f3JOB_START\fP
 record type.
-.RS 
+.RS
 .TP "10"
 Field #
 Field
-.TP 
+.TP
 1
-\f3job\fP 
-.TP 
+\f3job\fP
+.TP
 2
-\f3partition\fP 
-.TP 
+\f3partition\fP
+.TP
 3
-\f3submitted\fP 
-.TP 
+\f3submitted\fP
+.TP
 4
 The jobs start time;
-this value is the number of non\-leap seconds since the Epoch (00:00:00 
+this value is the number of non\-leap seconds since the Epoch (00:00:00
 UTC, January 1, 1970)
-.TP 
+.TP
 5
 \f3uid.gid\fP
-.TP 
+.TP
 6
 (Reserved)
-.TP 
+.TP
 7
 \f3JOB_START\fP (literal string)
-.TP 
+.TP
 8
 Job Record Version (1)
-.TP 
+.TP
 9
 The number of fields in the record (16)
-.TP 
+.TP
 10
-\f3uid\fP 
-.TP 
+\f3uid\fP
+.TP
 11
-\f3gid\fP 
-.TP 
+\f3gid\fP
+.TP
 12
 The job name
-.TP 
+.TP
 13
 Batch Flag (0=no batch)
-.TP 
+.TP
 14
 Relative SLURM priority
-.TP 
+.TP
 15
-\f3ncpus\fP 
-.TP 
+\f3ncpus\fP
+.TP
 16
-\f3nodes\fP 
-.RE 
+\f3nodes\fP
+.RE
 .SS "Output for the JOB_STEP Record Type"
-The following describes the horizontal fields output by the 
+The following describes the horizontal fields output by the
 .BR "sacct "
-\f3\-\-dump\fP 
-option for the 
-\f3JOB_STEP\fP 
+\f3\-\-dump\fP
+option for the
+\f3JOB_STEP\fP
 record type.
-.RS 
+.RS
 .TP "10"
 Field #
 Field
-.TP 
+.TP
 1
-\f3job\fP 
-.TP 
+\f3job\fP
+.TP
 2
-\f3partition\fP 
-.TP 
+\f3partition\fP
+.TP
 3
-\f3submitted\fP 
-.TP 
+\f3submitted\fP
+.TP
 4
 The jobs start time;
-this value is the number of non\-leap seconds since the Epoch (00:00:00 
+this value is the number of non\-leap seconds since the Epoch (00:00:00
 UTC, January 1, 1970)
-.TP 
+.TP
 5
 \f3uid.gid\fP
-.TP 
+.TP
 6
 (Reserved)
-.TP 
+.TP
 7
 \f3JOB_STEP\fP (literal string)
-.TP 
+.TP
 8
 Job Record Version (1)
-.TP 
+.TP
 9
 The number of fields in the record (38)
-.TP 
+.TP
 10
-\f3jobid\fP 
-.TP 
+\f3jobid\fP
+.TP
 11
-\f3end\fP 
-.TP 
+\f3end\fP
+.TP
 12
 Completion Status;
 the mnemonics, which may appear in uppercase or lowercase, are as follows:
-.RS 
+.RS
 .TP "10"
-\f3CA\fP 
+\f3CA\fP
 Cancelled
 .TP "10"
-\f3CD\fP 
+\f3CD\fP
 Completed successfully
-.TP 
-\f3F\fP 
+.TP
+\f3F\fP
 Failed
-.TP 
-\f3NF\fP 
+.TP
+\f3NF\fP
 Job terminated from node failure
-.TP 
-\f3R\fP 
+.TP
+\f3R\fP
 Running
 .TP
 \f3S\fP
 Suspended
-.TP 
-\f3TO\fP 
+.TP
+\f3TO\fP
 Timed out
-.RE 
-.TP 
+.RE
+.TP
 13
-\f3exitcode\fP 
-.TP 
+\f3exitcode\fP
+.TP
 14
-\f3ntasks\fP 
-.TP 
+\f3ntasks\fP
+.TP
 15
-\f3ncpus\fP 
-.TP 
+\f3ncpus\fP
+.TP
 16
-\f3elapsed\fP 
+\f3elapsed\fP
 time in seconds expressed as an integer
-.TP 
+.TP
 17
 Integer portion of the Total CPU time in seconds for all processes
-.TP 
+.TP
 18
 Fractional portion of the Total CPU time for all processes expressed in microseconds
-.TP 
+.TP
 19
 Integer portion of the Total User CPU time in seconds for all processes
-.TP 
+.TP
 20
-Fractional portion of the Total User CPU time for all processes 
+Fractional portion of the Total User CPU time for all processes
 expressed in microseconds
-.TP 
+.TP
 21
 Integer portion of the Total System CPU time in seconds for all processes
-.TP 
+.TP
 22
-Fractional portion of the Total System CPU time for all processes 
+Fractional portion of the Total System CPU time for all processes
 expressed in microseconds
-.TP 
+.TP
 23
-\f3rss\fP 
-.TP 
+\f3rss\fP
+.TP
 24
-\f3ixrss\fP 
-.TP 
+\f3ixrss\fP
+.TP
 25
-\f3idrss\fP 
-.TP 
+\f3idrss\fP
+.TP
 26
-\f3isrss\fP 
-.TP 
+\f3isrss\fP
+.TP
 27
-\f3minflt\fP 
-.TP 
+\f3minflt\fP
+.TP
 28
-\f3majflt\fP 
-.TP 
+\f3majflt\fP
+.TP
 29
-\f3nswap\fP 
-.TP 
+\f3nswap\fP
+.TP
 30
-\f3inblocks\fP 
-.TP 
+\f3inblocks\fP
+.TP
 31
-\f3outblocks\fP 
-.TP 
+\f3outblocks\fP
+.TP
 32
-\f3msgsnd\fP 
-.TP 
+\f3msgsnd\fP
+.TP
 33
-\f3msgrcv\fP 
-.TP 
+\f3msgrcv\fP
+.TP
 34
-\f3nsignals\fP 
-.TP 
+\f3nsignals\fP
+.TP
 35
-\f3nvcsw\fP 
-.TP 
+\f3nvcsw\fP
+.TP
 36
-\f3nivcsw\fP 
-.TP 
+\f3nivcsw\fP
+.TP
 37
-\f3vsize\fP 
-.TP 
-.RE 
+\f3vsize\fP
+.TP
+.RE
 .SS "Output for the JOB_TERMINATED Record Type"
-The following describes the horizontal fields output by the 
+The following describes the horizontal fields output by the
 .BR "sacct "
-\f3\-\-dump\fP 
-option for the 
+\f3\-\-dump\fP
+option for the
 \f3JOB_TERMINATED\fP (literal string)
 record type.
-.RS 
+.RS
 .TP "10"
 Field #
 Field
-.TP 
+.TP
 1
-\f3job\fP 
-.TP 
+\f3job\fP
+.TP
 2
-\f3partition\fP 
-.TP 
+\f3partition\fP
+.TP
 3
-\f3submitted\fP 
-.TP 
+\f3submitted\fP
+.TP
 4
 The jobs start time;
-this value is the number of non\-leap seconds since the Epoch (00:00:00 
+this value is the number of non\-leap seconds since the Epoch (00:00:00
 UTC, January 1, 1970)
-.TP 
+.TP
 5
 \f3uid.gid\fP
-.TP 
+.TP
 6
 (Reserved)
-.TP 
+.TP
 7
 \f3JOB_TERMINATED\fP (literal string)
-.TP 
+.TP
 8
 Job Record Version (1)
-.TP 
+.TP
 9
 The number of fields in the record (38)
-.IP 
-Although thirty\-eight fields are displayed by the 
+.IP
+Although thirty\-eight fields are displayed by the
 .BR "sacct "
-command for the 
-\f3JOB_TERMINATED\fP 
+command for the
+\f3JOB_TERMINATED\fP
 record, only fields 1 through 12 are recorded in the actual data file;
-the 
+the
 .BR "sacct "
 command aggregates the remainder.
-.TP 
+.TP
 10
 The total elapsed time in seconds for the job.
-.TP 
+.TP
 11
-\f3end\fP 
-.TP 
+\f3end\fP
+.TP
 12
 Completion Status;
 the mnemonics, which may appear in uppercase or lowercase, are as follows:
-.RS 
+.RS
 .TP "10"
-\f3CA\fP 
+\f3CA\fP
 Cancelled
-.TP 
-\f3CD\fP 
+.TP
+\f3CD\fP
 Completed successfully
-.TP 
-\f3F\fP 
+.TP
+\f3F\fP
 Failed
-.TP 
-\f3NF\fP 
+.TP
+\f3NF\fP
 Job terminated from node failure
-.TP 
-\f3R\fP 
+.TP
+\f3R\fP
 Running
-.TP 
-\f3TO\fP 
+.TP
+\f3TO\fP
 Timed out
-.RE 
-.TP 
+.RE
+.TP
 13
-\f3exitcode\fP 
-.TP 
+\f3exitcode\fP
+.TP
 14
-\f3ntasks\fP 
-.TP 
+\f3ntasks\fP
+.TP
 15
-\f3ncpus\fP 
-.TP 
+\f3ncpus\fP
+.TP
 16
-\f3elapsed\fP 
+\f3elapsed\fP
 time in seconds expressed as an integer
-.TP 
+.TP
 17
 Integer portion of the Total CPU time in seconds for all processes
-.TP 
+.TP
 18
 Fractional portion of the Total CPU time for all processes expressed in microseconds
-.TP 
+.TP
 19
 Integer portion of the Total User CPU time in seconds for all processes
-.TP 
+.TP
 20
-Fractional portion of the Total User CPU time for all processes 
+Fractional portion of the Total User CPU time for all processes
 expressed in microseconds
-.TP 
+.TP
 21
 Integer portion of the Total System CPU time in seconds for all processes
-.TP 
+.TP
 22
-Fractional portion of the Total System CPU time for all processes 
+Fractional portion of the Total System CPU time for all processes
 expressed in microseconds
-.TP 
+.TP
 23
-\f3rss\fP 
-.TP 
+\f3rss\fP
+.TP
 24
-\f3ixrss\fP 
-.TP 
+\f3ixrss\fP
+.TP
 25
-\f3idrss\fP 
-.TP 
+\f3idrss\fP
+.TP
 26
-\f3isrss\fP 
-.TP 
+\f3isrss\fP
+.TP
 27
-\f3minflt\fP 
-.TP 
+\f3minflt\fP
+.TP
 28
-\f3majflt\fP 
-.TP 
+\f3majflt\fP
+.TP
 29
-\f3nswap\fP 
-.TP 
+\f3nswap\fP
+.TP
 30
-\f3inblocks\fP 
-.TP 
+\f3inblocks\fP
+.TP
 31
-\f3outblocks\fP 
-.TP 
+\f3outblocks\fP
+.TP
 32
-\f3msgsnd\fP 
-.TP 
+\f3msgsnd\fP
+.TP
 33
-\f3msgrcv\fP 
-.TP 
+\f3msgrcv\fP
+.TP
 34
-\f3nsignals\fP 
-.TP 
+\f3nsignals\fP
+.TP
 35
-\f3nvcsw\fP 
-.TP 
+\f3nvcsw\fP
+.TP
 36
-\f3nivcsw\fP 
-.TP 
+\f3nivcsw\fP
+.TP
 37
-\f3vsize\fP 
+\f3vsize\fP
 .RE
 
 .SH "EXAMPLES"
-This example illustrates the default invocation of the 
+This example illustrates the default invocation of the
 .BR "sacct "
 command:
-.RS 
+.RS
 .PP
-.nf 
+.nf
 .ft 3
 # sacct
 Jobid      Jobname    Partition    Account AllocCPUS State     ExitCode
@@ -1084,15 +1084,15 @@ Jobid      Jobname    Partition    Account AllocCPUS State     ExitCode
 4.0                   srun       acct1               1 COMPLETED         0
 
 .ft 1
-.fi 
-.RE 
+.fi
+.RE
 .PP
-This example shows the same job accounting information with the 
-\f3brief\fP 
+This example shows the same job accounting information with the
+\f3brief\fP
 option.
-.RS 
+.RS
 .PP
-.nf 
+.nf
 .ft 3
 # sacct \-\-brief
 Jobid      Status     Exitcode
@@ -1102,12 +1102,12 @@ Jobid      Status     Exitcode
 4          RUNNING           0
 4.0        COMPLETED         0
 .ft 1
-.fi 
-.RE 
+.fi
+.RE
 .PP
-.RS 
+.RS
 .PP
-.nf 
+.nf
 .ft 3
 # sacct \-\-allocations
 Jobid      Jobname    Partition Account    AllocCPUS  State     Exitcode
@@ -1122,16 +1122,16 @@ Jobid      Jobname    Partition Account    AllocCPUS  State     Exitcode
 10         endscript  andy       acct1          186 COMPLETED         0
 
 .ft 1
-.fi 
-.RE 
+.fi
+.RE
 .PP
-This example demonstrates the ability to customize the output of the 
+This example demonstrates the ability to customize the output of the
 .BR "sacct "
 command.
 The fields are displayed in the order designated on the command line.
-.RS 
+.RS
 .PP
-.nf 
+.nf
 .ft 3
 # sacct \-\-fields=jobid,ncpus,ntasks,nsignals,status
 Jobid     Elapsed    Ncpus     Ntasks   Status
@@ -1144,8 +1144,8 @@ Jobid     Elapsed    Ncpus     Ntasks   Status
 5.0          00:01:31          2        1 COMPLETED
 
 .ft 1
-.fi 
-.RE 
+.fi
+.RE
 .SH "COPYING"
 
 Copyright (C) 2005\-2007 Copyright Hewlett\-Packard Development Company L.P.
@@ -1167,8 +1167,8 @@ FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 .SH "FILES"
 .TP "10"
-\f3/etc/slurm.conf\fP 
-Entries to this file enable job accounting and 
+\f3/etc/slurm.conf\fP
+Entries to this file enable job accounting and
 designate the job accounting log file that collects system job accounting.
 .TP
 \f3/var/log/slurm_accounting.log\fP
diff --git a/doc/man/man1/sacctmgr.1 b/doc/man/man1/sacctmgr.1
index fdf4044402a0de6e6c434d699ef6ffb20b453aeb..158e514e39c63e345089c4ada7ff707f80a326a9 100644
--- a/doc/man/man1/sacctmgr.1
+++ b/doc/man/man1/sacctmgr.1
@@ -8,20 +8,20 @@ sacctmgr \- Used to view and modify Slurm account information.
 
 .SH "DESCRIPTION"
 \fBsacctmgr\fR is used to view or modify Slurm account information.
-The account information is maintained within a database with the interface 
+The account information is maintained within a database with the interface
 being provided by \fBslurmdbd\fR (Slurm Database daemon).
-This database can serve as a central storehouse of user and 
+This database can serve as a central storehouse of user and
 computer information for multiple computers at a single site.
 Slurm account information is recorded based upon four parameters
-that form what is referred to as an \fIassociation\fR. 
-These parameters are \fIuser\fR, \fIcluster\fR, \fIpartition\fR, and 
+that form what is referred to as an \fIassociation\fR.
+These parameters are \fIuser\fR, \fIcluster\fR, \fIpartition\fR, and
 \fIaccount\fR. \fIuser\fR is the login name.
-\fIcluster\fR is the name of a Slurm managed cluster as specified by 
-the \fIClusterName\fR parameter in the \fIslurm.conf\fR configuration file. 
+\fIcluster\fR is the name of a Slurm managed cluster as specified by
+the \fIClusterName\fR parameter in the \fIslurm.conf\fR configuration file.
 \fIpartition\fR is the name of a Slurm partition on that cluster.
 \fIaccount\fR is the bank account for a job.
-The intended mode of operation is to initiate the \fBsacctmgr\fR command, 
-add, delete, modify, and/or list \fIassociation\fR records then 
+The intended mode of operation is to initiate the \fBsacctmgr\fR command,
+add, delete, modify, and/or list \fIassociation\fR records then
 commit the changes and exit.
 
 .SH "OPTIONS"
@@ -68,7 +68,7 @@ This is equivalent to the \fBassociations\fR command.
 
 .TP
 \fB\-v\fR, \fB\-\-verbose\fR
-Enable detailed logging. 
+Enable detailed logging.
 This is equivalent to the \fBverbose\fR command.
 
 .TP
@@ -146,7 +146,7 @@ Identical to the \fBlist\fR command.
 
 .TP
 \fBverbose\fP
-Enable detailed logging. 
+Enable detailed logging.
 This includes time\-stamps on data structures, record counts, etc.
 This is an independent command with no options meant for use in interactive mode.
 
@@ -162,11 +162,11 @@ Repeat the last command.
 
 .TP
 \fIaccount\fP
-A bank account, typically specified at job submit time using the 
+A bank account, typically specified at job submit time using the
 \fI\-\-account=\fR option.
 These may be arranged in a hierarchical fashion, for example
 accounts \fIchemistry\fR and \fIphysics\fR may be children of
-the account \fIscience\fR. 
+the account \fIscience\fR.
 The hierarchy may have an arbitrary depth.
 
 .TP
@@ -177,12 +177,12 @@ The entity used to group information consisting of four parameters:
 .TP
 \fIcluster\fP
 The \fIClusterName\fR parameter in the \fIslurm.conf\fR configuration
-file, used to differentiate accounts from on different machines. 
+file, used to differentiate accounts from on different machines.
 
 .TP
 \fIconfiguration\fP
 Used only with the \fIlist\fR or \fIshow\fR command to report current
-system configuration. 
+system configuration.
 
 .TP
 \fIcoordinator\fR
@@ -190,7 +190,7 @@ A special privileged user usually an account manager or such that can
 add users or sub accounts to the account they are coordinator over.
 This should be a trusted person since they can change limits on
 account and user associations inside their realm.
- 
+
 .TP
 \fIqos\fR
 Quality of Service.
@@ -215,14 +215,14 @@ Number used in conjunction with other accounts to determine job priority.
 To clear a previously set value use the modify command with a new value of \-1.
 
 .TP
-\fIGrpCPUMins\fP=<max cpu minutes> 
-Maximum number of CPU minutes running jobs are able to be allocated in 
+\fIGrpCPUMins\fP=<max cpu minutes>
+Maximum number of CPU minutes running jobs are able to be allocated in
 aggregate for this association and all association which are children
 of this association.
 To clear a previously set value use the modify command with a new
 value of \-1.  (NOTE: This limit is not enforced if set on the root
 association of a cluster.  So even though it may appear in sacctmgr
-output it will not be enforced.  
+output it will not be enforced.
 NOTE: This limit only applys when using the Priority Multifactor
 plugin.  The time is decayed using the value of PriorityDecayHalfLife
 or PriorityUsageResetPeriod as set in the slurm.conf.  Currently when
@@ -232,7 +232,7 @@ this will change in future versions of SLURM.)
 
 .TP
 \fIGrpCPUs\fP=<max cpus>
-Maximum number of CPUs running jobs are able to be allocated in aggregate for 
+Maximum number of CPUs running jobs are able to be allocated in aggregate for
 this association and all association which are children of this association.
 To clear a previously set value use the modify command with a new
 value of \-1.  (NOTE: This limit is not currently enforced in SLURM.
@@ -241,31 +241,31 @@ before it is enforced.)
 
 .TP
 \fIGrpJobs\fP=<max jobs>
-Maximum number of running jobs in aggregate for 
+Maximum number of running jobs in aggregate for
 this association and all association which are children of this association.
 To clear a previously set value use the modify command with a new value of \-1.
 
 .TP
 \fIGrpNodes\fP=<max nodes>
-Maximum number of nodes running jobs are able to be allocated in aggregate for 
+Maximum number of nodes running jobs are able to be allocated in aggregate for
 this association and all association which are children of this association.
 To clear a previously set value use the modify command with a new value of \-1.
 
 .TP
 \fIGrpSubmitJobs\fP=<max jobs>
-Maximum number of jobs which can be in a pending or running state at any time 
-in aggregate for this association and all association which are children of 
+Maximum number of jobs which can be in a pending or running state at any time
+in aggregate for this association and all association which are children of
 this association.
 To clear a previously set value use the modify command with a new value of \-1.
 
 .TP
 \fIGrpWall\fP=<max wall>
-Maximum wall clock time running jobs are able to be allocated in aggregate for 
+Maximum wall clock time running jobs are able to be allocated in aggregate for
 this association and all association which are children of this association.
 To clear a previously set value use the modify command with a new value of \-1.
 (NOTE: This limit is not enforced if set on the root
 association of a cluster.  So even though it may appear in sacctmgr
-output it will not be enforced.  
+output it will not be enforced.
 NOTE: This limit only applys when using the Priority Multifactor
 plugin.  The time is decayed using the value of PriorityDecayHalfLife
 or PriorityUsageResetPeriod as set in the slurm.conf.  Currently when
@@ -274,17 +274,17 @@ inside the limit.  No jobs will be killed if this limit is reached,
 this will change in future versions of SLURM.)
 
 .TP
-\fIMaxCPUMins\fP=<max cpu minutes> 
+\fIMaxCPUMins\fP=<max cpu minutes>
 Maximum number of CPU minutes each job is able to use in this account.
-This is overridden if set directly on a user. 
+This is overridden if set directly on a user.
 Default is the cluster's limit.
 To clear a previously set value use the modify command with a new
-value of \-1. 
+value of \-1.
 
 .TP
 \fIMaxCPUs\fP=<max cpus>
 Maximum number of CPUs each job is able to use in this account.
-This is overridden if set directly on a user. 
+This is overridden if set directly on a user.
 Default is the cluster's limit.
 To clear a previously set value use the modify command with a new
 value of \-1.  (NOTE: This limit is not currently enforced in SLURM.
@@ -294,14 +294,14 @@ before it is enforced.)
 .TP
 \fIMaxJobs\fP=<max jobs>
 Maximum number of jobs each user is allowed to run at one time in this account.
-This is overridden if set directly on a user. 
+This is overridden if set directly on a user.
 Default is the cluster's limit.
 To clear a previously set value use the modify command with a new value of \-1.
 
 .TP
 \fIMaxNodes\fP=<max nodes>
 Maximum number of nodes each job is able to use in this account.
-This is overridden if set directly on a user. 
+This is overridden if set directly on a user.
 Default is the cluster's limit.
 To clear a previously set value use the modify command with a new value of \-1.
 This is a c\-node limit on BlueGene systems.
@@ -316,9 +316,9 @@ To clear a previously set value use the modify command with a new value of \-1.
 .TP
 \fIMaxWall\fP=<max wall>
 Maximum wall clock time each job is able to use in this account.
-This is overridden if set directly on a user. 
+This is overridden if set directly on a user.
 Default is the cluster's limit.
-<max wall> format is <min> or <min>:<sec> or <hr>:<min>:<sec> or 
+<max wall> format is <min> or <min>:<sec> or <hr>:<min>:<sec> or
 <days>\-<hr>:<min>:<sec> or <days>\-<hr>.
 The value is recorded in minutes with rounding as needed.
 To clear a previously set value use the modify command with a new value of \-1.
@@ -326,7 +326,7 @@ To clear a previously set value use the modify command with a new value of \-1.
 .TP
 \fIQosLevel\fP<operator><comma separated list of qos names>
 Specify the default Quality of Service's that jobs are able to run at
-for this account.  To get a list of valid QOS's use 'sacctmgr list qos'. 
+for this account.  To get a list of valid QOS's use 'sacctmgr list qos'.
 This value will override it's parents value and push down to it's
 children as the new default.  Setting a QosLevel to '' (two single
 quotes with nothing between them) restores it's default setting.  You
@@ -442,7 +442,7 @@ Display information with previously deleted data.
 Display information with subaccounts.  Only really valuable when used
 with the account= option.  This will display all the subaccount
 associations along with the accounts listed in the option.
-              
+
 .TP
 \fIWOLimits\fP
 Display information without limit information. This is for a smaller
@@ -452,7 +452,7 @@ default format of Cluster,Account,User,Partition
 \fIWOPInfo\fP
 Display information without parent information. (i.e. parent id, and
 parent account name.) This option also invokes WOPLIMITS.
-              
+
 .TP
 \fIWOPLimits\fP
 Display information without hierarchical parent limits. (i.e. will
@@ -478,34 +478,34 @@ Number used in conjunction with other accounts to determine job priority.
 
 .TP
 \fIGrpCPUMins\fP
-Maximum number of CPU minutes running jobs are able to be allocated in 
+Maximum number of CPU minutes running jobs are able to be allocated in
 aggregate for this association and all association which are children
 of this association.
 
 .TP
 \fIGrpCPUs\fP
-Maximum number of CPUs running jobs are able to be allocated in aggregate for 
+Maximum number of CPUs running jobs are able to be allocated in aggregate for
 this association and all association which are children of this association.
 
 .TP
 \fIGrpJobs\fP
-Maximum number of running jobs in aggregate for 
+Maximum number of running jobs in aggregate for
 this association and all association which are children of this association.
 
 .TP
 \fIGrpNodes\fP
-Maximum number of nodes running jobs are able to be allocated in aggregate for 
+Maximum number of nodes running jobs are able to be allocated in aggregate for
 this association and all association which are children of this association.
 
 .TP
 \fIGrpSubmitJobs\fP
-Maximum number of jobs which can be in a pending or running state at any time 
-in aggregate for this association and all association which are children of 
+Maximum number of jobs which can be in a pending or running state at any time
+in aggregate for this association and all association which are children of
 this association.
 
 .TP
 \fIGrpWall\fP
-Maximum wall clock time running jobs are able to be allocated in aggregate for 
+Maximum wall clock time running jobs are able to be allocated in aggregate for
 this association and all association which are children of this association.
 
 .TP
@@ -520,7 +520,7 @@ associations with a LFT inside this LFT and before the RGT are
 childern of this association.
 
 .TP
-\fIMaxCPUMins\fP 
+\fIMaxCPUMins\fP
 Maximum number of CPU minutes each job is able to use.
 
 .TP
@@ -581,8 +581,8 @@ The name of a user in the association.
 .TP
 \fIName\fP=<name>
 The name of a cluster.
-This should be equal to the \fIClusterName\fR parameter in the \fIslurm.conf\fR 
-configuration file for some Slurm\-managed cluster. 
+This should be equal to the \fIClusterName\fR parameter in the \fIslurm.conf\fR
+configuration file for some Slurm\-managed cluster.
 
 .TP
 \fIWOLimits\fP
@@ -653,12 +653,12 @@ account or list user.
 
 .TP
 \fIGrpCPUMins\fP
-Maximum number of CPU minutes running jobs are able to be allocated in 
+Maximum number of CPU minutes running jobs are able to be allocated in
 aggregate for this QOS.
 
 .TP
 \fIGrpCPUs\fP
-Maximum number of CPUs running jobs are able to be allocated in aggregate for 
+Maximum number of CPUs running jobs are able to be allocated in aggregate for
 this QOS.
 
 .TP
@@ -667,17 +667,17 @@ Maximum number of running jobs in aggregate for this QOS.
 
 .TP
 \fIGrpNodes\fP
-Maximum number of nodes running jobs are able to be allocated in aggregate for 
+Maximum number of nodes running jobs are able to be allocated in aggregate for
 this QOS.
 
 .TP
 \fIGrpSubmitJobs\fP
-Maximum number of jobs which can be in a pending or running state at any time 
+Maximum number of jobs which can be in a pending or running state at any time
 in aggregate for this QOS.
 
 .TP
 \fIGrpWall\fP
-Maximum wall clock time running jobs are able to be allocated in aggregate for 
+Maximum wall clock time running jobs are able to be allocated in aggregate for
 this QOS.
 
 .TP
@@ -685,7 +685,7 @@ this QOS.
 The id of the QOS.
 
 .TP
-\fIMaxCPUMins\fP 
+\fIMaxCPUMins\fP
 Maximum number of CPU minutes each job is able to use.
 
 .TP
@@ -734,10 +734,10 @@ An arbitrary string describing a QOS.
 
 .TP
 \fIGrpCPUMins\fP
-Maximum number of CPU minutes running jobs are able to be allocated in 
+Maximum number of CPU minutes running jobs are able to be allocated in
 aggregate for this QOS.
 To clear a previously set value use the modify command with a new
-value of \-1. 
+value of \-1.
 NOTE: This limit only applys when using the Priority Multifactor
 plugin.  The time is decayed using the value of PriorityDecayHalfLife
 or PriorityUsageResetPeriod as set in the slurm.conf.  Currently when
@@ -747,7 +747,7 @@ this will change in future versions of SLURM.)
 
 .TP
 \fIGrpCPUs\fP
-Maximum number of CPUs running jobs are able to be allocated in aggregate for 
+Maximum number of CPUs running jobs are able to be allocated in aggregate for
 this QOS.
 To clear a previously set value use the modify command with a new
 value of \-1.  (NOTE: This limit is not currently enforced in SLURM.
@@ -761,19 +761,19 @@ To clear a previously set value use the modify command with a new value of \-1.
 
 .TP
 \fIGrpNodes\fP
-Maximum number of nodes running jobs are able to be allocated in aggregate for 
+Maximum number of nodes running jobs are able to be allocated in aggregate for
 this QOS.
 To clear a previously set value use the modify command with a new value of \-1.
 
 .TP
 \fIGrpSubmitJobs\fP
-Maximum number of jobs which can be in a pending or running state at any time 
+Maximum number of jobs which can be in a pending or running state at any time
 in aggregate for this QOS.
 To clear a previously set value use the modify command with a new value of \-1.
 
 .TP
 \fIGrpWall\fP
-Maximum wall clock time running jobs are able to be allocated in aggregate for 
+Maximum wall clock time running jobs are able to be allocated in aggregate for
 this QOS.
 To clear a previously set value use the modify command with a new value of \-1.
 NOTE: This limit only applys when using the Priority Multifactor
@@ -784,10 +784,10 @@ inside the limit.  No jobs will be killed if this limit is reached,
 this will change in future versions of SLURM.)
 
 .TP
-\fIMaxCPUMins\fP 
+\fIMaxCPUMins\fP
 Maximum number of CPU minutes each job is able to use.
 To clear a previously set value use the modify command with a new
-value of \-1. 
+value of \-1.
 
 .TP
 \fIMaxCPUs\fP
@@ -815,7 +815,7 @@ To clear a previously set value use the modify command with a new value of \-1.
 .TP
 \fIMaxWall\fP
 Maximum wall clock time each job is able to use.
-<max wall> format is <min> or <min>:<sec> or <hr>:<min>:<sec> or 
+<max wall> format is <min> or <min>:<sec> or <hr>:<min>:<sec> or
 <days>\-<hr>:<min>:<sec> or <days>\-<hr>.
 The value is recorded in minutes with rounding as needed.
 To clear a previously set value use the modify command with a new value of \-1.
@@ -899,7 +899,7 @@ Get information about which associations were affected by the transactions.
 .SH ""
 NOTE: If using the WithAssoc option you can also view the information
 about the various associations the transaction effected.  The
-Association format fields are described 
+Association format fields are described
 in the \fILIST/SHOW ASSOCIATION FORMAT OPTIONS\fP section.
 .RE
 
@@ -919,7 +919,7 @@ Specific cluster to add user to the account on.  Default is all in system.
 
 .TP
 \fIDefaultAccount\fP=<account>
-Identify the default bank account name to be used for a job if none is 
+Identify the default bank account name to be used for a job if none is
 specified at submission time.
 
 .TP
@@ -951,7 +951,7 @@ ASSOCIATION BASED ENTITIES\fP section.
 
 .TP
 \fIAdminLevel\fP
-Admin level of user. 
+Admin level of user.
 
 .TP
 \fIDefaultAccount\fP
@@ -977,7 +977,7 @@ in the \fILIST/SHOW ASSOCIATION FORMAT OPTIONS\fP section.
 
 .TP
 \fIWCKey\fP
-Workload Characterization Key. 
+Workload Characterization Key.
 
 .TP
 \fICluster\fP
@@ -999,7 +999,7 @@ When using the format option for listing various fields you can put a
 %NUMBER afterwards to specify how many characters should be printed.
 
 i.e. format=name%30 will print 30 characters of field name right
-justified.  A \-30 will print 30 characters left justified.  
+justified.  A \-30 will print 30 characters left justified.
 
 .SH "FLAT FILE DUMP AND LOAD"
 sacctmgr has the capability to load and dump SLURM association data to and
@@ -1071,12 +1071,12 @@ are children of this association.
 \fIGrpSubmitJobs=\fP
 Maximum number of jobs which can be in a pending or
 running state at any time in aggregate for this association and all
-association which are children of this association. 
+association which are children of this association.
 .TP
 \fIGrpWall=\fP
 Maximum wall clock time running jobs are able to be
 allocated in aggregate for this association and all association which
-are children of this association. 
+are children of this association.
 .TP
 \fIFairShare=\fP
 Number used in conjunction with other associations to determine job priority.
@@ -1111,7 +1111,7 @@ Followed by Accounts you want in this fashion...
 
 .TP
 Any of the options after a ':' can be left out and they can be in any order.
-If you want to add any sub accounts just list the Parent THAT HAS ALREADY 
+If you want to add any sub accounts just list the Parent THAT HAS ALREADY
 BEEN CREATED before the account line in this fashion...
 .TP
 All account options are
@@ -1145,12 +1145,12 @@ are children of this association.
 \fIGrpSubmitJobs=\fP
 Maximum number of jobs which can be in a pending or
 running state at any time in aggregate for this association and all
-association which are children of this association. 
+association which are children of this association.
 .TP
 \fIGrpWall=\fP
 Maximum wall clock time running jobs are able to be
 allocated in aggregate for this association and all association which
-are children of this association. 
+are children of this association.
 .TP
 \fIFairShare=\fP
 Number used in conjunction with other associations to determine job priority.
@@ -1167,7 +1167,7 @@ Maximum cpu seconds children of this accounts jobs can run.
 \fIMaxWallDurationPerJob=\fP
 Maximum time (not related to job size) children of this accounts jobs can run.
 .TP
-\fIOrganization= 
+\fIOrganization=\fP
 Name of organization that owns this account.
 .TP
 \fI\fIQOS(=,+=,\-=)\fP
@@ -1224,9 +1224,9 @@ Sacctmgr has the capability to archive to a flatfile and or load that
 data if needed later.  The archiving is usually done by the slurmdbd
 and it is highly recommended you only do it through sacctmgr if you
 completely understand what you are doing.  For slurmdbd options see
-"man slurmdbd" for more information. 
+"man slurmdbd" for more information.
 Loading data into the database can be done from these files to either
-view old data or regenerate rolled up data. 
+view old data or regenerate rolled up data.
 
 These are the options for both dump and load of archive information.
 
@@ -1266,14 +1266,14 @@ all step data removed will be lost permanently.
 \fISuspend\fP
 Archive Suspend Data.  If not specified and PurgeSuspendMonths is set
 all suspend data removed will be lost permanently.
-.RE    
-                                                                       
+.RE
+
 archive load
 .TP
 \fIFile=\fP
 File to load into database.
 .TP
-\fIInsert=\fP 
+\fIInsert=\fP
 SQL to insert directly into the database.  This should be used very
 cautiously since this is writing your sql into the database.
 .RE
@@ -1322,13 +1322,13 @@ wrong> sacctmgr modify user name=adam set fairshare=10 cluster=tux
 
 .br
 This will produce an error as the above line reads modify user adam
-set fairshare=10 and cluster=tux.  
+set fairshare=10 and cluster=tux.
 .br
 
 .br
 right> sacctmgr modify user name=adam cluster=tux set fairshare=10
 .br
-right> sacctmgr modify user name=adam set fairshare=10 where cluster=tux 
+right> sacctmgr modify user name=adam set fairshare=10 where cluster=tux
 .br
 
 .br
diff --git a/doc/man/man1/salloc.1 b/doc/man/man1/salloc.1
index 4fd4cea3266b565334c00dccbf076a3737070057..345fe67411e18aee4358c8d5313dabc532ce53b7 100644
--- a/doc/man/man1/salloc.1
+++ b/doc/man/man1/salloc.1
@@ -1,24 +1,24 @@
 .TH "salloc" "1" "SLURM 2.1" "September 2009" "SLURM Commands"
 
 .SH "NAME"
-salloc \- Obtain a SLURM job allocation (a set of nodes), execute a command, 
+salloc \- Obtain a SLURM job allocation (a set of nodes), execute a command,
 and then release the allocation when the command is finished.
 
 .SH "SYNOPSIS"
 salloc [\fIoptions\fP] [<\fIcommand\fP> [\fIcommand args\fR]]
 
 .SH "DESCRIPTION"
-salloc is used to allocate a SLURM job allocation, which is a set of resources 
-(nodes), possibly with some set of constraints (e.g. number of processors per 
-node).  When salloc successfully obtains the requested allocation, it then runs 
-the command specified by the user.  Finally, when the user specified command is 
+salloc is used to allocate a SLURM job allocation, which is a set of resources
+(nodes), possibly with some set of constraints (e.g. number of processors per
+node).  When salloc successfully obtains the requested allocation, it then runs
+the command specified by the user.  Finally, when the user specified command is
 complete, salloc relinquishes the job allocation.
 
-The command may be any program the user wishes.  Some typical commands are 
-xterm, a shell script containing srun commands, and srun (see the EXAMPLES 
-section). If no command is specified, then the value of 
-\fBSallocDefaultCommand\fR in slurm.conf is used. If 
-\fBSallocDefaultCommand\fR is not set, then \fBsalloc\fR runs the 
+The command may be any program the user wishes.  Some typical commands are
+xterm, a shell script containing srun commands, and srun (see the EXAMPLES
+section). If no command is specified, then the value of
+\fBSallocDefaultCommand\fR in slurm.conf is used. If
+\fBSallocDefaultCommand\fR is not set, then \fBsalloc\fR runs the
 user's default shell.
 
 .SH "OPTIONS"
@@ -60,12 +60,12 @@ options if desired:
 When the task/affinity plugin is enabled,
 specifying an allocation in this manner also instructs SLURM to use
 a CPU affinity mask to guarantee the request is filled as specified.
-NOTE: Support for these options are configuration dependent. 
+NOTE: Support for these options is configuration dependent.
 The task/affinity plugin must be configured.
-In addition either select/linear or select/cons_res plugin must be 
+In addition either select/linear or select/cons_res plugin must be
 configured.
-If select/cons_res is configured, it must have a parameter of CR_Core, 
-CR_Core_Memory, CR_Socket, or CR_Socket_Memory. 
+If select/cons_res is configured, it must have a parameter of CR_Core,
+CR_Core_Memory, CR_Socket, or CR_Socket_Memory.
 
 .TP
 \fB\-\-begin\fR=<\fItime\fR>
@@ -125,30 +125,30 @@ An arbitrary comment.
 
 .TP
 \fB\-C\fR, \fB\-\-constraint\fR=<\fIlist\fR>
-Specify a list of constraints. 
-The constraints are features that have been assigned to the nodes by 
-the slurm administrator. 
-The \fIlist\fR of constraints may include multiple features separated 
+Specify a list of constraints.
+The constraints are features that have been assigned to the nodes by
+the slurm administrator.
+The \fIlist\fR of constraints may include multiple features separated
 by ampersand (AND) and/or vertical bar (OR) operators.
-For example: \fB\-\-constraint="opteron&video"\fR or 
+For example: \fB\-\-constraint="opteron&video"\fR or
 \fB\-\-constraint="fast|faster"\fR.
 In the first example, only nodes having both the feature "opteron" AND
 the feature "video" will be used.
 There is no mechanism to specify that you want one node with feature
 "opteron" and another node with feature "video" in that case that no
 node has both features.
-If only one of a set of possible options should be used for all allocated 
-nodes, then use the OR operator and enclose the options within square brackets. 
-For example: "\fB\-\-constraint=[rack1|rack2|rack3|rack4]"\fR might 
-be used to specify that all nodes must be allocated on a single rack of 
+If only one of a set of possible options should be used for all allocated
+nodes, then use the OR operator and enclose the options within square brackets.
+For example: "\fB\-\-constraint=[rack1|rack2|rack3|rack4]"\fR might
+be used to specify that all nodes must be allocated on a single rack of
 the cluster, but any of those four racks can be used.
 A request can also specify the number of nodes needed with some feature
-by appending an asterisk and count after the feature name. 
-For example "\fBsalloc \-\-nodes=16 \-\-constraint=graphics*4 ..."\fR 
+by appending an asterisk and count after the feature name.
+For example "\fBsalloc \-\-nodes=16 \-\-constraint=graphics*4 ..."\fR
 indicates that the job requires 16 nodes at that at least four of those
 nodes must have the feature "graphics."
 Constraints with node counts may only be combined with AND operators.
-If no nodes have the requested features, then the job will be rejected 
+If no nodes have the requested features, then the job will be rejected
 by the slurm job manager.
 
 .TP
@@ -263,7 +263,7 @@ Show this help message
 
 .TP
 \fB\-c\fR, \fB\-\-cpus\-per\-task\fR=<\fIncpus\fR>
-Advise the SLURM controller that ensuing job steps will require \fIncpus\fR 
+Advise the SLURM controller that ensuing job steps will require \fIncpus\fR
 number of processors per task.  Without this option, the controller will
 just try to allocate one processor per task.
 
@@ -279,7 +279,7 @@ of 4 nodes, one for each of the 4 tasks.
 \fB\-d\fR, \fB\-\-dependency\fR=<\fIdependency_list\fR>
 Defer the start of this job until the specified dependencies have been
 satisfied completed.
-<\fIdependency_list\fR> is of the form 
+<\fIdependency_list\fR> is of the form
 <\fItype:job_id[:job_id][,type:job_id[:job_id]]\fR>.
 Many jobs can share the same dependency and these jobs may even belong to
 different  users. The  value may be changed after job submission using the
@@ -350,8 +350,8 @@ This option was originally created for use by Moab.
 
 .TP
 \fB\-\-gid\fR=<\fIgroup\fR>
-If \fBsalloc\fR is run as root, and the \fB\-\-gid\fR option is used, 
-submit the job with \fIgroup\fR's group access permissions.  \fIgroup\fR 
+If \fBsalloc\fR is run as root, and the \fB\-\-gid\fR option is used,
+submit the job with \fIgroup\fR's group access permissions.  \fIgroup\fR
 may be the group name or the numerical group ID.
 
 .TP
@@ -381,8 +381,8 @@ show this help message
 
 .TP
 \fB\-I\fR, \fB\-\-immediate\fR[=<\fIseconds\fR>]
-exit if resources are not available within the 
-time period specified. 
+exit if resources are not available within the
+time period specified.
 If no argument is given, resources must be available immediately
 for the request to succeed.
 By default, \fB\-\-immediate\fR is off, and the command
@@ -424,9 +424,9 @@ range of allocated nodes.
 
 .TP
 \fB\-L\fR, \fB\-\-licenses\fR=<\fBlicense\fR>
-Specification of licenses (or other resources available on all 
+Specification of licenses (or other resources available on all
 nodes of the cluster) which must be allocated to this job.
-License names can be followed by an asterisk and count 
+License names can be followed by an asterisk and count
 (the default count is one).
 Multiple license names should be comma separated (e.g.
 "\-\-licenses=foo*4,bar").
@@ -441,13 +441,13 @@ subsequent srun requests.
 .TP
 .B block
 The block method of distribution will allocate processes in\-order to
-the cpus on a node. If the number of processes exceeds the number of 
-cpus on all of the nodes in the allocation then all nodes will be 
-utilized. For example, consider an allocation of three nodes each with 
-two cpus. A four\-process block distribution request will distribute 
-those processes to the nodes with processes one and two on the first 
-node, process three on the second node, and process four on the third node.  
-Block distribution is the default behavior if the number of tasks 
+the cpus on a node. If the number of processes exceeds the number of
+cpus on all of the nodes in the allocation then all nodes will be
+utilized. For example, consider an allocation of three nodes each with
+two cpus. A four\-process block distribution request will distribute
+those processes to the nodes with processes one and two on the first
+node, process three on the second node, and process four on the third node.
+Block distribution is the default behavior if the number of tasks
 exceeds the number of nodes requested.
 .TP
 .B cyclic
@@ -459,7 +459,7 @@ if the number of tasks is no larger than the number of nodes requested.
 .B plane
 The tasks are distributed in blocks of a specified size.
 The options include a number representing the size of the task block.
-This is followed by an optional specification of the task distribution 
+This is followed by an optional specification of the task distribution
 scheme within a block of tasks and between the blocks of tasks.
 For more details (including examples and diagrams), please see
 .br
@@ -470,9 +470,9 @@ and
 https://computing.llnl.gov/linux/slurm/dist_plane.html.
 .TP
 .B arbitrary
-The arbitrary method of distribution will allocate processes in\-order as 
+The arbitrary method of distribution will allocate processes in\-order as
 listed in file designated by the environment variable SLURM_HOSTFILE.  If
+this variable is listed it will override any other method specified.
+this variable is listed it will over ride any other method specified.
 If not set the method will default to block.  Inside the hostfile must
 contain at minimum the number of hosts requested.  If requesting tasks
 (\-n) your tasks will be laid out on the nodes in the order of the file.
@@ -480,13 +480,13 @@ contain at minimum the number of hosts requested.  If requesting tasks
 
 .TP
 \fB\-\-mail\-type\fR=<\fItype\fR>
-Notify user by email when certain event types occur. 
-Valid \fItype\fR values are BEGIN, END, FAIL, ALL (any state change). 
-The user to be notified is indicated with \fB\-\-mail\-user\fR. 
+Notify user by email when certain event types occur.
+Valid \fItype\fR values are BEGIN, END, FAIL, ALL (any state change).
+The user to be notified is indicated with \fB\-\-mail\-user\fR.
 
 .TP
 \fB\-\-mail\-user\fR=<\fIuser\fR>
-User to receive email notification of state changes as defined by 
+User to receive email notification of state changes as defined by
 \fB\-\-mail\-type\fR.
 The default value is the submitting user.
 
@@ -505,7 +505,7 @@ Also see \fB\-\-mem\-per\-cpu\fR.
 \fB\-\-mem\-per\-cpu\fR=<\fIMB\fR>
 Mimimum memory required per allocated CPU in MegaBytes.
 Default value is \fBDefMemPerCPU\fR and the maximum value is
-\fBMaxMemPerCPU\fR. If configured, both of parameters can be 
+\fBMaxMemPerCPU\fR. If configured, both parameters can be
 seen using the \fBscontrol show config\fR command.
 This parameter would generally be used of individual processors
 are allocated to jobs (\fBSelectType=select/cons_res\fR).
@@ -514,16 +514,16 @@ Also see \fB\-\-mem\fR.
 
 .TP
 \fB\-\-mem_bind\fR=[{\fIquiet,verbose\fR},]\fItype\fR
-Bind tasks to memory. Used only when the task/affinity plugin is enabled 
+Bind tasks to memory. Used only when the task/affinity plugin is enabled
 and the NUMA memory functions are available.
-\fBNote that the resolution of CPU and memory binding 
-may differ on some architectures.\fR For example, CPU binding may be performed 
-at the level of the cores within a processor while memory binding will 
-be performed at the level of nodes, where the definition of "nodes" 
-may differ from system to system. \fBThe use of any type other than 
+\fBNote that the resolution of CPU and memory binding
+may differ on some architectures.\fR For example, CPU binding may be performed
+at the level of the cores within a processor while memory binding will
+be performed at the level of nodes, where the definition of "nodes"
+may differ from system to system. \fBThe use of any type other than
 "none" or "local" is not recommended.\fR
-If you want greater control, try running a simple test code with the 
-options "\-\-cpu_bind=verbose,none \-\-mem_bind=verbose,none" to determine 
+If you want greater control, try running a simple test code with the
+options "\-\-cpu_bind=verbose,none \-\-mem_bind=verbose,none" to determine
 the specific configuration.
 
 NOTE: To have SLURM always report on the selected memory binding for
@@ -602,16 +602,16 @@ A limit on the maximum node count may be specified with \fImaxnodes\fR
 (e.g. "\-\-nodes=2\-4").  The minimum and maximum node count may be the
 same to specify a specific number of nodes (e.g. "\-\-nodes=2\-2" will ask
 for two and ONLY two nodes).
-The partition's node limits supersede those of the job. 
-If a job's node limits are outside of the range permitted for its 
-associated partition, the job will be left in a PENDING state. 
-This permits possible execution at a later time, when the partition 
+The partition's node limits supersede those of the job.
+If a job's node limits are outside of the range permitted for its
+associated partition, the job will be left in a PENDING state.
+This permits possible execution at a later time, when the partition
 limit is changed.
-If a job node limit exceeds the number of nodes configured in the 
+If a job node limit exceeds the number of nodes configured in the
 partition, the job will be rejected.
-Note that the environment 
-variable \fBSLURM_NNODES\fR will be set to the count of nodes actually 
-allocated to the job. See the \fBENVIRONMENT VARIABLES \fR section 
+Note that the environment
+variable \fBSLURM_NNODES\fR will be set to the count of nodes actually
+allocated to the job. See the \fBENVIRONMENT VARIABLES \fR section
 for more information.  If \fB\-N\fR is not specified, the default
 behavior is to allocate enough nodes to satisfy the requirements of
 the \fB\-n\fR and \fB\-c\fR options.
@@ -620,28 +620,28 @@ and without delaying the initiation of the job.
 
 .TP
 \fB\-n\fR, \fB\-\-ntasks\fR=<\fInumber\fR>
-salloc does not launch tasks, it requests an allocation of resources and 
-executed some command. This option advises the SLURM controller that job 
+salloc does not launch tasks, it requests an allocation of resources and
+executes some command. This option advises the SLURM controller that job
 steps run within this allocation will launch a maximum of \fInumber\fR
-tasks and sufficient resources are allocated to accomplish this.  
-The default is one task per socket or core (depending upon the value 
-of the \fISelectTypeParameters\fR parameter in slurm.conf), but note 
+tasks and sufficient resources are allocated to accomplish this.
+The default is one task per socket or core (depending upon the value
+of the \fISelectTypeParameters\fR parameter in slurm.conf), but note
 that the \fB\-\-cpus\-per\-task\fR option will change this default.
 
 .TP
 \fB\-\-network\fR=<\fItype\fR>
-Specify the communication protocol to be used. 
+Specify the communication protocol to be used.
 This option is supported on AIX systems.
-Since POE is used to launch tasks, this option is not normally used or 
+Since POE is used to launch tasks, this option is not normally used or
 is specified using the \fBSLURM_NETWORK\fR environment variable.
 The interpretation of \fItype\fR is system dependent.
-For systems with an IBM Federation switch, the following 
+For systems with an IBM Federation switch, the following
 comma\-separated and case insensitive types are recognized:
-\fBIP\fR (the default is user\-space), \fBSN_ALL\fR, \fBSN_SINGLE\fR, 
-\fBBULK_XFER\fR and adapter names  (e.g. \fBSNI0\fR and \fBSNI1\fR). 
+\fBIP\fR (the default is user\-space), \fBSN_ALL\fR, \fBSN_SINGLE\fR,
+\fBBULK_XFER\fR and adapter names  (e.g. \fBSNI0\fR and \fBSNI1\fR).
 For more information, on IBM systems see \fIpoe\fR documentation on
 the environment variables \fBMP_EUIDEVICE\fR and \fBMP_USE_BULK_XFER\fR.
-Note that only four jobs steps may be active at once on a node with the 
+Note that only four job steps may be active at once on a node with the
 \fBBULK_XFER\fR option due to limitations in the Federation switch driver.
 
 .TP
@@ -651,7 +651,7 @@ With no adjustment value the scheduling priority is decreased
 by 100. The adjustment range is from \-10000 (highest priority)
 to 10000 (lowest priority). Only privileged users can specify
 a negative adjustment. NOTE: This option is presently
-ignored if \fISchedulerType=sched/wiki\fR or 
+ignored if \fISchedulerType=sched/wiki\fR or
 \fISchedulerType=sched/wiki2\fR.
 
 .TP
@@ -738,14 +738,14 @@ performance will likely suffer due to competition for resources within a node.
 
 .TP
 \fB\-\-signal\fR=<\fIsig_num\fR>[@<\fIsig_time\fR>]
-When a job is within \fIsig_time\fR seconds of its end time, 
-send it the signal \fIsig_num\fR. 
-Due to the resolution of event handling by SLURM, the signal may 
+When a job is within \fIsig_time\fR seconds of its end time,
+send it the signal \fIsig_num\fR.
+Due to the resolution of event handling by SLURM, the signal may
 be sent up to 60 seconds earlier than specified.
 Both \fIsig_time\fR and \fIsig_num\fR must have integer values
 between zero and 65535.
-By default, no signal is sent before the job's end time. 
-If a \fIsig_num\fR is specified without any \fIsig_time\fR, 
+By default, no signal is sent before the job's end time.
+If a \fIsig_num\fR is specified without any \fIsig_time\fR,
 the default time will be 60 seconds.
 
 .TP
@@ -826,8 +826,8 @@ Default from \fIblugene.conf\fR if not set.
 
 .TP
 \fB\-\-conn\-type\fR=<\fItype\fR>
-Require the partition connection type to be of a certain type.  
-On Blue Gene the acceptable of \fItype\fR are MESH, TORUS and NAV.  
+Require the partition connection type to be of a certain type.
+On Blue Gene the acceptable values of \fItype\fR are MESH, TORUS and NAV.
 If NAV, or if not set, then SLURM will try to fit a TORUS else MESH.
 You should not normally set this option.
 SLURM will normally allocate a TORUS if possible for a given geometry.
@@ -837,10 +837,10 @@ for virtual node mode, and HTC_L for Linux mode.
 
 .TP
 \fB\-g\fR, \fB\-\-geometry\fR=<\fIXxYxZ\fR>
-Specify the geometry requirements for the job. The three numbers 
-represent the required geometry giving dimensions in the X, Y and 
-Z directions. For example "\-\-geometry=2x3x4", specifies a block 
-of nodes having 2 x 3 x 4 = 24 nodes (actually base partitions on 
+Specify the geometry requirements for the job. The three numbers
+represent the required geometry giving dimensions in the X, Y and
+Z directions. For example "\-\-geometry=2x3x4", specifies a block
+of nodes having 2 x 3 x 4 = 24 nodes (actually base partitions on
 Blue Gene).
 
 .TP
@@ -860,7 +860,7 @@ Default from \fIblugene.conf\fR if not set.
 
 .TP
 \fB\-R\fR, \fB\-\-no\-rotate\fR
-Disables rotation of the job's requested geometry in order to fit an 
+Disables rotation of the job's requested geometry in order to fit an
 appropriate partition.
 By default the specified geometry can rotate in three dimensions.
 
@@ -902,15 +902,15 @@ Same as \fB\-v, \-\-verbose\fR
 Same as \fB\-\-exclusive\fR
 .TP
 \fBSLURM_EXIT_ERROR\fR
-Specifies the exit code generated when a SLURM error occurs 
+Specifies the exit code generated when a SLURM error occurs
 (e.g. invalid options).
 This can be used by a script to distinguish application exit codes from
 various SLURM error conditions.
 Also see \fBSLURM_EXIT_IMMEDIATE\fR.
 .TP
 \fBSLURM_EXIT_IMMEDIATE\fR
-Specifies the exit code generated when the \fB\-\-immediate\fR option 
-is used and resources are not currently available. 
+Specifies the exit code generated when the \fB\-\-immediate\fR option
+is used and resources are not currently available.
 This can be used by a script to distinguish application exit codes from
 various SLURM error conditions.
 Also see \fBSLURM_EXIT_ERROR\fR.
@@ -1007,26 +1007,26 @@ Do not free a block on Blue Gene systems only.
 The block name on Blue Gene systems only.
 
 .SH "SIGNALS"
-.LP 
+.LP
 While salloc is waiting for a PENDING job allocation, most signals will cause salloc to revoke the allocation request and exit.
 
-However, if the allocation has been granted and salloc has already started the command 
+However, if the allocation has been granted and salloc has already started the command
 specified in its command line parameters salloc will ignore most signals.  salloc will not exit or release the allocation until the command exits.  The notable exception is SIGHUP; a HUP signal will cause salloc to release the allocation and exit without waiting for the command to finish.
 
 .SH "EXAMPLES"
-.LP 
+.LP
 To get an allocation, and open a new xterm in which srun commands may be typed interactively:
-.IP 
+.IP
 $ salloc \-N16 xterm
-.br 
+.br
 salloc: Granted job allocation 65537
-.br 
+.br
 (at this point the xterm appears, and salloc waits for xterm to exit)
-.br 
+.br
 salloc: Relinquishing job allocation 65537
-.LP 
+.LP
 To grab an allocation of nodes and launch a parallel application on one command line (See the \fBsalloc\fR man page for more examples):
-.IP 
+.IP
 salloc \-N5 srun \-n10 myprogram
 
 .SH "COPYING"
@@ -1049,5 +1049,5 @@ FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 .SH "SEE ALSO"
-.LP 
+.LP
 sinfo(1), sattach(1), sbatch(1), squeue(1), scancel(1), scontrol(1), slurm.conf(5), sched_setaffinity(2), numa(3)
diff --git a/doc/man/man1/sattach.1 b/doc/man/man1/sattach.1
index f50b7200f0d49c565c666f5846b891e43a8f10cb..47b15628836233bfe8bea37fd0311678f9de1df9 100644
--- a/doc/man/man1/sattach.1
+++ b/doc/man/man1/sattach.1
@@ -1,30 +1,30 @@
 .TH "sattach" "1" "SLURM 2.1" "October 2009" "SLURM Commands"
 .SH "NAME"
-.LP 
+.LP
 sattach \- Attach to a SLURM job step.
 .SH "SYNOPSIS"
-.LP 
+.LP
 sattach [\fIoptions\fP] <jobid.stepid>
 .SH "DESCRIPTION"
-.LP 
+.LP
 sattach attaches to a running SLURM job step.  By attaching, it makes available
 the IO streams of all of the tasks of a running SLURM job step.  It also
 suitable for use with a parallel debugger like TotalView.
 
 .SH "OPTIONS"
-.LP 
+.LP
 
-.TP 
+.TP
 \fB\-h\fR, \fB\-\-help\fR
 Display help information and exit.
 
-.TP 
+.TP
 \fB\-\-input\-filter\fR[=]<\fItask number\fR>
 .PD 0
 .TP
 \fB\-\-output\-filter\fR[=]<\fItask number\fR>
 .PD 0
-.TP 
+.TP
 \fB\-\-error\-filter\fR[=]<\fItask number\fR>
 .PD
 Only transmit standard input to a single task, or print the standard output
@@ -50,7 +50,7 @@ Suppress informational messages from sattach. Errors will still be displayed.
 \fB\-u\fR, \fB\-\-usage\fR
 Display brief usage message and exit.
 
-.TP 
+.TP
 \fB\-V\fR, \fB\-\-version\fR
 Display SLURM version number and exit.
 
@@ -67,14 +67,14 @@ variables settings.
 
 .TP 22
 \fBSLURM_EXIT_ERROR\fR
-Specifies the exit code generated when a SLURM error occurs 
+Specifies the exit code generated when a SLURM error occurs
 (e.g. invalid options).
 This can be used by a script to distinguish application exit codes from
 various SLURM error conditions.
 
 
 .SH "EXAMPLES"
-.LP 
+.LP
 sattach 15.0
 
 sattach \-\-output\-filter 5 65386.15
@@ -99,5 +99,5 @@ FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 .SH "SEE ALSO"
-.LP 
+.LP
 sinfo(1), salloc(1), sbatch(1), squeue(1), scancel(1), scontrol(1), slurm.conf(5), sched_setaffinity(2), numa(3)
diff --git a/doc/man/man1/sbatch.1 b/doc/man/man1/sbatch.1
index 5bc3d216912bd6a4e962c078657bc4c2155b374f..88266b3a372a63442e8b321591aa81a48c998d22 100644
--- a/doc/man/man1/sbatch.1
+++ b/doc/man/man1/sbatch.1
@@ -59,12 +59,12 @@ options if desired:
 When the task/affinity plugin is enabled,
 specifying an allocation in this manner also instructs SLURM to use
 a CPU affinity mask to guarantee the request is filled as specified.
-NOTE: Support for these options are configuration dependent. 
+NOTE: Support for these options is configuration dependent.
 The task/affinity plugin must be configured.
-In addition either select/linear or select/cons_res plugin must be 
+In addition either select/linear or select/cons_res plugin must be
 configured.
-If select/cons_res is configured, it must have a parameter of CR_Core, 
-CR_Core_Memory, CR_Socket, or CR_Socket_Memory. 
+If select/cons_res is configured, it must have a parameter of CR_Core,
+CR_Core_Memory, CR_Socket, or CR_Socket_Memory.
 
 .TP
 \fB\-\-begin\fR=<\fItime\fR>
@@ -113,15 +113,15 @@ already passed for that year, in which case the next year is used.
 
 .TP
 \fB\-\-checkpoint\fR=<\fItime\fR>
-Specifies the interval between creating checkpoints of the job step. 
+Specifies the interval between creating checkpoints of the job step.
 By default, the job step will no checkpoints created.
-Acceptable time formats include "minutes", "minutes:seconds", 
-"hours:minutes:seconds", "days\-hours", "days\-hours:minutes" and 
+Acceptable time formats include "minutes", "minutes:seconds",
+"hours:minutes:seconds", "days\-hours", "days\-hours:minutes" and
 "days\-hours:minutes:seconds".
 
 .TP
 \fB\-\-checkpoint\-dir\fR=<\fIdirectory\fR>
-Specifies the directory into which the job or job step's checkpoint should 
+Specifies the directory into which the job or job step's checkpoint should
 be written (used by the checkpoint/blcrm and checkpoint/xlch plugins only).
 The default value is the current working directory.
 Checkpoint files will be of the form "<job_id>.ckpt" for jobs
@@ -133,30 +133,30 @@ An arbitrary comment.
 
 .TP
 \fB\-C\fR, \fB\-\-constraint\fR=<\fIlist\fR>
-Specify a list of constraints. 
-The constraints are features that have been assigned to the nodes by 
-the slurm administrator. 
-The \fIlist\fR of constraints may include multiple features separated 
+Specify a list of constraints.
+The constraints are features that have been assigned to the nodes by
+the slurm administrator.
+The \fIlist\fR of constraints may include multiple features separated
 by ampersand (AND) and/or vertical bar (OR) operators.
-For example: \fB\-\-constraint="opteron&video"\fR or 
+For example: \fB\-\-constraint="opteron&video"\fR or
 \fB\-\-constraint="fast|faster"\fR.
 In the first example, only nodes having both the feature "opteron" AND
 the feature "video" will be used.
 There is no mechanism to specify that you want one node with feature
 "opteron" and another node with feature "video" in that case that no
 node has both features.
-If only one of a set of possible options should be used for all allocated 
-nodes, then use the OR operator and enclose the options within square brackets. 
-For example: "\fB\-\-constraint=[rack1|rack2|rack3|rack4]"\fR might 
-be used to specify that all nodes must be allocated on a single rack of 
+If only one of a set of possible options should be used for all allocated
+nodes, then use the OR operator and enclose the options within square brackets.
+For example: "\fB\-\-constraint=[rack1|rack2|rack3|rack4]"\fR might
+be used to specify that all nodes must be allocated on a single rack of
 the cluster, but any of those four racks can be used.
 A request can also specify the number of nodes needed with some feature
-by appending an asterisk and count after the feature name. 
-For example "\fBsbatch \-\-nodes=16 \-\-constraint=graphics*4 ..."\fR 
+by appending an asterisk and count after the feature name.
+For example "\fBsbatch \-\-nodes=16 \-\-constraint=graphics*4 ..."\fR
 indicates that the job requires 16 nodes at that at least four of those
 nodes must have the feature "graphics."
 Constraints with node counts may only be combined with AND operators.
-If no nodes have the requested features, then the job will be rejected 
+If no nodes have the requested features, then the job will be rejected
 by the slurm job manager.
 
 .TP
@@ -271,7 +271,7 @@ Show this help message
 
 .TP
 \fB\-c\fR, \fB\-\-cpus\-per\-task\fR=<\fIncpus\fR>
-Advise the SLURM controller that ensuing job steps will require \fIncpus\fR 
+Advise the SLURM controller that ensuing job steps will require \fIncpus\fR
 number of processors per task.  Without this option, the controller will
 just try to allocate one processor per task.
 
@@ -287,7 +287,7 @@ of 4 nodes, one for each of the 4 tasks.
 \fB\-d\fR, \fB\-\-dependency\fR=<\fIdependency_list\fR>
 Defer the start of this job until the specified dependencies have been
 satisfied completed.
-<\fIdependency_list\fR> is of the form 
+<\fIdependency_list\fR> is of the form
 <\fItype:job_id[:job_id][,type:job_id[:job_id]]\fR>.
 Many jobs can share the same dependency and these jobs may even belong to
 different  users. The  value may be changed after job submission using the
@@ -322,7 +322,7 @@ it it executed.
 
 .TP
 \fB\-e\fR, \fB\-\-error\fR=<\fIfilename pattern\fR>
-Instruct SLURM to connect the batch script's standard error directly to the 
+Instruct SLURM to connect the batch script's standard error directly to the
 file name specified in the "\fIfilename pattern\fR".
 See the \fB\-\-input\fR option for filename specification options.
 
@@ -366,8 +366,8 @@ This option was originally created for use by Moab.
 
 .TP
 \fB\-\-gid\fR=<\fIgroup\fR>
-If \fBsbatch\fR is run as root, and the \fB\-\-gid\fR option is used, 
-submit the job with \fIgroup\fR's group access permissions.  \fIgroup\fR 
+If \fBsbatch\fR is run as root, and the \fB\-\-gid\fR option is used,
+submit the job with \fIgroup\fR's group access permissions.  \fIgroup\fR
 may be the group name or the numerical group ID.
 
 .TP
@@ -454,9 +454,9 @@ range of allocated nodes.
 
 .TP
 \fB\-L\fR, \fB\-\-licenses\fR=<\fBlicense\fR>
-Specification of licenses (or other resources available on all 
+Specification of licenses (or other resources available on all
 nodes of the cluster) which must be allocated to this job.
-License names can be followed by an asterisk and count 
+License names can be followed by an asterisk and count
 (the default count is one).
 Multiple license names should be comma separated (e.g.
 "\-\-licenses=foo*4,bar").
@@ -471,13 +471,13 @@ subsequent srun requests.
 .TP
 .B block
 The block method of distribution will allocate processes in\-order to
-the cpus on a node. If the number of processes exceeds the number of 
-cpus on all of the nodes in the allocation then all nodes will be 
-utilized. For example, consider an allocation of three nodes each with 
-two cpus. A four\-process block distribution request will distribute 
-those processes to the nodes with processes one and two on the first 
-node, process three on the second node, and process four on the third node.  
-Block distribution is the default behavior if the number of tasks 
+the cpus on a node. If the number of processes exceeds the number of
+cpus on all of the nodes in the allocation then all nodes will be
+utilized. For example, consider an allocation of three nodes each with
+two cpus. A four\-process block distribution request will distribute
+those processes to the nodes with processes one and two on the first
+node, process three on the second node, and process four on the third node.
+Block distribution is the default behavior if the number of tasks
 exceeds the number of nodes requested.
 .TP
 .B cyclic
@@ -489,7 +489,7 @@ if the number of tasks is no larger than the number of nodes requested.
 .B plane
 The tasks are distributed in blocks of a specified size.
 The options include a number representing the size of the task block.
-This is followed by an optional specification of the task distribution 
+This is followed by an optional specification of the task distribution
 scheme within a block of tasks and between the blocks of tasks.
 For more details (including examples and diagrams), please see
 .br
@@ -500,9 +500,9 @@ and
 https://computing.llnl.gov/linux/slurm/dist_plane.html.
 .TP
 .B arbitrary
-The arbitrary method of distribution will allocate processes in\-order as 
+The arbitrary method of distribution will allocate processes in\-order as
 listed in file designated by the environment variable SLURM_HOSTFILE.  If
-this variable is listed it will over ride any other method specified. 
+this variable is listed it will override any other method specified.
 If not set the method will default to block.  Inside the hostfile must
 contain at minimum the number of hosts requested.  If requesting tasks
 (\-n) your tasks will be laid out on the nodes in the order of the file.
@@ -510,13 +510,13 @@ contain at minimum the number of hosts requested.  If requesting tasks
 
 .TP
 \fB\-\-mail\-type\fR=<\fItype\fR>
-Notify user by email when certain event types occur. 
-Valid \fItype\fR values are BEGIN, END, FAIL, ALL (any state change). 
-The user to be notified is indicated with \fB\-\-mail\-user\fR. 
+Notify user by email when certain event types occur.
+Valid \fItype\fR values are BEGIN, END, FAIL, ALL (any state change).
+The user to be notified is indicated with \fB\-\-mail\-user\fR.
 
 .TP
 \fB\-\-mail\-user\fR=<\fIuser\fR>
-User to receive email notification of state changes as defined by 
+User to receive email notification of state changes as defined by
 \fB\-\-mail\-type\fR.
 The default value is the submitting user.
 
@@ -535,7 +535,7 @@ Also see \fB\-\-mem\-per\-cpu\fR.
 \fB\-\-mem\-per\-cpu\fR=<\fIMB\fR>
 Mimimum memory required per allocated CPU in MegaBytes.
 Default value is \fBDefMemPerCPU\fR and the maximum value is
-\fBMaxMemPerCPU\fR. If configured, both of parameters can be 
+\fBMaxMemPerCPU\fR. If configured, both of these parameters can be
 seen using the \fBscontrol show config\fR command.
 This parameter would generally be used of individual processors
 are allocated to jobs (\fBSelectType=select/cons_res\fR).
@@ -544,16 +544,16 @@ Also see \fB\-\-mem\fR.
 
 .TP
 \fB\-\-mem_bind\fR=[{\fIquiet,verbose\fR},]\fItype\fR
-Bind tasks to memory. Used only when the task/affinity plugin is enabled 
+Bind tasks to memory. Used only when the task/affinity plugin is enabled
 and the NUMA memory functions are available.
-\fBNote that the resolution of CPU and memory binding 
-may differ on some architectures.\fR For example, CPU binding may be performed 
-at the level of the cores within a processor while memory binding will 
-be performed at the level of nodes, where the definition of "nodes" 
-may differ from system to system. \fBThe use of any type other than 
+\fBNote that the resolution of CPU and memory binding
+may differ on some architectures.\fR For example, CPU binding may be performed
+at the level of the cores within a processor while memory binding will
+be performed at the level of nodes, where the definition of "nodes"
+may differ from system to system. \fBThe use of any type other than
 "none" or "local" is not recommended.\fR
-If you want greater control, try running a simple test code with the 
-options "\-\-cpu_bind=verbose,none \-\-mem_bind=verbose,none" to determine 
+If you want greater control, try running a simple test code with the
+options "\-\-cpu_bind=verbose,none \-\-mem_bind=verbose,none" to determine
 the specific configuration.
 
 NOTE: To have SLURM always report on the selected memory binding for
@@ -632,16 +632,16 @@ A limit on the maximum node count may be specified with \fImaxnodes\fR
 (e.g. "\-\-nodes=2\-4").  The minimum and maximum node count may be the
 same to specify a specific number of nodes (e.g. "\-\-nodes=2\-2" will ask
 for two and ONLY two nodes).
-The partition's node limits supersede those of the job. 
-If a job's node limits are outside of the range permitted for its 
-associated partition, the job will be left in a PENDING state. 
-This permits possible execution at a later time, when the partition 
+The partition's node limits supersede those of the job.
+If a job's node limits are outside of the range permitted for its
+associated partition, the job will be left in a PENDING state.
+This permits possible execution at a later time, when the partition
 limit is changed.
-If a job node limit exceeds the number of nodes configured in the 
+If a job node limit exceeds the number of nodes configured in the
 partition, the job will be rejected.
-Note that the environment 
-variable \fBSLURM_NNODES\fR will be set to the count of nodes actually 
-allocated to the job. See the \fBENVIRONMENT VARIABLES \fR section 
+Note that the environment
+variable \fBSLURM_NNODES\fR will be set to the count of nodes actually
+allocated to the job. See the \fBENVIRONMENT VARIABLES \fR section
 for more information.  If \fB\-N\fR is not specified, the default
 behavior is to allocate enough nodes to satisfy the requirements of
 the \fB\-n\fR and \fB\-c\fR options.
@@ -650,28 +650,28 @@ and without delaying the initiation of the job.
 
 .TP
 \fB\-n\fR, \fB\-\-ntasks\fR=<\fInumber\fR>
-sbatch does not launch tasks, it requests an allocation of resources and 
-submits a batch script. This option advises the SLURM controller that job 
+sbatch does not launch tasks, it requests an allocation of resources and
+submits a batch script. This option advises the SLURM controller that job
 steps run within this allocation will launch a maximum of \fInumber\fR
-tasks and sufficient resources are allocated to accomplish this.  
-The default is one task per socket or core (depending upon the value 
-of the \fISelectTypeParameters\fR parameter in slurm.conf), but note 
+tasks and sufficient resources are allocated to accomplish this.
+The default is one task per socket or core (depending upon the value
+of the \fISelectTypeParameters\fR parameter in slurm.conf), but note
 that the \fB\-\-cpus\-per\-task\fR option will change this default.
 
 .TP
 \fB\-\-network\fR=<\fItype\fR>
-Specify the communication protocol to be used. 
+Specify the communication protocol to be used.
 This option is supported on AIX systems.
-Since POE is used to launch tasks, this option is not normally used or 
+Since POE is used to launch tasks, this option is not normally used or
 is specified using the \fBSLURM_NETWORK\fR environment variable.
 The interpretation of \fItype\fR is system dependent.
-For systems with an IBM Federation switch, the following 
+For systems with an IBM Federation switch, the following
 comma\-separated and case insensitive types are recognized:
-\fBIP\fR (the default is user\-space), \fBSN_ALL\fR, \fBSN_SINGLE\fR, 
-\fBBULK_XFER\fR and adapter names  (e.g. \fBSNI0\fR and \fBSNI1\fR). 
+\fBIP\fR (the default is user\-space), \fBSN_ALL\fR, \fBSN_SINGLE\fR,
+\fBBULK_XFER\fR and adapter names  (e.g. \fBSNI0\fR and \fBSNI1\fR).
 For more information, on IBM systems see \fIpoe\fR documentation on
 the environment variables \fBMP_EUIDEVICE\fR and \fBMP_USE_BULK_XFER\fR.
-Note that only four jobs steps may be active at once on a node with the 
+Note that only four job steps may be active at once on a node with the
 \fBBULK_XFER\fR option due to limitations in the Federation switch driver.
 
 .TP
@@ -681,17 +681,17 @@ With no adjustment value the scheduling priority is decreased
 by 100. The adjustment range is from \-10000 (highest priority)
 to 10000 (lowest priority). Only privileged users can specify
 a negative adjustment. NOTE: This option is presently
-ignored if \fISchedulerType=sched/wiki\fR or 
+ignored if \fISchedulerType=sched/wiki\fR or
 \fISchedulerType=sched/wiki2\fR.
 
 .TP
 \fB\-\-no\-requeue\fR
 Specifies that the batch job should not be requeued after node failure.
-Setting this option will prevent system administrators from being able 
+Setting this option will prevent system administrators from being able
 to restart the job (for example, after a scheduled downtime).
 When a job is requeued, the batch script is initiated from its beginning.
 Also see the \fB\-\-requeue\fR option.
-The \fIJobRequeue\fR configuration parameter controls the default 
+The \fIJobRequeue\fR configuration parameter controls the default
 behavior on the cluster.
 
 .TP
@@ -739,7 +739,7 @@ allowing more than one task per processor.  However no more than
 
 .TP
 \fB\-o\fR, \fB\-\-output\fR=<\fIfilename pattern\fR>
-Instruct SLURM to connect the batch script's standard output directly to the 
+Instruct SLURM to connect the batch script's standard output directly to the
 file name specified in the "\fIfilename pattern\fR".
 See the \fB\-\-input\fR option for filename specification options.
 
@@ -816,7 +816,7 @@ the SLURM configuration parameter, AccountingStorageEnforce, includes
 Specifies that the batch job should be requeued after node failure.
 When a job is requeued, the batch script is initiated from its beginning.
 Also see the \fB\-\-no\-requeue\fR option.
-The \fIJobRequeue\fR configuration parameter controls the default 
+The \fIJobRequeue\fR configuration parameter controls the default
 behavior on the cluster.
 
 .TP
@@ -833,14 +833,14 @@ performance will likely suffer due to competition for resources within a node.
 
 .TP
 \fB\-\-signal\fR=<\fIsig_num\fR>[@<\fIsig_time\fR>]
-When a job is within \fIsig_time\fR seconds of its end time, 
-send it the signal \fIsig_num\fR. 
-Due to the resolution of event handling by SLURM, the signal may 
+When a job is within \fIsig_time\fR seconds of its end time,
+send it the signal \fIsig_num\fR.
+Due to the resolution of event handling by SLURM, the signal may
 be sent up to 60 seconds earlier than specified.
 Both \fIsig_time\fR and \fIsig_num\fR must have integer values
 between zero and 65535.
-By default, no signal is sent before the job's end time. 
-If a \fIsig_num\fR is specified without any \fIsig_time\fR, 
+By default, no signal is sent before the job's end time.
+If a \fIsig_num\fR is specified without any \fIsig_time\fR,
 the default time will be 60 seconds.
 
 .TP
@@ -929,8 +929,8 @@ Default from \fIblugene.conf\fR if not set.
 
 .TP
 \fB\-\-conn\-type\fR=<\fItype\fR>
-Require the partition connection type to be of a certain type.  
-On Blue Gene the acceptable of \fItype\fR are MESH, TORUS and NAV.  
+Require the partition connection type to be of a certain type.
+On Blue Gene the acceptable values of \fItype\fR are MESH, TORUS and NAV.
 If NAV, or if not set, then SLURM will try to fit a TORUS else MESH.
 You should not normally set this option.
 SLURM will normally allocate a TORUS if possible for a given geometry.
@@ -940,10 +940,10 @@ for virtual node mode, and HTC_L for Linux mode.
 
 .TP
 \fB\-g\fR, \fB\-\-geometry\fR=<\fIXxYxZ\fR>
-Specify the geometry requirements for the job. The three numbers 
-represent the required geometry giving dimensions in the X, Y and 
-Z directions. For example "\-\-geometry=2x3x4", specifies a block 
-of nodes having 2 x 3 x 4 = 24 nodes (actually base partitions on 
+Specify the geometry requirements for the job. The three numbers
+represent the required geometry giving dimensions in the X, Y and
+Z directions. For example "\-\-geometry=2x3x4", specifies a block
+of nodes having 2 x 3 x 4 = 24 nodes (actually base partitions on
 Blue Gene).
 
 .TP
@@ -963,7 +963,7 @@ Default from \fIblugene.conf\fR if not set.
 
 .TP
 \fB\-R\fR, \fB\-\-no\-rotate\fR
-Disables rotation of the job's requested geometry in order to fit an 
+Disables rotation of the job's requested geometry in order to fit an
 appropriate partition.
 By default the specified geometry can rotate in three dimensions.
 
@@ -1012,7 +1012,7 @@ Same as \fB\-m, \-\-distribution\fR
 Same as \fB\-\-exclusive\fR
 .TP
 \fBSLURM_EXIT_ERROR\fR
-Specifies the exit code generated when a SLURM error occurs 
+Specifies the exit code generated when a SLURM error occurs
 (e.g. invalid options).
 This can be used by a script to distinguish application exit codes from
 various SLURM error conditions.
@@ -1138,7 +1138,7 @@ The block name on Blue Gene systems only.
 .LP
 Specify a batch script by filename on the command line.
 The batch script specifies a 1 minute time limit for the job.
-.IP 
+.IP
 $ cat myscript
 .br
 #!/bin/sh
@@ -1165,9 +1165,9 @@ host3
 .br
 host4
 
-.LP 
+.LP
 Pass a batch script to sbatch on standard input:
-.IP 
+.IP
 $ sbatch \-N4 <<EOF
 .br
 > #!/bin/sh
@@ -1210,5 +1210,5 @@ FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 .SH "SEE ALSO"
-.LP 
+.LP
 sinfo(1), sattach(1), salloc(1), squeue(1), scancel(1), scontrol(1), slurm.conf(5), sched_setaffinity(2), numa(3)
diff --git a/doc/man/man1/sbcast.1 b/doc/man/man1/sbcast.1
index 85215e0e36c02ee7e6fea587d69ab8ef1b9b96ca..20d8bf0835fd6cdb918029fa04bd5af087a1d041 100644
--- a/doc/man/man1/sbcast.1
+++ b/doc/man/man1/sbcast.1
@@ -4,20 +4,20 @@
 sbcast \- transmit a file to the nodes allocated to a SLURM job.
 
 .SH "SYNOPSIS"
-\fBsbcast\fR [\-CfpstvV] SOURCE DEST 
+\fBsbcast\fR [\-CfpstvV] SOURCE DEST
 
 .SH "DESCRIPTION"
-\fBsbcast\fR is used to transmit a file to all nodes allocated 
+\fBsbcast\fR is used to transmit a file to all nodes allocated
 to the currenly active SLURM job.
 This command should only be executed from within a SLURM batch
-job or within the shell spawned after a SLURM job's resource 
-allocation. 
+job or within the shell spawned after a SLURM job's resource
+allocation.
 \fBSOURCE\fR is the name of a file on the current node.
-\fBDEST\fR should be the fully qualified pathname for the 
-file copy to be created on each node. 
+\fBDEST\fR should be the fully qualified pathname for the
+file copy to be created on each node.
 \fBDEST\fR should be on a file system local to that node.
-Note that parallel file systems \fImay\fR provide better performance 
-than \fBsbcast\fR can provide, although performance will vary 
+Note that parallel file systems \fImay\fR provide better performance
+than \fBsbcast\fR can provide, although performance will vary
 by file size, degree of parallelism, and network type.
 
 .SH "OPTIONS"
@@ -33,20 +33,20 @@ Specify the fanout of messages used for file transfer.
 Maximum value is currently eight.
 .TP
 \fB\-p\fR, \fB\-\-preserve\fR
-Preserves modification times, access times, and modes from the 
+Preserves modification times, access times, and modes from the
 original file.
 .TP
 \fB\-s\fR \fIsize\fR, \fB\-\-size\fR=\fIsize\fR
 Specify the block size used for file broadcast.
-The size can have a suffix of \fIk\fR or \fIm\fR for kilobytes 
+The size can have a suffix of \fIk\fR or \fIm\fR for kilobytes
 or megabytes respecitively (defaults to bytes).
-This size subject to rounding and range limits to maintain 
-good performance. This value may need to be set on systems 
+This size subject to rounding and range limits to maintain
+good performance. This value may need to be set on systems
 with very limited memory.
 .TP
 \fB\-t\fB \fIseconds\fR, fB\-\-timeout\fR=\fIseconds\fR
 Specify the message timeout in seconds.
-The default value is \fIMessageTimeout\fR as reported by 
+The default value is \fIMessageTimeout\fR as reported by
 "scontrol show config".
 Setting a higher value may be necessitated by relatively slow
 I/O performance on the compute node disks.
@@ -57,11 +57,11 @@ Provide detailed event logging through program execution.
 \fB\-V\fR, \fB\-\-version\fR
 Print version information and exit.
 
-.SH "ENVIRONMENT VARIABLES" 
-.PP 
-Some \fBsbcast\fR options may be set via environment variables. 
-These environment variables, along with their corresponding options, 
-are listed below. (Note: Command line options will always override 
+.SH "ENVIRONMENT VARIABLES"
+.PP
+Some \fBsbcast\fR options may be set via environment variables.
+These environment variables, along with their corresponding options,
+are listed below. (Note: Command line options will always override
 these settings.)
 .TP 20
 \fBSBCAST_COMPRESS\fR
@@ -84,7 +84,7 @@ these settings.)
 
 .SH "EXAMPLE"
 
-Using a batch script, transmit local file \fBmy.prog\fR to 
+Using a batch script, transmit local file \fBmy.prog\fR to
 \fB/tmp/my.proc\fR on the local nodes and then execute it.
 
 .nf
diff --git a/doc/man/man1/scancel.1 b/doc/man/man1/scancel.1
index e9fc04c05585cb5f722bb4ca5665074f13e08e07..ce3e2b6cf301034bbd30d013c581e7e6a6556e89 100644
--- a/doc/man/man1/scancel.1
+++ b/doc/man/man1/scancel.1
@@ -7,12 +7,12 @@ scancel \- Used to signal jobs or job steps that are under the control of Slurm.
 \fBscancel\fR [\fIOPTIONS\fR...] [\fIjob_id\fR[.\fIstep_id\fR]] [\fIjob_id\fR[.\fIstep_id\fR]...]
 
 .SH "DESCRIPTION"
-\fBscancel\fR is used to signal or cancel jobs or job steps. An arbitrary number 
+\fBscancel\fR is used to signal or cancel jobs or job steps. An arbitrary number
 of jobs or job steps may be signaled using job specification filters or a
-space separated list of specific job and/or job step IDs. A job or job step 
-can only be signaled by the owner of that job or user root. If an attempt is 
-made by an unauthorized user to signal a job or job step, an error message will 
-be printed and the job will not be signaled. 
+space separated list of specific job and/or job step IDs. A job or job step
+can only be signaled by the owner of that job or user root. If an attempt is
+made by an unauthorized user to signal a job or job step, an error message will
+be printed and the job will not be signaled.
 
 .SH "OPTIONS"
 
@@ -26,7 +26,7 @@ Signal the batch job shell and its child processes.
 This is not applicable if \fIstep_id\fR is specified.
 NOTE: The shell itself may exit upon receipt of many signals.
 You may avoid this by explicitly trap signals within the shell
-script (e.g. "trap <arg> <signals>"). See the shell documentation 
+script (e.g. "trap <arg> <signals>"). See the shell documentation
 for details.
 
 .TP
@@ -85,14 +85,14 @@ This option is incompatible with the \fB\-\-quiet\fR option.
 
 .TP
 \fB\-V\fR, \fB\-\-version\fR
-Print the version number of the scancel command. 
+Print the version number of the scancel command.
 
 .TP
 \fB\-w\fR, \fB\-\-nodelist=\fIhost1,host2,...\fR
-Cancel any jobs using any of the given hosts.  The list may be specified as 
-a comma\-separated list of hosts, a range of hosts (host[1\-5,7,...] for 
-example), or a filename. The host list will be assumed to be a filename only 
-if it contains a "/" character. 
+Cancel any jobs using any of the given hosts.  The list may be specified as
+a comma\-separated list of hosts, a range of hosts (host[1\-5,7,...] for
+example), or a filename. The host list will be assumed to be a filename only
+if it contains a "/" character.
 
 .TP
 \fB\-\-wckey\fR=\fIwckey\fR
@@ -108,23 +108,23 @@ The Slurm job ID to be signaled.
 
 .TP
 \fIstep_id\fP
-The step ID of the job step to be signaled. 
+The step ID of the job step to be signaled.
 If not specified, the operation is performed at the level of a job.
 
-If neither \fB\-\-batch\fR nor \fB\-\-signal\fR are used, 
+If neither \fB\-\-batch\fR nor \fB\-\-signal\fR are used,
 the entire job will be terminated.
 
 When \fB\-\-batch\fR is used, the batch shell processes will be signaled.
-The child processes of the shell will not be signalled by SLURM, but 
+The child processes of the shell will not be signalled by SLURM, but
 the shell may forward the signal.
 
-When \fB\-\-batch\fR is not used but \fB\-\-signal\fR is used, 
-then all job steps will be signalled, but the batch script itself 
+When \fB\-\-batch\fR is not used but \fB\-\-signal\fR is used,
+then all job steps will be signalled, but the batch script itself
 will not be signalled.
 
 .SH "ENVIRONMENT VARIABLES"
 .PP
-Some \fBscancel\fR options may be set via environment variables. These 
+Some \fBscancel\fR options may be set via environment variables. These
 environment variables, along with their corresponding options, are listed below.
 (Note: commandline options will always override these settings)
 .TP 20
@@ -166,15 +166,15 @@ The location of the SLURM configuration file.
 
 .SH "NOTES"
 .LP
-If multiple filters are supplied (e.g. \fB\-\-partition\fR and \fB\-\-name\fR) 
+If multiple filters are supplied (e.g. \fB\-\-partition\fR and \fB\-\-name\fR)
 only the jobs satisfying all of the filtering options will be signaled.
 .LP
-If a signal value of "KILL" (the default value) is to be sent to an entire 
-job, this will result in the job's termination and its resource allocation 
+If a signal value of "KILL" (the default value) is to be sent to an entire
+job, this will result in the job's termination and its resource allocation
 being released.
 .LP
-Cancelling a job step will not result in a job being terminated. 
-The job must be cancelled to release a resource allocation. 
+Cancelling a job step will not result in a job being terminated.
+The job must be cancelled to release a resource allocation.
 
 .SH "EXAMPLES"
 .TP
diff --git a/doc/man/man1/scontrol.1 b/doc/man/man1/scontrol.1
index 5540b5bd687771d7dc8a98b16149d661e2af821f..836d1cba1a6ec12d52a7ea012f2e538a74269096 100644
--- a/doc/man/man1/scontrol.1
+++ b/doc/man/man1/scontrol.1
@@ -7,26 +7,26 @@ scontrol \- Used view and modify Slurm configuration and state.
 \fBscontrol\fR [\fIOPTIONS\fR...] [\fICOMMAND\fR...]
 
 .SH "DESCRIPTION"
-\fBscontrol\fR is used to view or modify Slurm configuration including: job, 
-job step, node, partition, reservation, and overall system configuration. Most 
-of the commands can only be executed by user root. If an attempt to view or modify 
-configuration information is made by an unauthorized user, an error message 
-will be printed and the requested action will not occur. If no command is 
-entered on the execute line, \fBscontrol\fR will operate in an interactive 
-mode and prompt for input. It will continue prompting for input and executing 
-commands until explicitly terminated. If a command is entered on the execute 
-line, \fBscontrol\fR will execute that command and terminate. All commands 
-and options are case\-insensitive, although node names, partition names, and 
-reservation names are case\-sensitive (node names "LX" and "lx" are distinct). 
-All commands and options can be abbreviated to the extent that the 
+\fBscontrol\fR is used to view or modify Slurm configuration including: job,
+job step, node, partition, reservation, and overall system configuration. Most
+of the commands can only be executed by user root. If an attempt to view or modify
+configuration information is made by an unauthorized user, an error message
+will be printed and the requested action will not occur. If no command is
+entered on the execute line, \fBscontrol\fR will operate in an interactive
+mode and prompt for input. It will continue prompting for input and executing
+commands until explicitly terminated. If a command is entered on the execute
+line, \fBscontrol\fR will execute that command and terminate. All commands
+and options are case\-insensitive, although node names, partition names, and
+reservation names are case\-sensitive (node names "LX" and "lx" are distinct).
+All commands and options can be abbreviated to the extent that the
 specification is unique.
 
 .SH "OPTIONS"
 .TP
 \fB\-a\fR, \fB\-\-all\fR
-When the \fIshow\fR command is used, then display all partitions, their jobs 
-and jobs steps. This causes information to be displayed about partitions 
-that are configured as hidden and partitions that are unavailable to user's 
+When the \fIshow\fR command is used, then display all partitions, their jobs
+and job steps. This causes information to be displayed about partitions
+that are configured as hidden and partitions that are unavailable to user's
 group.
 .TP
 \fB\-d\fR, \fB\-\-detail\fR
@@ -36,8 +36,8 @@ Causes the \fBshow\fR command to provide additional details where available.
 Print a help message describing the usage of scontrol.
 .TP
 \fB\-\-hide\fR
-Do not display information about hidden partitions, their jobs and job steps. 
-By default, neither partitions that are configured as hidden nor those partitions 
+Do not display information about hidden partitions, their jobs and job steps.
+By default, neither partitions that are configured as hidden nor those partitions
 unavailable to user's group will be displayed (i.e. this is the default behavior).
 .TP
 \fB\-o\fR, \fB\-\-oneliner\fR
@@ -47,7 +47,7 @@ Print information one line per record.
 Print no warning or informational messages, only fatal error messages.
 .TP
 \fB\-v\fR, \fB\-\-verbose\fR
-Print detailed event logging. Multiple \fB\-v\fR's will further increase 
+Print detailed event logging. Multiple \fB\-v\fR's will further increase
 the verbosity of logging. By default only errors will be displayed.
 
 .TP
@@ -58,7 +58,7 @@ Print version information and exit.
 
 .TP
 \fBall\fP
-Show all partitions, their jobs and jobs steps. This causes information to be 
+Show all partitions, their jobs and job steps. This causes information to be
 displayed about partitions that are configured as hidden and partitions that
 are unavailable to user's group.
 
@@ -76,22 +76,22 @@ Acceptable values for \fICKPT_OP\fP include:
 .RS
 .TP
 \fIdisable\fP (disable future checkpoints)
-.TP 
+.TP
 \fIenable\fP (enable future checkpoints)
 .TP
 \fIable\fP (test if presently not disabled, report start time if checkpoint in progress)
 .TP
 \fIcreate\fP (create a checkpoint and continue the job step)
 .TP
-\fIvacate\fP (create a checkpoint and terminate the job step) 
+\fIvacate\fP (create a checkpoint and terminate the job step)
 .TP
-\fIerror\fP (report the result for the last checkpoint request, error code and message) 
+\fIerror\fP (report the result for the last checkpoint request, error code and message)
 .TP
 \fIrestart\fP (restart execution of the previously checkpointed job steps)
 .TP
 Acceptable values for \fICKPT_OP\fP include:
 .TP
-\fIMaxWait=<seconds>\fP maximum time for checkpoint to be written. 
+\fIMaxWait=<seconds>\fP maximum time for checkpoint to be written.
 Default value is 10 seconds.
 Valid with \fIcreate\fP and \fIvacate\fP options only.
 .TP
@@ -106,13 +106,13 @@ Valid with the \fIrestart\fP option only.
 
 .TP
 \fBcreate\fP \fISPECIFICATION\fP
-Create a new partition or reservation.  See the full list of parameters 
+Create a new partition or reservation.  See the full list of parameters
 below.  Include the tag "res" to create a reservation without specifying
 a reservation name.
 
 .TP
 \fBcompleting\fP
-Display all jobs in a COMPLETING state along with associated nodes in either a 
+Display all jobs in a COMPLETING state along with associated nodes in either a
 COMPLETING or DOWN state.
 
 .TP
@@ -125,14 +125,14 @@ The two \fISPECIFICATION\fP choices are \fIPartitionName=<name>\fP and
 .TP
 \fBdetail\fP
 Causes the \fIshow\fP command to provide additional details where available,
-namely the specific CPUs and NUMA memory allocated on each node. 
+namely the specific CPUs and NUMA memory allocated on each node.
 Note that on computers with hyperthreading enabled and SLURM configured to
 allocate cores, each listed CPU represents one physical core.
 Each hyperthread on that core can be allocated a separate task, so a job's
 CPU count and task count may differ.
 See the \fB\-\-cpu_bind\fR and \fB\-\-mem_bind\fR option descriptions in
 srun man pages for more information.
-The \fBdetail\fP option is currently only supported for the \fIshow job\fP 
+The \fBdetail\fP option is currently only supported for the \fIshow job\fP
 command.
 
 .TP
@@ -146,13 +146,13 @@ Display a description of scontrol options and commands.
 
 .TP
 \fBhide\fP
-Do not display partition, job or jobs step information for partitions that are 
-configured as hidden or partitions that are unavailable to the user's group. 
+Do not display partition, job or job step information for partitions that are
+configured as hidden or partitions that are unavailable to the user's group.
 This is the default behavior.
 
 .TP
 \fBnotify\fP \fIjob_id\fP \fImessage\fP
-Send a message to standard error of the srun command associated with the 
+Send a message to standard error of the srun command associated with the
 specified \fIjob_id\fP.
 
 .TP
@@ -161,7 +161,7 @@ Print information one line per record.
 
 .TP
 \fBpidinfo\fP \fIproc_id\fP
-Print the Slurm job id and scheduled termination time corresponding to the 
+Print the Slurm job id and scheduled termination time corresponding to the
 supplied process id, \fIproc_id\fP, on the current node.  This will work only
 with processes on node on which scontrol is run, and only for those processes
 spawned by SLURM and their descendants.
@@ -170,11 +170,11 @@ spawned by SLURM and their descendants.
 \fBlistpids\fP [\fIjob_id\fP[.\fIstep_id\fP]] [\fINodeName\fP]
 Print a listing of the process IDs in a job step (if JOBID.STEPID is provided),
 or all of the job steps in a job (if \fIjob_id\fP is provided), or all of the job
-steps in all of the jobs on the local node (if \fIjob_id\fP is not provided 
-or \fIjob_id\fP is "*").  This will work only with processes on the node on 
+steps in all of the jobs on the local node (if \fIjob_id\fP is not provided
+or \fIjob_id\fP is "*").  This will work only with processes on the node on
 which scontrol is run, and only for those processes spawned by SLURM and
 their descendants. Note that some SLURM configurations
-(\fIProctrackType\fP value of \fIpgid\fP or \fIaix\fP) 
+(\fIProctrackType\fP value of \fIpgid\fP or \fIaix\fP)
 are unable to identify all processes associated with a job or job step.
 
 Note that the NodeName option is only really useful when you have multiple
@@ -183,7 +183,7 @@ one host are, in general, only used by SLURM developers.
 
 .TP
 \fBping\fP
-Ping the primary and secondary slurmctld daemon and report if 
+Ping the primary and secondary slurmctld daemon and report if
 they are responding.
 
 .TP
@@ -196,18 +196,18 @@ Terminate the execution of scontrol.
 
 .TP
 \fBreconfigure\fP
-Instruct all Slurm daemons to re\-read the configuration file. 
+Instruct all Slurm daemons to re\-read the configuration file.
 This command does not restart the daemons.
-This mechanism would be used to modify configuration parameters (Epilog, 
-Prolog, SlurmctldLogFile, SlurmdLogFile, etc.) register the physical 
-addition or removal of nodes from the cluster or recognize the change 
-of a node's configuration, such as the addition of memory or processors. 
-The Slurm controller (slurmctld) forwards the request all other daemons 
+This mechanism would be used to modify configuration parameters (Epilog,
+Prolog, SlurmctldLogFile, SlurmdLogFile, etc.) register the physical
+addition or removal of nodes from the cluster or recognize the change
+of a node's configuration, such as the addition of memory or processors.
+The Slurm controller (slurmctld) forwards the request to all other daemons
 (slurmd daemon on each compute node). Running jobs continue execution.
-Most configuration parameters can be changed by just running this command, 
-however, SLURM daemons should be shutdown and restarted if any of these 
-parameters are to be changed: AuthType, BackupAddr, BackupController, 
-ControlAddr, ControlMach, PluginDir, StateSaveLocation, SlurmctldPort 
+Most configuration parameters can be changed by just running this command,
+however, SLURM daemons should be shutdown and restarted if any of these
+parameters are to be changed: AuthType, BackupAddr, BackupController,
+ControlAddr, ControlMach, PluginDir, StateSaveLocation, SlurmctldPort
 or SlurmdPort.
 
 .TP
@@ -221,93 +221,93 @@ Requeue a running or pending SLURM batch job.
 .TP
 \fBsetdebug\fP \fILEVEL\fP
 Change the debug level of the slurmctld daemon.
-\fILEVEL\fP may be an integer value between zero and nine (using the 
-same values as \fISlurmctldDebug\fP in the \fIslurm.conf\fP file) or 
-the name of the most detailed message type to be printed: 
-"quiet", "fatal", "error", "info", "verbose", "debug", "debug2", "debug3", 
+\fILEVEL\fP may be an integer value between zero and nine (using the
+same values as \fISlurmctldDebug\fP in the \fIslurm.conf\fP file) or
+the name of the most detailed message type to be printed:
+"quiet", "fatal", "error", "info", "verbose", "debug", "debug2", "debug3",
 "debug4", or "debug5".
-This value is temporary and will be overwritten whenever the slurmctld 
-daemon reads the slurm.conf configuration file (e.g. when the daemon 
+This value is temporary and will be overwritten whenever the slurmctld
+daemon reads the slurm.conf configuration file (e.g. when the daemon
 is restarted or \fBscontrol reconfigure\fR is executed).
 
 .TP
 \fBshow\fP \fIENTITY\fP \fIID\fP
 Display the state of the specified entity with the specified identification.
-\fIENTITY\fP may be \fIconfig\fP, \fIdaemons\fP, \fIjob\fP, \fInode\fP, 
+\fIENTITY\fP may be \fIconfig\fP, \fIdaemons\fP, \fIjob\fP, \fInode\fP,
 \fIpartition\fP, \fIreservation\fP, \fIslurmd\fP, \fIstep\fP, \fItopology\fP,
-\fIhostlist\fP or \fIhostnames\fP 
+\fIhostlist\fP or \fIhostnames\fP
 (also \fIblock\fP or \fIsubbp\fP on BlueGene systems).
-\fIID\fP can be used to identify a specific element of the identified 
-entity: the configuration parameter name, job ID, node name, partition name, 
-reservation name, or job step ID for \fIconfig\fP, \fIjob\fP, \fInode\fP, 
-\fIpartition\fP, or \fIstep\fP respectively. 
+\fIID\fP can be used to identify a specific element of the identified
+entity: the configuration parameter name, job ID, node name, partition name,
+reservation name, or job step ID for \fIconfig\fP, \fIjob\fP, \fInode\fP,
+\fIpartition\fP, or \fIstep\fP respectively.
 For an \fIENTITY\fP of \fItopology\fP, the \fIID\fP may be a node or switch name.
-If one node name is specified, all switches connected to that node (and 
+If one node name is specified, all switches connected to that node (and
 their parent switches) will be shown.
-If more than one node name is specified, only switches that connect to all 
+If more than one node name is specified, only switches that connect to all
 named nodes will be shown.
-\fIhostnames\fP takes an optional hostlist expression as input and 
-writes a list of individual host names to standard output (one per 
-line). If no hostlist expression is supplied, the contents of the 
-SLURM_NODELIST environment variable is used. For example "tux[1\-3]" 
+\fIhostnames\fP takes an optional hostlist expression as input and
+writes a list of individual host names to standard output (one per
+line). If no hostlist expression is supplied, the contents of the
+SLURM_NODELIST environment variable is used. For example "tux[1\-3]"
 is mapped to "tux1","tux2" and "tux3" (one hostname per line).
-\fIhostlist\fP takes a list of host names and prints the hostlist 
-expression for them (the inverse of \fIhostnames\fP). 
-\fIhostlist\fP can also take the absolute pathname of a file 
+\fIhostlist\fP takes a list of host names and prints the hostlist
+expression for them (the inverse of \fIhostnames\fP).
+\fIhostlist\fP can also take the absolute pathname of a file
 (beginning with the character '/') containing a list of hostnames.
-Multiple node names may be specified using simple node range expressions 
-(e.g. "lx[10\-20]"). All other \fIID\fP values must identify a single 
+Multiple node names may be specified using simple node range expressions
+(e.g. "lx[10\-20]"). All other \fIID\fP values must identify a single
 element. The job step ID is of the form "job_id.step_id", (e.g. "1234.1").
-\fIslurmd\fP reports the current status of the slurmd daemon executing 
-on the same node from which the scontrol command is executed (the 
+\fIslurmd\fP reports the current status of the slurmd daemon executing
+on the same node from which the scontrol command is executed (the
 local host). It can be useful to diagnose problems.
 By default, all elements of the entity type specified are printed.
 
 .TP
 \fBshutdown\fP \fIOPTION\fP
 Instruct Slurm daemons to save current state and terminate.
-By default, the Slurm controller (slurmctld) forwards the request all 
-other daemons (slurmd daemon on each compute node). 
-An \fIOPTION\fP of \fIslurmctld\fP or \fIcontroller\fP results in 
-only the slurmctld daemon being shutdown and the slurmd daemons 
+By default, the Slurm controller (slurmctld) forwards the request to all
+other daemons (slurmd daemon on each compute node).
+An \fIOPTION\fP of \fIslurmctld\fP or \fIcontroller\fP results in
+only the slurmctld daemon being shutdown and the slurmd daemons
 remaining active.
 
 .TP
 \fBsuspend\fP \fIjob_id\fP
 Suspend a running job.
 Use the \fIresume\fP command to resume its execution.
-User processes must stop on receipt of SIGSTOP signal and resume 
+User processes must stop on receipt of SIGSTOP signal and resume
 upon receipt of SIGCONT for this operation to be effective.
 Not all architectures and configurations support job suspension.
 
 .TP
 \fBtakeover\fP
 Instruct SLURM's backup controller (slurmctld) to take over system control.
-SLURM's backup controller requests control from the primary and waits for 
+SLURM's backup controller requests control from the primary and waits for
 its termination. After that, it switches from backup mode to controller
-mode. If primary controller can not be contacted, it directly switches to 
-controller mode. This can be used to speed up the SLURM controller 
+mode. If the primary controller cannot be contacted, it directly switches to
+controller mode. This can be used to speed up the SLURM controller
 fail\-over mechanism when the primary node is down.
 This can be used to minimize disruption if the computer executing the
 primary SLURM controller is scheduled down.
 (Note: SLURM's primary controller will take the control back at startup.)
 
 .TP
-\fBupdate\fP \fISPECIFICATION\fP 
-Update job, node, partition, or reservation configuration per the supplied 
-specification. \fISPECIFICATION\fP is in the same format as the Slurm 
+\fBupdate\fP \fISPECIFICATION\fP
+Update job, node, partition, or reservation configuration per the supplied
+specification. \fISPECIFICATION\fP is in the same format as the Slurm
 configuration file and the output of the \fIshow\fP command described above. It
-may be desirable to execute the \fIshow\fP command (described above) on the 
-specific entity you which to update, then use cut\-and\-paste tools to enter 
-updated configuration values to the \fIupdate\fP. Note that while most 
-configuration values can be changed using this command, not all can be changed 
-using this mechanism. In particular, the hardware configuration of a node or 
-the physical addition or removal of nodes from the cluster may only be 
+may be desirable to execute the \fIshow\fP command (described above) on the
+specific entity you wish to update, then use cut\-and\-paste tools to enter
+updated configuration values to the \fIupdate\fP. Note that while most
+configuration values can be changed using this command, not all can be changed
+using this mechanism. In particular, the hardware configuration of a node or
+the physical addition or removal of nodes from the cluster may only be
 accomplished through editing the Slurm configuration file and executing the \fIreconfigure\fP command (described above).
 
 .TP
 \fBverbose\fP
-Print detailed event logging. 
+Print detailed event logging.
 This includes time\-stamps on data structures, record counts, etc.
 
 .TP
@@ -327,11 +327,11 @@ Value may be cleared with blank data value, "Account=".
 .TP
 \fIConnection\fP=<type>
 Reset the node connection type.
-Possible values on Blue Gene are "MESH", "TORUS" and "NAV" 
+Possible values on Blue Gene are "MESH", "TORUS" and "NAV"
 (mesh else torus).
 .TP
 \fIContiguous\fP=<yes|no>
-Set the job's requirement for contiguous (consecutive) nodes to be allocated. 
+Set the job's requirement for contiguous (consecutive) nodes to be allocated.
 Possible values are "YES" and "NO".
 .TP
 \fIDependency\fP=<job_id>
@@ -339,8 +339,8 @@ Defer job's initiation until specified job_id completes.
 Cancel dependency with job_id value of "0", "Dependency=0".
 .TP
 \fIExcNodeList\fP=<nodes>
-Set the job's list of excluded node. Multiple node names may be 
-specified using simple node range expressions (e.g. "lx[10\-20]"). 
+Set the job's list of excluded nodes. Multiple node names may be
+specified using simple node range expressions (e.g. "lx[10\-20]").
 Value may be cleared with blank data value, "ExcNodeList=".
 .TP
 \fIExitCode\fP=<exit>:<sig>
@@ -350,15 +350,15 @@ The second number of the signal that caused the process to terminate if
 it was terminated by a signal.
 .TP
 \fIFeatures\fP=<features>
-Set the job's required node features. Multiple values 
-may be comma separated if all features are required (AND operation) or 
+Set the job's required node features. Multiple values
+may be comma separated if all features are required (AND operation) or
 separated by "|" if any of the specified features are required (OR operation).
 Value may be cleared with blank data value, "Features=".
 .TP
 \fIGeometry\fP=<geo>
 Reset the required job geometry.
-On Blue Gene the value should be three digits separated by 
-"x" or ",". The digits represent the allocation size in 
+On Blue Gene the value should be three digits separated by
+"x" or ",". The digits represent the allocation size in
 X, Y and Z dimensions (e.g. "2x3x4").
 .TP
 \fIRotate\fP=<yes|no>
@@ -369,7 +369,7 @@ Possible values are "YES" and "NO".
 Identify the job to be updated. This specification is required.
 .TP
 \fIMinMemoryCPU\fP=<megabytes>
-Set the job's minimum real memory required per allocated CPU to the specified 
+Set the job's minimum real memory required per allocated CPU to the specified
 value.
 Either \fIMinMemoryCPU\fP or \fIMinMemoryNode\fP may be set, but not both.
 .TP
@@ -389,7 +389,7 @@ Set the job's name to the specified value.
 \fINice\fP[=delta]
 Adjust job's priority by the specified value. Default value is 100.
 The adjustment range is from \-10000 (highest priority)
-to 10000 (lowest priority). 
+to 10000 (lowest priority).
 Nice value changes are not additive, but overwrite any prior nice
 value and are applied to the job's base priority.
 Only privileged users can specify a negative adjustment.
@@ -414,8 +414,8 @@ Explicitly setting a job's priority clears any previously set nice value.
 Set the job's count of minimum cores per socket to the specified value.
 .TP
 \fIReqNodeList\fP=<nodes>
-Set the job's list of required node. Multiple node names may be specified using 
-simple node range expressions (e.g. "lx[10\-20]"). 
+Set the job's list of required nodes. Multiple node names may be specified using
+simple node range expressions (e.g. "lx[10\-20]").
 Value may be cleared with blank data value, "ReqNodeList=".
 .TP
 \fIReqSockets\fP=<count>
@@ -437,7 +437,7 @@ You may also specify \fImidnight\fR, \fInoon\fR, or
 \fIteatime\fR (4pm) and you can have a time\-of\-day suffixed
 with \fIAM\fR or \fIPM\fR for running in the morning or the evening.
 You can also say what day the job will be run, by specifying
-a date of the form \fIMMDDYY\fR or \fIMM/DD/YY\fR or \fIMM.DD.YY\fR, 
+a date of the form \fIMMDDYY\fR or \fIMM/DD/YY\fR or \fIMM.DD.YY\fR,
 or a date and time as \fIYYYY\-MM\-DD[THH:MM[:SS]]\fR.  You can also
 give times like \fInow + count time\-units\fR, where the time\-units
 can be \fIminutes\fR, \fIhours\fR, \fIdays\fR, or \fIweeks\fR
@@ -462,10 +462,10 @@ already passed for that year, in which case the next year is used.
 \fITimeLimit\fP=<time>
 The job's time limit.
 Output format is [days\-]hours:minutes:seconds or "UNLIMITED".
-Input format (for \fBupdate\fR command) set is minutes, minutes:seconds, 
-hours:minutes:seconds, days\-hours, days\-hours:minutes or 
+Input format (for \fBupdate\fR command) set is minutes, minutes:seconds,
+hours:minutes:seconds, days\-hours, days\-hours:minutes or
 days\-hours:minutes:seconds.
-Time resolution is one minute and second values are rounded up to 
+Time resolution is one minute and second values are rounded up to
 the next minute.
 
 .TP
@@ -509,7 +509,7 @@ The user and group under which the job was submitted.
 \fBSPECIFICATIONS FOR UPDATE COMMAND, NODES\fR
 .TP
 \fINodeName\fP=<name>
-Identify the node(s) to be updated. Multiple node names may be specified using 
+Identify the node(s) to be updated. Multiple node names may be specified using
 simple node range expressions (e.g. "lx[10\-20]"). This specification is required.
 .TP
 \fIFeatures\fP=<features>
@@ -522,37 +522,37 @@ slurm.conf with any changes meant to be persistent.
 
 .TP
 \fIReason\fP=<reason>
-Identify the reason the node is in a "DOWN" or "DRAINED", "DRAINING", 
+Identify the reason the node is in a "DOWN" or "DRAINED", "DRAINING",
 "FAILING" or "FAIL" state.
 Use quotes to enclose a reason having more than one word.
 
 .TP
 \fIState\fP=<state>
-Identify the state to be assigned to the node. Possible values are  "NoResp", 
+Identify the state to be assigned to the node. Possible values are  "NoResp",
 "ALLOC", "ALLOCATED", "DOWN", "DRAIN", "FAIL", "FAILING", "IDLE", "MAINT",
-"POWER_DOWN", "POWER_UP", or "RESUME". 
-If you want to remove a node from service, you typically want to set 
-it's state to "DRAIN". 
-"FAILING" is similar to "DRAIN" except that some applications will  
+"POWER_DOWN", "POWER_UP", or "RESUME".
+If you want to remove a node from service, you typically want to set
+its state to "DRAIN".
+"FAILING" is similar to "DRAIN" except that some applications will
 seek to relinquish those nodes before the job completes.
-"RESUME" is not an actual node state, but will return a "DRAINED", "DRAINING", 
+"RESUME" is not an actual node state, but will return a "DRAINED", "DRAINING",
 or "DOWN" node to service, either "IDLE" or "ALLOCATED" state as appropriate.
 Setting a node "DOWN" will cause all running and suspended jobs on that
 node to be terminated.
 "POWER_DOWN" and "POWER_UP" will use the configured \fISuspendProg\fR and
-\fIResumeProg\fR programs to explicitly place a node in or out of a power 
+\fIResumeProg\fR programs to explicitly place a node in or out of a power
 saving mode.
 The "NoResp" state will only set the "NoResp" flag for a node without
 changing its underlying state.
 While all of the above states are valid, some of them are not valid new
-node states given their prior state. 
+node states given their prior state.
 Generally only "DRAIN", "FAIL" and "RESUME" should be used.
 
 .TP
 \fIWeight\fP=<weight>
-Identify weight to be associated with specified nodes. This allows 
-dynamic changes to weight associated with nodes, which will be used 
-for the subsequent node allocation decisions. 
+Identify weight to be associated with specified nodes. This allows
+dynamic changes to weight associated with nodes, which will be used
+for the subsequent node allocation decisions.
 Any previously identified weight will be overwritten with the new value.\fBNOTE:\fR The \fIWeight\fP associated with nodes will be reset to
 the values specified in slurm.conf (if any) upon slurmctld restart
 or reconfiguration.
@@ -562,27 +562,27 @@ Update slurm.conf with any changes meant to be persistent.
 \fBSPECIFICATIONS FOR CREATE, UPDATE, AND DELETE COMMANDS, PARTITIONS\fR
 .TP
 \fIAllowGroups\fP=<name>
-Identify the user groups which may use this partition. 
-Multiple groups may be specified in a comma separated list. 
+Identify the user groups which may use this partition.
+Multiple groups may be specified in a comma separated list.
 To permit all groups to use the partition specify "AllowGroups=ALL".
 .TP
 \fIDefault\fP=<yes|no>
-Specify if this partition is to be used by jobs which do not explicitly 
-identify a partition to use. 
+Specify if this partition is to be used by jobs which do not explicitly
+identify a partition to use.
 Possible output values are "YES" and "NO".
-In order to change the default partition of a running system, 
+In order to change the default partition of a running system,
 use the scontrol update command and set Default=yes for the partition
 that you want to become the new default.
 
 .TP
 \fIHidden\fP=<yes|no>
-Specify if the partition and its jobs should be hidden from view. 
-Hidden partitions will by default not be reported by SLURM APIs 
-or commands. 
+Specify if the partition and its jobs should be hidden from view.
+Hidden partitions will by default not be reported by SLURM APIs
+or commands.
 Possible values are "YES" and "NO".
 .TP
 \fIMaxNodes\fP=<count>
-Set the maximum number of nodes which will be allocated to any single job 
+Set the maximum number of nodes which will be allocated to any single job
 in the partition. Specify a number, "INFINITE" or "UNLIMITED".  (On a
 Bluegene type system this represents a c\-node count.)
 
@@ -590,21 +590,21 @@ Bluegene type system this represents a c\-node count.)
 \fIMaxTime\fP=<time>
 The maximum run time for jobs.
 Output format is [days\-]hours:minutes:seconds or "UNLIMITED".
-Input format (for \fBupdate\fR command) is minutes, minutes:seconds, 
-hours:minutes:seconds, days\-hours, days\-hours:minutes or 
+Input format (for \fBupdate\fR command) is minutes, minutes:seconds,
+hours:minutes:seconds, days\-hours, days\-hours:minutes or
 days\-hours:minutes:seconds.
-Time resolution is one minute and second values are rounded up to 
+Time resolution is one minute and second values are rounded up to
 the next minute.
 
 .TP
 \fIMinNodes\fP=<count>
-Set the minimum number of nodes which will be allocated to any single job 
+Set the minimum number of nodes which will be allocated to any single job
 in the partition.   (On a Bluegene type system this represents a c\-node count.)
 
 .TP
 \fINodes\fP=<name>
-Identify the node(s) to be associated with this partition. Multiple node names 
-may be specified using simple node range expressions (e.g. "lx[10\-20]"). 
+Identify the node(s) to be associated with this partition. Multiple node names
+may be specified using simple node range expressions (e.g. "lx[10\-20]").
 Note that jobs may only be associated with one partition at any time.
 Specify a blank data value to remove all nodes from a partition: "Nodes=".
 
@@ -614,23 +614,23 @@ Identify the partition to be updated. This specification is required.
 
 .TP
 \fIRootOnly\fP=<yes|no>
-Specify if only allocation requests initiated by user root will be satisfied. 
-This can be used to restrict control of the partition to some meta\-scheduler. 
+Specify if only allocation requests initiated by user root will be satisfied.
+This can be used to restrict control of the partition to some meta\-scheduler.
 Possible values are "YES" and "NO".
 
 .TP
 \fIShared\fP=<yes|no|exclusive|force>[:<job_count>]
-Specify if nodes in this partition can be shared by multiple jobs. 
+Specify if nodes in this partition can be shared by multiple jobs.
 Possible values are "YES", "NO", "EXCLUSIVE" and "FORCE".
 An optional job count specifies how many jobs can be allocated to use
 each resource.
 
 .TP
 \fIState\fP=<up|down>
-Specify if jobs can be allocated nodes in this partition. 
+Specify if jobs can be allocated nodes in this partition.
 Possible values are"UP" and "DOWN".
-If a partition allocated nodes to running jobs, those jobs will continue 
-execution even after the partition's state is set to "DOWN". The jobs 
+If a partition allocated nodes to running jobs, those jobs will continue
+execution even after the partition's state is set to "DOWN". The jobs
 must be explicitly canceled to force their termination.
 
 .TP
@@ -639,16 +639,16 @@ must be explicitly canceled to force their termination.
 
 .TP
 \fIReservation\fP=<name>
-Identify the name of the reservation to be created, updated, or deleted.  
-This parameter is required for update and is the only parameter for delete.  
-For create, if you do not want to give a reservation name, use 
+Identify the name of the reservation to be created, updated, or deleted.
+This parameter is required for update and is the only parameter for delete.
+For create, if you do not want to give a reservation name, use
 "scontrol create res ..." and a name will be created automatically.
 
 .TP
 \fILicenses\fP=<license>
-Specification of licenses (or other resources available on all 
+Specification of licenses (or other resources available on all
 nodes of the cluster) which are to be reserved.
-License names can be followed by an asterisk and count 
+License names can be followed by an asterisk and count
 (the default count is one).
 Multiple license names should be comma separated (e.g.
 "Licenses=foo*4,bar").
@@ -660,14 +660,14 @@ NodeCnt or Nodes.
 
 .TP
 \fINodes\fP=<name>
-Identify the node(s) to be reserved. Multiple node names 
-may be specified using simple node range expressions (e.g. "Nodes=lx[10\-20]"). 
+Identify the node(s) to be reserved. Multiple node names
+may be specified using simple node range expressions (e.g. "Nodes=lx[10\-20]").
 Specify a blank data value to remove all nodes from a reservation: "Nodes=".
 A new reservation must specify either NodeCnt or Nodes.
 
 .TP
 \fIStartTime\fP=<time_spec>
-The start time for the reservation.  A new reservation must specify a start 
+The start time for the reservation.  A new reservation must specify a start
 time.  It accepts times of the form \fIHH:MM:SS\fR for
 a specific time of day (seconds are optional).
 (If that time is already past, the next day is assumed.)
@@ -675,7 +675,7 @@ You may also specify \fImidnight\fR, \fInoon\fR, or
 \fIteatime\fR (4pm) and you can have a time\-of\-day suffixed
 with \fIAM\fR or \fIPM\fR for running in the morning or the evening.
 You can also say what day the job will be run, by specifying
-a date of the form \fIMMDDYY\fR or \fIMM/DD/YY\fR or \fIMM.DD.YY\fR, 
+a date of the form \fIMMDDYY\fR or \fIMM/DD/YY\fR or \fIMM.DD.YY\fR,
 or a date and time as \fIYYYY\-MM\-DD[THH:MM[:SS]]\fR.  You can also
 give times like \fInow + count time\-units\fR, where the time\-units
 can be \fIminutes\fR, \fIhours\fR, \fIdays\fR, or \fIweeks\fR
@@ -685,15 +685,15 @@ and you can tell SLURM to run the job today with the keyword
 
 .TP
 \fIEndTime\fP=<time_spec>
-The end time for the reservation.  A new reservation must specify an end 
+The end time for the reservation.  A new reservation must specify an end
 time or a duration.  Valid formats are the same as for StartTime.
 
 .TP
 \fIDuration\fP=<time>
-The length of a reservation.  A new reservation must specify an end 
-time or a duration.  Valid formats are minutes, minutes:seconds, 
-hours:minutes:seconds, days\-hours, days\-hours:minutes, 
-days\-hours:minutes:seconds, or UNLIMITED.  Time resolution is one minute and 
+The length of a reservation.  A new reservation must specify an end
+time or a duration.  Valid formats are minutes, minutes:seconds,
+hours:minutes:seconds, days\-hours, days\-hours:minutes,
+days\-hours:minutes:seconds, or UNLIMITED.  Time resolution is one minute and
 second values are rounded up to the next minute.
 
 .TP
@@ -702,25 +702,25 @@ Identify the partition to be reserved.
 
 .TP
 \fIFlags\fP=<flags>
-Flags associated with the reservation. 
-In order to remove a flag with the update option, preceed the name with 
-a minus sign. For example: Flags=\-DAILY (NOTE: this option is not supported 
+Flags associated with the reservation.
+In order to remove a flag with the update option, precede the name with
+a minus sign. For example: Flags=\-DAILY (NOTE: this option is not supported
 for all flags).
 Currently supported flags include:
 .RS
 .TP 12
 \fIMAINT\fR
 Maintenance mode, receives special accounting treatment.
-This partition is permitted to use resources that are already in another 
+This partition is permitted to use resources that are already in another
 reservation.
 .TP
 \fIOVERLAP\fR
-This reservation can be allocated resources that are already in another 
+This reservation can be allocated resources that are already in another
 reservation.
 .TP
 \fIIGNORE_JOBS\fR
 Ignore currently running jobs when creating the reservation.
-This can be especially useful when reserving all nodes in the system 
+This can be especially useful when reserving all nodes in the system
 for maintenance.
 .TP
 \fIDAILY\fR
@@ -735,20 +735,20 @@ Reservation is for specific nodes (output only)
 
 .TP
 \fIFeatures\fP=<features>
-Set the reservation's required node features. Multiple values 
-may be "&" separated if all features are required (AND operation) or 
+Set the reservation's required node features. Multiple values
+may be "&" separated if all features are required (AND operation) or
 separated by "|" if any of the specified features are required (OR operation).
 Value may be cleared with blank data value, "Features=".
 
 .TP
 \fIUsers\fP=<user list>
-List of users permitted to use the reserved nodes.  
+List of users permitted to use the reserved nodes.
 E.g.  Users=jones1,smith2.
 A new reservation must specify Users and/or Accounts.
 
 .TP
 \fIAccounts\fP=<account list>
-List of accounts permitted to use the reserved nodes.  
+List of accounts permitted to use the reserved nodes.
 E.g.  Accounts=physcode1,physcode2.  A user in any of the accounts
 may use the reserved nodes.
 A new reservation must specify Users and/or Accounts.
@@ -756,13 +756,13 @@ A new reservation must specify Users and/or Accounts.
 .TP
 \fBSPECIFICATIONS FOR UPDATE, BLOCK \fR
 .TP
-Bluegene systems only! 
+Bluegene systems only!
 .TP
 \fIBlockName\fP=<name>
 Identify the bluegene block to be updated. This specification is required.
 .TP
 \fIState\fP=<free|error|remove>
-This will update the state of a bluegene block to either FREE or ERROR. 
+This will update the state of a bluegene block to either FREE or ERROR.
 (i.e. update BlockName=RMP0 STATE=ERROR) State error will not allow jobs
 to run on the block. \fBWARNING!!!!\fR This will cancel any
 running job on the block!  On dynamically laid out systems REMOVE will
@@ -774,8 +774,8 @@ Identify the bluegene ionodes to be updated (i.e. bg000[0\-3]). This
 specification is required.
 .TP
 
-.SH "ENVIRONMENT VARIABLES" 
-.PP 
+.SH "ENVIRONMENT VARIABLES"
+.PP
 Some \fBscontrol\fR options may
 be set via environment variables. These environment variables,
 along with their corresponding options, are listed below. (Note:
@@ -789,37 +789,37 @@ The location of the SLURM configuration file.
 
 .SH "EXAMPLES"
 .eo
-.br 
+.br
 # scontrol
-.br 
+.br
 scontrol: show part class
-.br 
+.br
 PartitionName=class TotalNodes=10 TotalCPUs=20 RootOnly=NO
-.br 
+.br
    Default=NO Shared=NO State=UP MaxTime=0:30:00 Hidden=NO
 .br
    MinNodes=1 MaxNodes=2 AllowGroups=students
 .br
    Nodes=lx[0031-0040] NodeIndices=31,40,-1
-.br 
+.br
 scontrol: update PartitionName=class MaxTime=60:00 MaxNodes=4
-.br 
+.br
 scontrol: show job 65539
-.br 
+.br
 JobId=65539 UserId=1500 JobState=PENDING TimeLimit=0:20:00
-.br 
-   Priority=100 Partition=batch Name=job01 NodeList=(null) 
-.br 
+.br
+   Priority=100 Partition=batch Name=job01 NodeList=(null)
+.br
    StartTime=0 EndTime=0 Shared=0 ReqProcs=1000
 .br
    ReqNodes=400 Contiguous=1 MinCPUs=4 MinMemoryNode=1024
-.br 
-   MinTmpDisk=2034 ReqNodeList=lx[3000-3003] 
 .br
-   Features=(null) JobScript=/bin/hostname 
-.br 
+   MinTmpDisk=2034 ReqNodeList=lx[3000-3003]
+.br
+   Features=(null) JobScript=/bin/hostname
+.br
 scontrol: update JobId=65539 TimeLimit=30:00 Priority=500
-.br 
+.br
 scontrol: show hostnames tux[1-3]
 .br
 tux1
@@ -860,16 +860,16 @@ details.
 .LP
 /etc/slurm.conf
 .SH "SEE ALSO"
-\fBscancel\fR(1), \fBsinfo\fR(1), \fBsqueue\fR(1), 
+\fBscancel\fR(1), \fBsinfo\fR(1), \fBsqueue\fR(1),
 \fBslurm_checkpoint\fR(3),
 \fBslurm_create_partition\fR(3),
 \fBslurm_delete_partition\fR(3),
-\fBslurm_load_ctl_conf\fR(3), 
-\fBslurm_load_jobs\fR(3), \fBslurm_load_node\fR(3), 
-\fBslurm_load_partitions\fR(3), 
+\fBslurm_load_ctl_conf\fR(3),
+\fBslurm_load_jobs\fR(3), \fBslurm_load_node\fR(3),
+\fBslurm_load_partitions\fR(3),
 \fBslurm_reconfigure\fR(3),  \fBslurm_requeue\fR(3), \fBslurm_resume\fR(3),
 \fBslurm_shutdown\fR(3), \fBslurm_suspend\fR(3),
 \fBslurm_takeover\fR(3),
-\fBslurm_update_job\fR(3), \fBslurm_update_node\fR(3), 
+\fBslurm_update_job\fR(3), \fBslurm_update_node\fR(3),
 \fBslurm_update_partition\fR(3),
 \fBslurm.conf\fR(5)
diff --git a/doc/man/man1/sinfo.1 b/doc/man/man1/sinfo.1
index a3dab17322b9bd9e81ee25361cd7833358b02c69..0b876f96995bb447514ba9553b060a8dab1d5ef6 100644
--- a/doc/man/man1/sinfo.1
+++ b/doc/man/man1/sinfo.1
@@ -4,16 +4,16 @@
 sinfo \- view information about SLURM nodes and partitions.
 
 .SH "SYNOPSIS"
-\fBsinfo\fR [\fIOPTIONS\fR...] 
+\fBsinfo\fR [\fIOPTIONS\fR...]
 .SH "DESCRIPTION"
-\fBsinfo\fR is used to view partition and node information for a 
-system running SLURM. 
+\fBsinfo\fR is used to view partition and node information for a
+system running SLURM.
 
 .SH "OPTIONS"
 
 .TP
 \fB\-a\fR, \fB\-\-all\fR
-Display information about all partions. This causes information to be 
+Display information about all partitions. This causes information to be
 displayed about partitions that are configured as hidden and partitions that
 are unavailable to user's group.
 
@@ -43,26 +43,26 @@ Print a message describing all \fBsinfo\fR options.
 .TP
 
 \fB\-\-hide\fR
-Do not display information about hidden partitions. By default, partitions 
-that are configured as hidden or are not available to the user's group 
+Do not display information about hidden partitions. By default, partitions
+that are configured as hidden or are not available to the user's group
 will not be displayed (i.e. this is the default behavior).
 
 .TP
 \fB\-i <seconds>\fR, \fB\-\-iterate=<seconds>\fR
-Print the state on a periodic basis. 
+Print the state on a periodic basis.
 Sleep for the indicated number of seconds between reports.
 By default, prints a time stamp with the header.
 
 .TP
 \fB\-l\fR, \fB\-\-long\fR
-Print more detailed information. 
+Print more detailed information.
 This is ignored if the \fB\-\-format\fR option is specified.
 
 .TP
 \fB\-n <nodes>\fR, \fB\-\-nodes=<nodes>\fR
-Print information only about the specified node(s). 
-Multiple nodes may be comma separated or expressed using a 
-node range expression. For example "linux[00\-07]" would 
+Print information only about the specified node(s).
+Multiple nodes may be comma separated or expressed using a
+node range expression. For example "linux[00\-07]" would
 indicate eight nodes, "linux00" through "linux07."
 
 .TP
@@ -80,10 +80,10 @@ when running with various options are
 .TP 15
 .I "default"
 "%9P %5a %.10l %.5D %6t %N"
-.TP 
+.TP
 .I "\-\-summarize"
 "%9P %5a %.10l %15F %N"
-.TP 
+.TP
 .I "\-\-long"
 "%9P %5a %.10l %.8s %4r %5h %10g %.5D %11T %N"
 .TP
@@ -104,18 +104,18 @@ when running with various options are
 In the above format strings the use of "#" represents the
 maximum length of an node list to be printed.
 .IP
-The field specifications available include: 
+The field specifications available include:
 .RS
 .TP 4
-\fB%a\fR 
+\fB%a\fR
 State/availability of a partition
 .TP
-\fB%A\fR 
+\fB%A\fR
 Number of nodes by state in the format "allocated/idle".
 Do not use this with a node state option ("%t" or "%T") or
 the different node states will be placed on separate lines.
 .TP
-\fB%c\fR 
+\fB%c\fR
 Number of CPUs per node
 .TP
 \fB%C\fR
@@ -124,91 +124,91 @@ Number of CPUs by state in the format
 state option ("%t" or "%T") or the different node states will
 be placed on separate lines.
 .TP
-\fB%d\fR 
+\fB%d\fR
 Size of temporary disk space per node in megabytes
 .TP
-\fB%D\fR 
+\fB%D\fR
 Number of nodes
 .TP
 \fB%E\fR
 The reason a node is unavailable (down, drained, or draining states).
-This is the same as \fB%R\fR except the entries will be sorted by 
+This is the same as \fB%R\fR except the entries will be sorted by
 time rather than the reason string.
 .TP
-\fB%f\fR 
+\fB%f\fR
 Features associated with the nodes
 .TP
-\fB%F\fR 
+\fB%F\fR
 Number of nodes by state in the format
 "allocated/idle/other/total".  Do not use this with a node
 state option ("%t" or "%T") or the different node states will
 be placed on separate lines.
 .TP
-\fB%g\fR 
+\fB%g\fR
 Groups which may use the nodes
 .TP
-\fB%h\fR 
+\fB%h\fR
 Jobs may share nodes, "yes", "no", or "force"
 .TP
-\fB%l\fR 
+\fB%l\fR
 Maximum time for any job in the format "days\-hours:minutes:seconds"
 .TP
 \fB%L\fR
 Default time for any job in the format "days\-hours:minutes:seconds"
 .TP
-\fB%m\fR 
+\fB%m\fR
 Size of memory per node in megabytes
 .TP
-\fB%N\fR 
+\fB%N\fR
 List of node names
 .TP
-\fB%P\fR 
+\fB%P\fR
 Partition name
 .TP
-\fB%r\fR 
+\fB%r\fR
 Only user root may initiate jobs, "yes" or "no"
 .TP
-\fB%R\fR 
-The reason a node is unavailable (down, drained, draining, 
+\fB%R\fR
+The reason a node is unavailable (down, drained, draining,
 fail or failing states)
 .TP
-\fB%s\fR 
+\fB%s\fR
 Maximum job size in nodes
 .TP
-\fB%S\fR 
+\fB%S\fR
 Allowed allocating nodes
 .TP
-\fB%t\fR 
+\fB%t\fR
 State of nodes, compact form
 .TP
-\fB%T\fR 
+\fB%T\fR
 State of nodes, extended form
 .TP
-\fB%w\fR 
+\fB%w\fR
 Scheduling weight of the nodes
 .TP
-\fB%X\fR 
+\fB%X\fR
 Number of sockets per node
 .TP
-\fB%Y\fR 
+\fB%Y\fR
 Number of cores per socket
 .TP
-\fB%Z\fR 
+\fB%Z\fR
 Number of threads per core
 .TP
-\fB%z\fR 
+\fB%z\fR
 Extended processor information: number of sockets, cores, threads (S:C:T) per node
 .TP
-\fB%.<*>\fR 
+\fB%.<*>\fR
 right justification of the field
 .TP
-\fB%<Number><*>\fR 
+\fB%<Number><*>\fR
 size of field
 .RE
 
 .TP
 \fB\-p <partition>\fR, \fB\-\-partition=<partition>\fR
-Print information only about the specified partition.  
+Print information only about the specified partition.
 
 .TP
 \fB\-r\fR, \fB\-\-responding\fR
@@ -216,16 +216,16 @@ If set only report state information for responding nodes.
 
 .TP
 \fB\-R\fR, \fB\-\-list\-reasons\fR
-List reasons nodes are in the down, drained, fail or failing state. 
-When nodes are in these states SLURM supports optional inclusion 
-of a "reason" string by an administrator. 
-This option will display the first 35 characters of the reason 
-field and list of nodes with that reason for all nodes that are, 
-by default, down, drained, draining or failing. 
-This option may be used with other node filtering options 
+List reasons nodes are in the down, drained, fail or failing state.
+When nodes are in these states SLURM supports optional inclusion
+of a "reason" string by an administrator.
+This option will display the first 35 characters of the reason
+field and list of nodes with that reason for all nodes that are,
+by default, down, drained, draining or failing.
+This option may be used with other node filtering options
 (e.g. \fB\-r\fR, \fB\-d\fR, \fB\-t\fR, \fB\-n\fR),
-however, combinations of these options that result in a 
-list of nodes that are not down or drained or failing will 
+however, combinations of these options that result in a
+list of nodes that are not down or drained or failing will
 not produce any output.
 When used with \fB\-l\fR the output additionally includes
 the current node state.
@@ -258,11 +258,11 @@ List nodes only having the given state(s).  Multiple states
 may be comma separated and the comparison is case insensitive.
 Possible values include (case insensitive): ALLOC, ALLOCATED,
 COMP, COMPLETING, DOWN, DRAIN (for node in DRAINING or DRAINED
-states), DRAINED, DRAINING, FAIL, FAILING, IDLE, MAINT, NO_RESPOND, 
-POWER_SAVE, UNK, and UNKNOWN.  
-By default nodes in the specified state are reported whether 
-they are responding or not.  
-The \fB\-\-dead\fR and \fB\-\-responding\fR options may be 
+states), DRAINED, DRAINING, FAIL, FAILING, IDLE, MAINT, NO_RESPOND,
+POWER_SAVE, UNK, and UNKNOWN.
+By default nodes in the specified state are reported whether
+they are responding or not.
+The \fB\-\-dead\fR and \fB\-\-responding\fR options may be
 used to filtering nodes by the responding flag.
 
 .TP
@@ -340,22 +340,22 @@ restricted to user root, \fByes\fR or \fBno\fR.
 .TP
 \fBSHARE\fR
 Will jobs allocated resources in this partition share those
-resources.  
+resources.
 \fBno\fR indicates resources are never shared.
 \fBexclusive\fR indicates whole nodes are dedicated to jobs
-(equivalent to srun \-\-exclusive option, may be used even 
+(equivalent to srun \-\-exclusive option, may be used even
 with shared/cons_res managing individual processors).
-\fBforce\fR indicates resources are always available to be shared.  
+\fBforce\fR indicates resources are always available to be shared.
 \fByes\fR indicates resource may be shared or not
 per job's resource allocation.
 .TP
 \fBSTATE\fR
-State of the nodes. 
-Possible states include: allocated, completing, down, 
+State of the nodes.
+Possible states include: allocated, completing, down,
 drained, draining, fail, failing, idle, and unknown plus
-their abbreviated forms: alloc, comp, donw, drain, drng, 
+their abbreviated forms: alloc, comp, down, drain, drng,
 fail, failg, idle, and unk respectively.
-Note that the suffix "*" identifies nodes that are presently 
+Note that the suffix "*" identifies nodes that are presently
 not responding.
 .TP
 \fBTMP_DISK\fR
@@ -385,7 +385,7 @@ The node is allocated to one or more active jobs plus
 one or more jobs are in the process of COMPLETING.
 .TP
 \fBCOMPLETING\fR
-All jobs associated with this node are in the process of 
+All jobs associated with this node are in the process of
 COMPLETING.  This node state will be removed when
 all of the job's processes have terminated and the SLURM
 epilog program (if any) has terminated. See the \fBEpilog\fR
@@ -416,15 +416,15 @@ node\fR command in the \fBscontrol\fR(1) man page or the
 \fBslurm.conf\fR(5) man page for more information.
 .TP
 \fBFAIL\fR
-The node is expected to fail soon and is unavailable for 
-use per system administrator request.  
-See the \fBupdate node\fR command in the \fBscontrol\fR(1) 
+The node is expected to fail soon and is unavailable for
+use per system administrator request.
+See the \fBupdate node\fR command in the \fBscontrol\fR(1)
 man page or the \fBslurm.conf\fR(5) man page for more information.
 .TP
 \fBFAILING\fR
-The node is currently executing a job, but is expected to fail 
-soon and is unavailable for use per system administrator request.  
-See the \fBupdate node\fR command in the \fBscontrol\fR(1) 
+The node is currently executing a job, but is expected to fail
+soon and is unavailable for use per system administrator request.
+See the \fBupdate node\fR command in the \fBscontrol\fR(1)
 man page or the \fBslurm.conf\fR(5) man page for more information.
 .TP
 \fBIDLE\fR
@@ -437,8 +437,8 @@ The node is currently in a reservation with a flag value of "maintainence".
 The SLURM controller has just started and the node's state
 has not yet been determined.
 
-.SH "ENVIRONMENT VARIABLES" 
-.PP 
+.SH "ENVIRONMENT VARIABLES"
+.PP
 Some \fBsinfo\fR options may
 be set via environment variables. These environment variables,
 along with their corresponding options, are listed below. (Note:
@@ -472,7 +472,7 @@ batch     up     infinite     6 idle   adev[10-15]
 debug*    up        30:00     8 idle   adev[0-7]
 
 .fi
- 
+
 Report partition summary information:
 .nf
 
@@ -482,7 +482,7 @@ batch     up     infinite 2/6/0/8        adev[8-15]
 debug*    up        30:00 0/8/0/8        adev[0-7]
 
 .fi
- 
+
 Report more complete information about the partition debug:
 .nf
 
@@ -544,10 +544,10 @@ FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 .SH "SEE ALSO"
-\fBscontrol\fR(1), \fBsmap\fR(1), \fBsqueue\fR(1), 
-\fBslurm_load_ctl_conf\fR(3), \fBslurm_load_jobs\fR(3), \fBslurm_load_node\fR(3), 
-\fBslurm_load_partitions\fR(3), 
-\fBslurm_reconfigure\fR(3), \fBslurm_shutdown\fR(3), 
-\fBslurm_update_job\fR(3), \fBslurm_update_node\fR(3), 
+\fBscontrol\fR(1), \fBsmap\fR(1), \fBsqueue\fR(1),
+\fBslurm_load_ctl_conf\fR(3), \fBslurm_load_jobs\fR(3), \fBslurm_load_node\fR(3),
+\fBslurm_load_partitions\fR(3),
+\fBslurm_reconfigure\fR(3), \fBslurm_shutdown\fR(3),
+\fBslurm_update_job\fR(3), \fBslurm_update_node\fR(3),
 \fBslurm_update_partition\fR(3),
 \fBslurm.conf\fR(5)
diff --git a/doc/man/man1/slurm.1 b/doc/man/man1/slurm.1
index 20ab95650a6a22489aeadb3a790c94c11f5cd61e..c23c3d98729cb0153f2758b6111bbceb395e4a9e 100644
--- a/doc/man/man1/slurm.1
+++ b/doc/man/man1/slurm.1
@@ -20,21 +20,21 @@ resource limits by user or bank account,
 and sophisticated multifactor job prioritization algorithms.
 
 SLURM has a centralized manager, \fBslurmctld\fR, to monitor resources and
-work. There may also be a backup manager to assume those responsibilities in the 
+work. There may also be a backup manager to assume those responsibilities in the
 event of failure. Each compute server (node) has a \fBslurmd\fR daemon, which
-can be compared to a remote shell: it waits for work, executes that work, returns 
+can be compared to a remote shell: it waits for work, executes that work, returns
 status, and waits for more work. An optional \fBslurmDBD\fR (SLURM DataBase Daemon)
 can be used for accounting purposes and to maintain resource limit information.
 
-Basic user tools include \fBsrun\fR to initiate jobs, 
-\fBscancel\fR to terminate queued or running jobs, \fBsinfo\fR to report system 
+Basic user tools include \fBsrun\fR to initiate jobs,
+\fBscancel\fR to terminate queued or running jobs, \fBsinfo\fR to report system
 status, and \fBsqueue\fR to report the status of jobs. There is also an administrative
 tool \fBscontrol\fR available to monitor and/or modify configuration and state
 information. APIs are available for all functions.
 
 SLURM configuration is maintained in the \fBslurm.conf\fR file.
 
-Man pages are available for all SLURM commands, daemons, APIs, plus the 
+Man pages are available for all SLURM commands, daemons, APIs, plus the
 \fBslurm.conf\fR file.
 Extensive documenation is also available on the internet at
 \fB<https://computing.llnl.gov/linux/slurm/>\fR.
@@ -59,13 +59,13 @@ FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 .SH "SEE ALSO"
-\fBsacct\fR(1), \fBsacctmgr\fR(1), \fBsalloc\fR(1), \fBsattach\fR(1), 
-\fBsbatch\fR(1), \fBsbcast\fR(1), \fBscancel\fR(1), \fBscontrol\fR(1), 
-\fBsinfo\fR(1), \fBsmap\fR(1), \fBsqueue\fR(1), \fBsreport\fR(1), 
+\fBsacct\fR(1), \fBsacctmgr\fR(1), \fBsalloc\fR(1), \fBsattach\fR(1),
+\fBsbatch\fR(1), \fBsbcast\fR(1), \fBscancel\fR(1), \fBscontrol\fR(1),
+\fBsinfo\fR(1), \fBsmap\fR(1), \fBsqueue\fR(1), \fBsreport\fR(1),
 \fBsrun\fR(1),\fBsshare\fR(1), \fBsstate\fR(1), \fBstrigger\fR(1),
-\fBsview\fR(1), 
-\fBbluegene.conf\fR(5), \fBslurm.conf\fR(5), \fBslurmdbd.conf\fR(5), 
+\fBsview\fR(1),
+\fBbluegene.conf\fR(5), \fBslurm.conf\fR(5), \fBslurmdbd.conf\fR(5),
 \fBwiki.conf\fR(5),
-\fBslurmctld\fR(8), \fBslurmd\fR(8), \fBslurmdbd\fR(8), \fBslurmstepd\fR(8), 
+\fBslurmctld\fR(8), \fBslurmd\fR(8), \fBslurmdbd\fR(8), \fBslurmstepd\fR(8),
 \fBspank\fR(8)
 
diff --git a/doc/man/man1/smap.1 b/doc/man/man1/smap.1
index 225eb548d7bbcb8fc33add4a2bd5f97f522feaed..282d583de45c5c6a9c06514b508e1db0264ade39 100644
--- a/doc/man/man1/smap.1
+++ b/doc/man/man1/smap.1
@@ -1,18 +1,18 @@
 .TH SMAP "1" "June 2009" "smap 2.1" "Slurm components"
 
 .SH "NAME"
-smap \- graphically view information about SLURM jobs, partitions, and set 
+smap \- graphically view information about SLURM jobs, partitions, and set
 configurations parameters.
 
 .SH "SYNOPSIS"
-\fBsmap\fR [\fIOPTIONS\fR...] 
+\fBsmap\fR [\fIOPTIONS\fR...]
 .SH "DESCRIPTION"
-\fBsmap\fR is used to graphically view job, partition and node information 
-for a system running SLURM. 
-Note that information about nodes and partitions to which a user lacks 
+\fBsmap\fR is used to graphically view job, partition and node information
+for a system running SLURM.
+Note that information about nodes and partitions to which a user lacks
 access will always be displayed to avoid obvious gaps in the output.
-This is equivalent to the \fB\-\-all\fR option of the \fBsinfo\fR and 
-\fBsqueue\fR commands. 
+This is equivalent to the \fB\-\-all\fR option of the \fBsinfo\fR and
+\fBsqueue\fR commands.
 
 .SH "OPTIONS"
 .TP
@@ -21,12 +21,12 @@ Print output to the commandline, no curses.
 
 .TP
 \fB\-D <option>\fR, \fB\-\-display=<option>\fR
-sets the display mode for smap. Showing revelant information about specific 
-views and displaying a corresponding node chart. While in any 
+sets the display mode for smap. Showing relevant information about specific
+views and displaying a corresponding node chart. While in any
 display a user can switch by typing a different view letter.  This is true in
-all modes except for 'configure mode' user can type 'quit' to exit just 
+all modes except for 'configure mode' user can type 'quit' to exit just
 configure mode.  Typing 'exit' will end the configuration mode and exit smap.
-Note that unallocated nodes are indicated by a '.' and nodes in the 
+Note that unallocated nodes are indicated by a '.' and nodes in the
 DOWN, DRAINED or FAIL state by a '#'.
 .RS
 .TP 15
@@ -40,10 +40,10 @@ Displays current BlueGene node states and allows users to configure the system.
 Displays information about jobs running on system.
 .TP
 .I "r"
-Display information about advanced reservations. 
-While all current and future reservations will be listed, 
+Display information about advanced reservations.
+While all current and future reservations will be listed,
 only currently active reservations will appear on the node map.
-.TP 
+.TP
 .I "s"
 Displays information about slurm partitions on the system
 .RE
@@ -58,10 +58,10 @@ Print a message describing all \fBsmap\fR options.
 
 .TP
 \fB\-i <seconds>\fR , \fB\-\-iterate=<seconds>\fR
-Print the state on a periodic basis. 
+Print the state on a periodic basis.
 Sleep for the indicated number of seconds between reports.
 User can exit at anytime by typing 'q' or hitting the return key.
-If user is in configure mode type 'exit' to exit program, 'quit' 
+If user is in configure mode type 'exit' to exit program, 'quit'
 to exit configure mode.
 
 .TP
@@ -69,7 +69,7 @@ to exit configure mode.
 Only show objects with these ionodes this support is only for
 bluegene systems. This should be used inconjuction with the '\-n'
 option.  Only specify the ionode number range here.  Specify the node
-name with the '\-n' option. 
+name with the '\-n' option.
 
 .TP
 \fB\-n\fR, \fB\-\-nodes\fR
@@ -83,10 +83,10 @@ Avoid printing error messages.
 .TP
 \fB\-R <RACK_MIDPLANE_ID/XYZ>\fR, \fB\-\-resolve=<RACK_MIDPLANE_ID/XYZ>\fR
 Returns the XYZ coords for a Rack/Midplane id or vice\-versa.
-                                                                                  
+
 To get the XYZ coord for a Rack/Midplane id input \-R R101 where 10 is the rack
 and 1 is the midplane.
-                                                                                  
+
 To get the Rack/Midplane id from a XYZ coord input \-R 101 where X=1 Y=1 Z=1 with
 no leading 'R'.
 
@@ -100,8 +100,8 @@ Print version information and exit.
 
 .SH "INTERACTIVE OPTIONS"
 When using smap in curses mode you can scroll through the different windows
-using the arrow keys.  The \fBup\fR and \fBdown\fR arrow keys scroll 
-the window containing the grid, and the \fBleft\fR and \fBright\fR arrow keys 
+using the arrow keys.  The \fBup\fR and \fBdown\fR arrow keys scroll
+the window containing the grid, and the \fBleft\fR and \fBright\fR arrow keys
 scroll the window containing the text information.
 
 .SH "OUTPUT FIELD DESCRIPTIONS"
@@ -133,7 +133,7 @@ Mode Type: \fBCOPROCESS\fR or \fBVIRTUAL\fR.
 Name of the job or advanced reservation.
 .TP
 \fBNODELIST\fR or \fBBP_LIST\fR
-Names of nodes or base partitions associated with this configuration, 
+Names of nodes or base partitions associated with this configuration,
 partition or reservation.
 .TP
 \fBNODES\fR
@@ -145,21 +145,21 @@ default partition.
 .TP
 \fBST\fR
 State of a job in compact form. Possible states include:
-PD (pending), R (running), S (suspended), 
-CD  (completed), CF (configuring), CG (completing), 
-F (failed), TO (timeout), and NF (node failure). See 
+PD (pending), R (running), S (suspended),
+CD  (completed), CF (configuring), CG (completing),
+F (failed), TO (timeout), and NF (node failure). See
 \fBJOB STATE CODES\fR section below for more information.
 .TP
 \fBSTART_TIME\fR
 The time when an advanced reservation started.
 .TP
 \fBSTATE\fR
-State of the nodes. 
-Possible states include: allocated, completing, down, 
+State of the nodes.
+Possible states include: allocated, completing, down,
 drained, draining, fail, failing, idle, and unknown plus
-their abbreviated forms: alloc, comp, donw, drain, drng, 
+their abbreviated forms: alloc, comp, down, drain, drng,
 fail, failg, idle, and unk respectively.
-Note that the suffix "*" identifies nodes that are presently 
+Note that the suffix "*" identifies nodes that are presently
 not responding.
 See \fBNODE STATE CODES\fR section below for more information.
 .TP
@@ -171,19 +171,19 @@ jobs or partitions without a job time limit.
 
 .SH "TOPOGRAPHY INFORMATION"
 .PP
-The node chart is designed to indicate relative locations of 
-the nodes. 
-On most Linux clusters this will represent a one\-dimensional 
-array of nodes. Larger clusters will utilize multiple as needed 
-with right side of one line being logically followed by the 
+The node chart is designed to indicate relative locations of
+the nodes.
+On most Linux clusters this will represent a one\-dimensional
+array of nodes. Larger clusters will utilize multiple as needed
+with right side of one line being logically followed by the
 left side of the next line.
 .PP
 .nf
-On BlueGene systems, the node chart will indicate the three 
+On BlueGene systems, the node chart will indicate the three
 dimensional topography of the system.
 The X dimension will increase from left to right on a given line.
 The Y dimension will increase in planes from bottom to top.
-The Z dimension will increase within a plane from the back 
+The Z dimension will increase within a plane from the back
 line to the front line of a plane.
 Note the example below:
 
@@ -218,9 +218,9 @@ e  12378 debug     RMP4     joseph asx4 R   0:34    2k bgl[612x713]
 
 .SH "CONFIGURATION INSTRUCTIONS"
 .PP
-For Admin use. From this screen one can create a configuration 
+For Admin use. From this screen one can create a configuration
 file that is used to partition and wire the system into usable
-blocks.  
+blocks.
 
 .TP
 \fBOUTPUT\fR
@@ -248,45 +248,45 @@ Mode Type: \fBCOPROCESS\fR or \fBVIRTUAL\fR.
 Returns the XYZ coords for a Rack/Midplane id or vice\-versa.
 
 To get the XYZ coord for a Rack/Midplane id input \-R R101 where 10 is the rack
-and 1 is the midplane.  
+and 1 is the midplane.
 
 To get the Rack/Midplane id from a XYZ coord input \-R 101 where X=1 Y=1 Z=1 with
-no leading 'R'.  
+no leading 'R'.
 
 .TP
 \fBload <bluegene.conf file>\fR
 Load an already exsistant bluegene.conf file. This will varify and mapout a
-bluegene.conf file.  After loaded the configuration may be edited and 
+bluegene.conf file.  After loaded the configuration may be edited and
 saved as a new file.
 
-.TP 
+.TP
 \fBcreate <size> <options>\fR
-Submit request for partition creation. The size may be specified either 
-as a count of base partitions or specific dimensions in the X, Y and Z 
-directions separated by "x", for example "2x3x4". A variety of options 
+Submit request for partition creation. The size may be specified either
+as a count of base partitions or specific dimensions in the X, Y and Z
+directions separated by "x", for example "2x3x4". A variety of options
 may be specified. Valid options are listed below. Note that the option
 and their values are case insensitive (e.g. "MESH" and "mesh" are equivalent).
 .TP
 \fBStart = XxYxZ\fR
-Identify where to start the partition.  This is primarily for testing 
+Identify where to start the partition.  This is primarily for testing
 purposes.  For convenience one can only put the X coord or XxY will also work.
 The default value is 0x0x0.
 .TP
 \fBConnection = MESH | TORUS | SMALL\fR
-Identify how the nodes should be connected in network. 
+Identify how the nodes should be connected in network.
 The default value is TORUS.
 .RS
 .TP
 \fBSmall\fR
 Equivalent to "Connection=Small".
-If a small connection is specified the base partition chosen will create 
-smaller partitions based on options \fB32CNBlocks\fR and 
-\fB128CNBlocks\fR respectively for a Bluegene L system.  
+If a small connection is specified the base partition chosen will create
+smaller partitions based on options \fB32CNBlocks\fR and
+\fB128CNBlocks\fR respectively for a Bluegene L system.
 \fB16CNBlocks\fR, \fB64CNBlocks\fR, and \fB256CNBlocks\fR are also
-available for Bluegene P systems.  Keep in mind you 
+available for Bluegene P systems.  Keep in mind you
 must have enough ionodes to make all these configurations possible.
-  These number will be altered to take up the 
-entire base partition. Size does not need to be specified with a small 
+  These number will be altered to take up the
+entire base partition. Size does not need to be specified with a small
 request, we will always default to 1 base partition for allocation.
 .TP
 \fBMesh\fR
@@ -298,7 +298,7 @@ Equivalent to "Connection=Torus".
 
 .TP
 \fBRotation = TRUE | FALSE\fR
-Specifies that the geometry specified in the size parameter may 
+Specifies that the geometry specified in the size parameter may
 be rotated in space (e.g. the Y and Z dimensions may be switched).
 The default value is FALSE.
 .TP
@@ -306,10 +306,10 @@ The default value is FALSE.
 Equivalent to "Rotation=true".
 .TP
 \fBElongation = TRUE | FALSE\fR
-If TRUE, permit the geometry specified in the size parameter to be altered as 
-needed to fit available resources. 
-For example, an allocation of "4x2x1" might be used to satisfy a size specification 
-of "2x2x2". 
+If TRUE, permit the geometry specified in the size parameter to be altered as
+needed to fit available resources.
+For example, an allocation of "4x2x1" might be used to satisfy a size specification
+of "2x2x2".
 The default value is FALSE.
 .TP
 \fBElongate\fR
@@ -317,23 +317,23 @@ Equivalent to "Elongation=true".
 
 .TP
 \fBcopy <id> <count>\fR
-Submit request for partition to be copied. 
-You may copy a specific partition by specifying its id, by default the 
-last configured partition is copied. 
-You may also specify a number of copies to be made. 
+Submit request for partition to be copied.
+You may copy a specific partition by specifying its id, by default the
+last configured partition is copied.
+You may also specify a number of copies to be made.
 By default, one copy is made.
 
 .TP
 \fBdelete <id>\fR
-Delete the specified block. 
+Delete the specified block.
 
 .TP
 \fBdown <node_range>\fR
-Down a specific node or range of nodes. 
+Down a specific node or range of nodes.
 i.e. 000, 000\-111 [000x111]
 .TP
 \fBup <node_range>\fR
-Bring a specific node or range of nodes up. 
+Bring a specific node or range of nodes up.
 i.e. 000, 000\-111 [000x111]
 .TP
 \fBalldown\fR
@@ -344,8 +344,8 @@ Set all nodes to up state.
 
 .TP
 \fBsave <file_name>\fR
-Save the current configuration to a file. 
-If no file_name is specified, the configuration is written to a 
+Save the current configuration to a file.
+If no file_name is specified, the configuration is written to a
 file named "bluegene.conf" in the current working directory.
 
 .TP
@@ -377,7 +377,7 @@ The node is allocated to one or more active jobs plus
 one or more jobs are in the process of COMPLETING.
 .TP
 \fBCOMPLETING\fR
-All jobs associated with this node are in the process of 
+All jobs associated with this node are in the process of
 COMPLETING.  This node state will be removed when
 all of the job's processes have terminated and the SLURM
 epilog program (if any) has terminated. See the \fBEpilog\fR
@@ -408,15 +408,15 @@ node\fR command in the \fBscontrol\fR(1) man page or the
 \fBslurm.conf\fR(5) man page for more information.
 .TP
 \fBFAIL\fR
-The node is expected to fail soon and is unavailable for 
-use per system administrator request.  
-See the \fBupdate node\fR command in the \fBscontrol\fR(1) 
+The node is expected to fail soon and is unavailable for
+use per system administrator request.
+See the \fBupdate node\fR command in the \fBscontrol\fR(1)
 man page or the \fBslurm.conf\fR(5) man page for more information.
 .TP
 \fBFAILING\fR
-The node is currently executing a job, but is expected to fail 
-soon and is unavailable for use per system administrator request.  
-See the \fBupdate node\fR command in the \fBscontrol\fR(1) 
+The node is currently executing a job, but is expected to fail
+soon and is unavailable for use per system administrator request.
+See the \fBupdate node\fR command in the \fBscontrol\fR(1)
 man page or the \fBslurm.conf\fR(5) man page for more information.
 .TP
 \fBIDLE\fR
@@ -432,7 +432,7 @@ has not yet been determined.
 .SH "JOB STATE CODES"
 Jobs typically pass through several states in the course of their
 execution.
-The typical states are \fBPENDING\fR, \fBRUNNING\fR, \fBSUSPENDED\fR, 
+The typical states are \fBPENDING\fR, \fBRUNNING\fR, \fBSUSPENDED\fR,
 \fBCOMPLETING\fR, and \fBCOMPLETED\fR.
 An explanation of each state follows.
 .TP 20
@@ -495,10 +495,10 @@ FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 .SH "SEE ALSO"
-\fBscontrol\fR(1), \fBsinfo\fR(1), \fBsqueue\fR(1), 
-\fBslurm_load_ctl_conf\fR(3), \fBslurm_load_jobs\fR(3), \fBslurm_load_node\fR(3), 
-\fBslurm_load_partitions\fR(3), 
-\fBslurm_reconfigure\fR(3), \fBslurm_shutdown\fR(3), 
-\fBslurm_update_job\fR(3), \fBslurm_update_node\fR(3), 
+\fBscontrol\fR(1), \fBsinfo\fR(1), \fBsqueue\fR(1),
+\fBslurm_load_ctl_conf\fR(3), \fBslurm_load_jobs\fR(3), \fBslurm_load_node\fR(3),
+\fBslurm_load_partitions\fR(3),
+\fBslurm_reconfigure\fR(3), \fBslurm_shutdown\fR(3),
+\fBslurm_update_job\fR(3), \fBslurm_update_node\fR(3),
 \fBslurm_update_partition\fR(3),
 \fBslurm.conf\fR(5)
diff --git a/doc/man/man1/squeue.1 b/doc/man/man1/squeue.1
index 3634ed73b38141a3514d296494180010a5ba9560..5bfb4a3e997c734a31a84e953e461f560a0c7550 100644
--- a/doc/man/man1/squeue.1
+++ b/doc/man/man1/squeue.1
@@ -4,11 +4,11 @@
 squeue \- view information about jobs located in the SLURM scheduling queue.
 
 .SH "SYNOPSIS"
-\fBsqueue\fR [\fIOPTIONS\fR...] 
+\fBsqueue\fR [\fIOPTIONS\fR...]
 
 .SH "DESCRIPTION"
-\fBsqueue\fR is used to view job and job step information for jobs managed by 
-SLURM. 
+\fBsqueue\fR is used to view job and job step information for jobs managed by
+SLURM.
 
 .SH "OPTIONS"
 
@@ -19,8 +19,8 @@ list of account names. This has no effect when listing job steps.
 
 .TP
 \fB\-a\fR, \fB\-\-all\fR
-Display information about jobs and job steps in all partions. 
-This causes information to be displayed about partitions that are configured as 
+Display information about jobs and job steps in all partitions.
+This causes information to be displayed about partitions that are configured as
 hidden and partitions that are unavailable to user's group.
 
 .TP
@@ -33,8 +33,8 @@ Print a help message describing all options \fBsqueue\fR.
 
 .TP
 \fB\-\-hide\fR
-Do not display information about jobs and job steps in all partions. By default, 
-information about partitions that are configured as hidden or are not available 
+Do not display information about jobs and job steps in all partitions. By default,
+information about partitions that are configured as hidden or are not available
 to the user's group will not be displayed (i.e. this is the default behavior).
 
 .TP
@@ -51,7 +51,7 @@ The \fB\-\-jobs=<job_id_list>\fR option may be used in conjunction with the
 
 .TP
 \fB\-l\fR, \fB\-\-long\fR
-Report more of the available information for the selected jobs or job steps, 
+Report more of the available information for the selected jobs or job steps,
 subject to any constraints specified.
 
 .TP
@@ -63,8 +63,8 @@ A node_name of \fBlocalhost\fR is mapped to the current host name.
 
 .TP
 \fB\-o <output_format>\fR, \fB\-\-format=<output_format>\fR
-Specify the information to be displayed, its size and position 
-(right or left justified).  
+Specify the information to be displayed, its size and position
+(right or left justified).
 The default formats with various options are
 
 .RS
@@ -83,7 +83,7 @@ The default formats with various options are
 The format of each field is "%[.][size]type".
 .RS
 .TP 8
-\fIsize\fR 
+\fIsize\fR
 is the minimum field size.
 If no size is specified, whatever is needed to print the information will be used.
 .TP
@@ -93,9 +93,9 @@ By default, output is right justified.
 .RE
 
 .IP
-Note that many of these \fItype\fR specifications are valid 
+Note that many of these \fItype\fR specifications are valid
 only for jobs while others are valid only for job steps.
-Valid \fItype\fR specifications include: 
+Valid \fItype\fR specifications include:
 
 .RS
 .TP 4
@@ -106,37 +106,37 @@ Account associated with the job.
 Number of tasks created by a job step.
 This reports the value of the \fBsrun \-\-ntasks\fR option.
 .TP
-\fB%c\fR 
+\fB%c\fR
 Minimum number of CPUs (processors) per node requested by the job.
-This reports the value of the \fBsrun \-\-mincpus\fR option with a 
+This reports the value of the \fBsrun \-\-mincpus\fR option with a
 default value of zero.
 .TP
-\fB%C\fR 
-Number of CPUs (processors) requested by the job or allocated to 
+\fB%C\fR
+Number of CPUs (processors) requested by the job or allocated to
 it if already running.
 .TP
-\fB%d\fR 
+\fB%d\fR
 Minimum size of temporary disk space (in MB) requested by the job.
 .TP
-\fB%D\fR 
-Number of nodes allocated to the job or the minimum number of nodes 
-required by a pending job. The actual number of nodes allocated to a pending 
-job may exceed this number if the job specified a node range count (e.g. 
-minimum and maximum node counts) or the the job specifies a processor 
-count instead of a node count and the cluster contains nodes with varying 
+\fB%D\fR
+Number of nodes allocated to the job or the minimum number of nodes
+required by a pending job. The actual number of nodes allocated to a pending
+job may exceed this number if the job specified a node range count (e.g.
+minimum and maximum node counts) or the job specifies a processor
+count instead of a node count and the cluster contains nodes with varying
 processor counts.
 .TP
-\fB%e\fR 
+\fB%e\fR
 Time at which the job ended or is expected to end (based upon its time limit)
 .TP
 \fB%E\fR
-Job dependency. This job will not begin execution until the dependent job 
+Job dependency. This job will not begin execution until the dependent job
 completes.  A value of zero implies this job has no dependencies.
 .TP
-\fB%f\fR 
+\fB%f\fR
 Features required by the job.
 .TP
-\fB%g\fR 
+\fB%g\fR
 Group name of the job.
 .TP
 \fB%G\fR
@@ -145,33 +145,33 @@ Group ID of the job.
 \fB%h\fR
 Can the nodes allocated to the job be shared with other jobs.
 .TP
-\fB%H\fR 
+\fB%H\fR
 Minimum number of sockets per node requested by the job.
 This reports the value of the \fBsrun \-\-sockets\-per\-node\fR option.
 .TP
 \fB%i\fR
 Job or job step id.
 .TP
-\fB%I\fR 
+\fB%I\fR
 Minimum number of cores per socket requested by the job.
 This reports the value of the \fBsrun \-\-cores\-per\-socket\fR option.
 .TP
 \fB%j\fR
 Job or job step name.
 .TP
-\fB%J\fR 
+\fB%J\fR
 Minimum number of threads per core requested by the job.
 This reports the value of the \fBsrun \-\-threads\-per\-core\fR option.
 .TP
 \fB%k\fR
 Comment associated with the job.
 \fB%l\fR
-Time limit of the job or job step in days\-hours:minutes:seconds. 
+Time limit of the job or job step in days\-hours:minutes:seconds.
 The value may be "NOT_SET" if not yet established or "UNLIMITED" for no limit.
 .TP
 \fB%L\fR
-Time left for the job to execute in days\-hours:minutes:seconds. 
-This value is calculated by subtracting the job's time used from its time 
+Time left for the job to execute in days\-hours:minutes:seconds.
+This value is calculated by subtracting the job's time used from its time
 limit.
 The value may be "NOT_SET" if not yet established or "UNLIMITED" for no limit.
 .TP
@@ -179,21 +179,21 @@ The value may be "NOT_SET" if not yet established or "UNLIMITED" for no limit.
 Minimum size of memory (in MB) requested by the job
 .TP
 \fB%M\fR
-Time used by the job or job step in days\-hours:minutes:seconds. 
+Time used by the job or job step in days\-hours:minutes:seconds.
 The days and hours are printed only as needed.
-For job steps this field shows the elapsed time since execution began 
+For job steps this field shows the elapsed time since execution began
 and thus will be inaccurate for job steps which have been suspended.
 Clock skew between nodes in the cluster will cause the time to be inaccurate.
 If the time is obviously wrong (e.g. negative), it displays as "INVALID".
 .TP
 \fB%n\fR
-List of node names (or base partitions on BlueGene systems) explicitly 
+List of node names (or base partitions on BlueGene systems) explicitly
 requested by the job
 .TP
 \fB%N\fR
-List of nodes allocated to the job or job step. In the case of a 
+List of nodes allocated to the job or job step. In the case of a
 \fICOMPLETING\fR job, the list of nodes will comprise only those
-nodes that have not yet been returned to service. This may result 
+nodes that have not yet been returned to service. This may result
 in the node count being greater than the number of listed nodes.
 .TP
 \fB%O\fR
@@ -203,7 +203,7 @@ Are contiguous nodes requested by the job.
 Priority of the job (converted to a floating point number between 0.0 and 1.0).
 Also see \fB%Q\fR.
 .TP
-\fB%P\fR 
+\fB%P\fR
 Partition of the job or job step.
 .TP
 \fB%q\fR
@@ -218,47 +218,47 @@ The reason a job is in its current state.
 See the \fBJOB REASON CODES\fR section below for more information.
 .TP
 \fB%R\fR
-For pending jobs: the reason a job is waiting for execution 
+For pending jobs: the reason a job is waiting for execution
 is printed within parenthesis.
-For terminated jobs with failure: an explanation as to why the 
+For terminated jobs with failure: an explanation as to why the
 job failed is printed within parenthesis.
-For all other job states: the list of allocate nodes. 
+For all other job states: the list of allocated nodes.
 See the \fBJOB REASON CODES\fR section below for more information.
 .TP
-\fB%s\fR 
+\fB%s\fR
 Node selection plugin specific data for a job. Possible data includes:
-Geometry requirement of resource allocation (X,Y,Z dimensions), 
-Connection type (TORUS, MESH, or NAV == torus else mesh), 
-Permit rotation of geometry (yes or no), 
+Geometry requirement of resource allocation (X,Y,Z dimensions),
+Connection type (TORUS, MESH, or NAV == torus else mesh),
+Permit rotation of geometry (yes or no),
 Node use (VIRTUAL or COPROCESSOR),
 etc.
 .TP
-\fB%S\fR 
+\fB%S\fR
 Actual or expected start time of the job or job step.
 .TP
-\fB%t\fR 
+\fB%t\fR
 Job state, compact form:
-PD (pending), R (running), CA (cancelled), CF(configuring), 
-CG (completing), CD (completed), 
+PD (pending), R (running), CA (cancelled), CF(configuring),
+CG (completing), CD (completed),
 F (failed), TO (timeout), and NF (node failure).
 See the \fBJOB STATE CODES\fR section below for more information.
 .TP
-\fB%T\fR 
-Job state, extended form: 
-PENDING, RUNNING, SUSPENDED, CANCELLED, COMPLETING, COMPLETED, CONFIGURING, 
+\fB%T\fR
+Job state, extended form:
+PENDING, RUNNING, SUSPENDED, CANCELLED, COMPLETING, COMPLETED, CONFIGURING,
 FAILED, TIMEOUT, and NODE_FAIL.
 See the \fBJOB STATE CODES\fR section below for more information.
 .TP
-\fB%u\fR 
+\fB%u\fR
 User name for a job or job step.
 .TP
-\fB%U\fR 
+\fB%U\fR
 User ID for a job or job step.
 .TP
-\fB%v\fR 
+\fB%v\fR
 Reservation for the job.
 .TP
-\fB%x\fR 
+\fB%x\fR
 List of node names explicitly excluded by the job.
 .TP
 \fB%z\fR
@@ -268,7 +268,7 @@ the job.
 
 .TP
 \fB\-p <part_list>\fR, \fB\-\-partition=<part_list>\fR
-Specify the partitions of the jobs or steps to view. Accepts a comma separated 
+Specify the partitions of the jobs or steps to view. Accepts a comma separated
 list of partition names.
 
 .TP
@@ -278,7 +278,7 @@ separated list of qos's.
 
 .TP
 \fB\-s\fR, \fB\-\-steps\fR
-Specify the job steps to view.  This flag indicates that a comma separated list 
+Specify the job steps to view.  This flag indicates that a comma separated list
 of job steps to view follows without an equal sign (see examples).
 The job step format is "job_id.step_id". Defaults to all job steps.
 
@@ -286,21 +286,21 @@ The job step format is "job_id.step_id". Defaults to all job steps.
 \fB\-S <sort_list>\fR, \fB\-\-sort=<sort_list>\fR
 Specification of the order in which records should be reported.
 This uses the same field specifciation as the <output_format>.
-Multiple sorts may be performed by listing multiple sort fields 
+Multiple sorts may be performed by listing multiple sort fields
 separated by commas.
-The field specifications may be preceeded by "+" or "\-" for 
-ascending (default) and descending order respectively. 
+The field specifications may be preceded by "+" or "\-" for
+ascending (default) and descending order respectively.
 For example, a sort value of "P,U" will sort the
-records by partition name then by user id. 
-The default value of sort for jobs is "P,t,\-p" (increasing partition 
-name then within a given partition by increasing node state and then 
+records by partition name then by user id.
+The default value of sort for jobs is "P,t,\-p" (increasing partition
+name then within a given partition by increasing node state and then
 decreasing priority).
-The default value of sort for job steps is "P,i" (increasing partition 
+The default value of sort for job steps is "P,i" (increasing partition
 name then within a given partition by increasing step id).
 
 .TP
 \fB\-\-start\fR
-Report the expected start time of pending jobs in order of increasing 
+Report the expected start time of pending jobs in order of increasing
 start time. This is equivalent to the following options:
 \fB\-\-format="%.7i %.9P %.8j %.8u %.2t  %.19S %.6D %R"\fR,
 \fB\-\-sort=S\fR and \fB\-\-states=PENDING\fR.
@@ -313,10 +313,10 @@ SLURM is configured to use the backfill scheduling plugin.
 .TP
 \fB\-t <state_list>\fR, \fB\-\-states=<state_list>\fR
 Specify the states of jobs to view.  Accepts a comma separated list of
-state names or "all". If "all" is specified then jobs of all states will be 
+state names or "all". If "all" is specified then jobs of all states will be
 reported. If no state is specified then pending, running, and completing
 jobs are reported. Valid states (in both extended and compact form) include:
-PENDING (PD), RUNNING (R), SUSPENDED (S), 
+PENDING (PD), RUNNING (R), SUSPENDED (S),
 COMPLETING (CG), COMPLETED (CD), CONFIGURING (CF), CANCELLED (CA),
 FAILED (F), TIMEOUT (TO), and NODE_FAIL (NF). Note the \fB<state_list>\fR
 supplied is case insensitve ("pd" and "PD" work the same).
@@ -340,8 +340,8 @@ Report details of squeues actions.
 Print version information and exit.
 
 .SH "JOB REASON CODES"
-These codes identify the reason that a job is waiting for execution. 
-A job may be waiting for more than one reason, in which case only 
+These codes identify the reason that a job is waiting for execution.
+A job may be waiting for more than one reason, in which case only
 one of those reasons is displayed.
 .TP 20
 \fBDependency\fR
@@ -354,7 +354,7 @@ No reason is set for this job.
 The partition required by this job is in a DOWN state.
 .TP
 \fBPartitionNodeLimit\fR
-The number of nodes required by this job is outside of it's 
+The number of nodes required by this job is outside of its
 partitions current limits.
 Can also indicate that required nodes are DOWN or DRAINED.
 .TP
@@ -377,7 +377,7 @@ The job's constraints can not be satisfied.
 Failure of the SLURM system, a file system, the network, etc.
 .TP
 \fBJobLaunchFailure\fR
-The job could not be launched. 
+The job could not be launched.
 This may be due to a file system problem, invalid program name, etc.
 .TP
 \fBNonZeroExitCode\fR
@@ -390,8 +390,8 @@ The job exhausted its time limit.
 The job reached the system InactiveLimit.
 
 .SH "JOB STATE CODES"
-Jobs typically pass through several states in the course of their 
-execution. 
+Jobs typically pass through several states in the course of their
+execution.
 The typical states are PENDING, RUNNING, SUSPENDED, COMPLETING, and COMPLETED.
 An explanation of each state follows.
 .TP 20
@@ -429,8 +429,8 @@ Job terminated upon reaching its time limit.
 
 .SH "ENVIRONMENT VARIABLES"
 .PP
-Some \fBsqueue\fR options may be set via environment variables. These 
-environment variables, along with their corresponding options, are listed 
+Some \fBsqueue\fR options may be set via environment variables. These
+environment variables, along with their corresponding options, are listed
 below. (Note: Commandline options will always override these settings.)
 .TP 20
 \fBSLURM_CONF\fR
@@ -462,19 +462,19 @@ The location of the SLURM configuration file.
 
 .SH "EXAMPLES"
 .eo
-Print the jobs scheduled in the debug partition and in the 
-COMPLETED state in the format with six right justified digits for 
+Print the jobs scheduled in the debug partition and in the
+COMPLETED state in the format with six right justified digits for
 the job id followed by the priority with an arbitrary fields size:
 .br
 # squeue -p debug -t COMPLETED -o "%.6i %p"
 .br
  JOBID PRIORITY
 .br
- 65543 99993 
+ 65543 99993
 .br
- 65544 99992 
+ 65544 99992
 .br
- 65545 99991 
+ 65545 99991
 .ec
 
 .eo
@@ -534,8 +534,8 @@ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 .SH "SEE ALSO"
-\fBscancel\fR(1), \fBscontrol\fR(1), \fBsinfo\fR(1), 
+\fBscancel\fR(1), \fBscontrol\fR(1), \fBsinfo\fR(1),
 \fBsmap\fR(1), \fBsrun\fR(1),
 \fBslurm_load_ctl_conf\fR(3), \fBslurm_load_jobs\fR(3),
-\fBslurm_load_node\fR(3), 
+\fBslurm_load_node\fR(3),
 \fBslurm_load_partitions\fR(3)
diff --git a/doc/man/man1/sreport.1 b/doc/man/man1/sreport.1
index 354a4ed56d387dc51e0fae2dfc92f99a00352944..3ec9547ed05b4e90e1c32ea896de83a03f23f533 100644
--- a/doc/man/man1/sreport.1
+++ b/doc/man/man1/sreport.1
@@ -8,7 +8,7 @@ sreport \- Generate reports from the slurm accounting data.
 
 .SH "DESCRIPTION"
 \fBsreport\fR is used to generate certain reports. It provides a view into accounting data gathered from slurm via
-the account information maintained within a database with the interface 
+the account information maintained within a database with the interface
 being provided by the \fBslurmdbd\fR (Slurm Database daemon).
 
 .SH "OPTIONS"
@@ -33,14 +33,14 @@ Output will be '|' delimited without a '|' at the end.
 Print no warning or informational messages, only error messages.
 .TP
 \fB\-t <format>\fR
-Specify the output time format. 
+Specify the output time format.
 Time format options are case insensitive and may be abbreviated.
 The default format is Minutes.
 Supported time format options are listed in the \fBtime\fP command
 section below.
 .TP
 \fB\-v\fR, \fB\-\-verbose\fR
-Print detailed event logging. 
+Print detailed event logging.
 .TP
 \fB\-V\fR , \fB\-\-version\fR
 Print version information and exit.
@@ -80,7 +80,7 @@ Identical to the \fBexit\fR command.
 
 .TP
 \fBtime <time_format>\fP
-Specify the output time format. 
+Specify the output time format.
 Time format options are case insensitive and may be abbreviated.
 The default format is Minutes.
 Supported time format options include:
@@ -110,7 +110,7 @@ Percentage of Total
 
 .TP
 \fBverbose\fP
-Enable detailed event logging. 
+Enable detailed event logging.
 
 .TP
 \fBversion\fP
@@ -148,13 +148,13 @@ UserUtilizationByWckey, Utilization, WCKeyUtilizationByUser
 REPORT DESCRIPTION
 .RS
 .TP
-.B cluster AccountUtilizationByUser 
+.B cluster AccountUtilizationByUser
 This report will display account utilization as it appears on the
 hierarchical tree.  Starting with the specified account or the
 root account by default this report will list the underlying
 usage with a sum on each level.  Use the 'tree' option to span
 the tree for better visibility.
-.TP 
+.TP
 .B cluster UserUtilizationByAccount
 This report will display users by account in order of utilization without
 grouping multiple accounts by user into one, but displaying them
@@ -174,7 +174,7 @@ the number should be grouped with idle time.
 .TP
 .B cluster WCKeyUtilizationByUser
 This report will display wckey utilization sorted by WCKey name for
-each user on each cluster.  
+each user on each cluster.
 
 .TP
 .B job SizesByAccount
@@ -187,7 +187,7 @@ accounts listed.
 .TP
 .B job SizesByWckey
 This report will dispay the amount of time for each wckey for job ranges
-specified by the 'grouping=' option.  
+specified by the 'grouping=' option.
 
 .TP
 .B reservation Utilization
@@ -197,7 +197,7 @@ This report will display total usage for reservations on the systems.
 .B user TopUsage
 Displays the top users on a cluster.  Use the group option to group
 accounts together.  The default is to have a different line for each
-user account combination.  
+user account combination.
 
 .TP
 Each report type has various options...
@@ -217,7 +217,7 @@ Period ending for report. Default is 23:59:59 of previous day.
 Valid time formats are...
 HH:MM[:SS] [AM|PM]
 MMDD[YY] or MM/DD[/YY] or MM.DD[.YY]
-MM/DD[/YY]-HH:MM[:SS]         
+MM/DD[/YY]-HH:MM[:SS]
 YYYY-MM-DD[THH:MM[:SS]]
 .TP
 .B Format=<OPT>
@@ -227,7 +227,7 @@ When using the format option for listing various fields you can put a
 %NUMBER afterwards to specify how many characters should be printed.
 
 i.e. format=name%30 will print 30 characters of field name right
-justified.  A \-30 will print 30 characters left justified.  
+justified.  A \-30 will print 30 characters left justified.
 
 .TP
 .B Start=<OPT>
@@ -235,7 +235,7 @@ Period start for report.  Default is 00:00:00 of previous day.
 Valid time formats are...
 HH:MM[:SS] [AM|PM]
 MMDD[YY] or MM/DD[/YY] or MM.DD[.YY]
-MM/DD[/YY]-HH:MM[:SS]         
+MM/DD[/YY]-HH:MM[:SS]
 YYYY-MM-DD[THH:MM[:SS]]
 .RE
 
@@ -246,7 +246,7 @@ CLUSTER
 .B Accounts=<OPT>
 When used with the UserUtilizationByAccount, or
 AccountUtilizationByUser, List of accounts to include in report.
-Default is all. 
+Default is all.
 .TP
 .B Tree
 When used with the AccountUtilizationByUser report will span the
@@ -258,11 +258,11 @@ include in report.  Default is all.
 .TP
 .B Wckeys=<OPT>
 When used with the UserUtilizationByWckey or WCKeyUtilizationByUser,
-List of wckeys to include in report. Default is all. 
+List of wckeys to include in report. Default is all.
 .RE
 
 .TP
-JOB    
+JOB
 .RS
 .TP
 .B Accounts=<OPT>
@@ -295,7 +295,7 @@ List of partitions jobs ran on to include in report.  Default is all.
 .TP
 .B PrintJobCount
 When used with the Sizes report will print number of jobs ran instead
-of time used.  
+of time used.
 .TP
 .B Users=<OPT>
 List of users jobs to include in report.  Default is all.
@@ -333,10 +333,10 @@ Default is 10.
 .TP
 .B Users=<OPT>
 List of users jobs to include in report.  Default is all.
-.RE  
+.RE
 
 .TP
- 
+
 .SH "Format Options for Each Report"
 
 \fBCluster\fP
@@ -364,7 +364,7 @@ List of users jobs to include in report.  Default is all.
 \fBUser\fP
        TopUsage
              \- Account, Cluster, Login, Proper, Used
-                                                                           
+
 .TP
 All commands and options are case-insensitive.
 .TP
diff --git a/doc/man/man1/srun.1 b/doc/man/man1/srun.1
index 7b14b94655e96041f7a8713d25085b192ad63b95..a637b6db963b23ed72801ee4d7017764b4243287 100644
--- a/doc/man/man1/srun.1
+++ b/doc/man/man1/srun.1
@@ -49,12 +49,12 @@ options if desired:
 When the task/affinity plugin is enabled,
 specifying an allocation in this manner also instructs SLURM to use
 a CPU affinity mask to guarantee the request is filled as specified.
-NOTE: Support for these options are configuration dependent. 
+NOTE: Support for these options are configuration dependent.
 The task/affinity plugin must be configured.
-In addition either select/linear or select/cons_res plugin must be 
+In addition either select/linear or select/cons_res plugin must be
 configured.
-If select/cons_res is configured, it must have a parameter of CR_Core, 
-CR_Core_Memory, CR_Socket, or CR_Socket_Memory. 
+If select/cons_res is configured, it must have a parameter of CR_Core,
+CR_Core_Memory, CR_Socket, or CR_Socket_Memory.
 
 .TP
 \fB\-\-begin\fR=<\fItime\fR>
@@ -101,15 +101,15 @@ already passed for that year, in which case the next year is used.
 
 .TP
 \fB\-\-checkpoint\fR=<\fItime\fR>
-Specifies the interval between creating checkpoints of the job step. 
+Specifies the interval between creating checkpoints of the job step.
 By default, the job step will no checkpoints created.
-Acceptable time formats include "minutes", "minutes:seconds", 
-"hours:minutes:seconds", "days\-hours", "days\-hours:minutes" and 
+Acceptable time formats include "minutes", "minutes:seconds",
+"hours:minutes:seconds", "days\-hours", "days\-hours:minutes" and
 "days\-hours:minutes:seconds".
 
 .TP
 \fB\-\-checkpoint\-dir\fR=<\fIdirectory\fR>
-Specifies the directory into which the job or job step's checkpoint should 
+Specifies the directory into which the job or job step's checkpoint should
 be written (used by the checkpoint/blcr and checkpoint/xlch plugins only).
 The default value is the current working directory.
 Checkpoint files will be of the form "<job_id>.ckpt" for jobs
@@ -121,30 +121,30 @@ An arbitrary comment.
 
 .TP
 \fB\-C\fR, \fB\-\-constraint\fR=<\fIlist\fR>
-Specify a list of constraints. 
-The constraints are features that have been assigned to the nodes by 
-the slurm administrator. 
-The \fIlist\fR of constraints may include multiple features separated 
+Specify a list of constraints.
+The constraints are features that have been assigned to the nodes by
+the slurm administrator.
+The \fIlist\fR of constraints may include multiple features separated
 by ampersand (AND) and/or vertical bar (OR) operators.
-For example: \fB\-\-constraint="opteron&video"\fR or 
+For example: \fB\-\-constraint="opteron&video"\fR or
 \fB\-\-constraint="fast|faster"\fR.
 In the first example, only nodes having both the feature "opteron" AND
 the feature "video" will be used.
 There is no mechanism to specify that you want one node with feature
 "opteron" and another node with feature "video" in that case that no
 node has both features.
-If only one of a set of possible options should be used for all allocated 
-nodes, then use the OR operator and enclose the options within square brackets. 
-For example: "\fB\-\-constraint=[rack1|rack2|rack3|rack4]"\fR might 
-be used to specify that all nodes must be allocated on a single rack of 
+If only one of a set of possible options should be used for all allocated
+nodes, then use the OR operator and enclose the options within square brackets.
+For example: "\fB\-\-constraint=[rack1|rack2|rack3|rack4]"\fR might
+be used to specify that all nodes must be allocated on a single rack of
 the cluster, but any of those four racks can be used.
 A request can also specify the number of nodes needed with some feature
-by appending an asterisk and count after the feature name. 
-For example "\fBsrun \-\-nodes=16 \-\-constraint=graphics*4 ..."\fR 
+by appending an asterisk and count after the feature name.
+For example "\fBsrun \-\-nodes=16 \-\-constraint=graphics*4 ..."\fR
 indicates that the job requires 16 nodes at that at least four of those
 nodes must have the feature "graphics."
 Constraints with node counts may only be combined with AND operators.
-If no nodes have the requested features, then the job will be rejected 
+If no nodes have the requested features, then the job will be rejected
 by the slurm job manager.
 
 .TP
@@ -288,22 +288,22 @@ Show this help message
 Request that \fIncpus\fR be allocated \fBper process\fR. This may be
 useful if the job is multithreaded and requires more than one CPU
 per task for optimal performance. The default is one CPU per process.
-If \fB\-c\fR is specified without \fB\-n\fR, as many 
+If \fB\-c\fR is specified without \fB\-n\fR, as many
 tasks will be allocated per node as possible while satisfying
 the \fB\-c\fR restriction. For instance on a cluster with 8 CPUs
-per node, a job request for 4 nodes and 3 CPUs per task may be 
-allocated 3 or 6 CPUs per node (1 or 2 tasks per node) depending 
-upon resource consumption by other jobs. Such a job may be 
-unable to execute more than a total of 4 tasks. 
+per node, a job request for 4 nodes and 3 CPUs per task may be
+allocated 3 or 6 CPUs per node (1 or 2 tasks per node) depending
+upon resource consumption by other jobs. Such a job may be
+unable to execute more than a total of 4 tasks.
 This option may also be useful to spawn tasks without allocating
-resources to the job step from the job's allocation when running 
+resources to the job step from the job's allocation when running
 multiple job steps with the \fB\-\-exclusive\fR option.
 
 .TP
 \fB\-d\fR, \fB\-\-dependency\fR=<\fIdependency_list\fR>
 Defer the start of this job until the specified dependencies have been
 satisfied completed.
-<\fIdependency_list\fR> is of the form 
+<\fIdependency_list\fR> is of the form
 <\fItype:job_id[:job_id][,type:job_id[:job_id]]\fR>.
 Many jobs can share the same dependency and these jobs may even belong to
 different  users. The  value may be changed after job submission using the
@@ -349,7 +349,7 @@ If the specified file already exists, it will be overwritten.
 
 .TP
 \fB\-E\fR, \fB\-\-preserve-env\fR
-Pass the current values of environment variables SLURM_NNODES and 
+Pass the current values of environment variables SLURM_NNODES and
 SLURM_NPROCS through to the \fIexecutable\fR, rather than computing them
 from commandline parameters.
 
@@ -363,28 +363,28 @@ parameter in slurm.conf.
 
 .TP
 \fB\-\-exclusive\fR
-When used to initiate a job, the job allocation cannot share nodes with 
-other running jobs.  This is the oposite of \-\-share, whichever option 
-is seen last on the command line will win.  (The default shared/exclusive 
+When used to initiate a job, the job allocation cannot share nodes with
+other running jobs.  This is the opposite of \-\-share, whichever option
+is seen last on the command line will win.  (The default shared/exclusive
 behaviour depends on system configuration.)
 
-This option can also be used when initiating more than job step within 
-an existing resource allocation and you want separate processors to 
-be dedicated to each job step. If sufficient processors are not 
-available to initiate the job step, it will be deferred. This can 
+This option can also be used when initiating more than one job step within
+an existing resource allocation and you want separate processors to
+be dedicated to each job step. If sufficient processors are not
+available to initiate the job step, it will be deferred. This can
 be thought of as providing resource management for the job within
 it's allocation. Note that all CPUs allocated to a job are available
 to each job step unless the \fB\-\-exclusive\fR option is used plus
-task affinity is configured. Since resource management is provided by 
-processor, the \fB\-\-ntasks\fR option must be specified, but the 
-following options should NOT be specified \fB\-\-nodes\fR, 
+task affinity is configured. Since resource management is provided by
+processor, the \fB\-\-ntasks\fR option must be specified, but the
+following options should NOT be specified \fB\-\-nodes\fR,
 \fB\-\-relative\fR, \fB\-\-distribution\fR=\fIarbitrary\fR.
 See \fBEXAMPLE\fR below.
 
 .TP
 \fB\-\-gid\fR=<\fIgroup\fR>
-If \fBsrun\fR is run as root, and the \fB\-\-gid\fR option is used, 
-submit the job with \fIgroup\fR's group access permissions.  \fIgroup\fR 
+If \fBsrun\fR is run as root, and the \fB\-\-gid\fR option is used,
+submit the job with \fIgroup\fR's group access permissions.  \fIgroup\fR
 may be the group name or the numerical group ID.
 
 .\".TP
@@ -420,8 +420,8 @@ show this help message
 
 .TP
 \fB\-I\fR, \fB\-\-immediate\fR[=<\fIseconds\fR>]
-exit if resources are not available within the 
-time period specified. 
+exit if resources are not available within the
+time period specified.
 If no argument is given, resources must be available immediately
 for the request to succeed.
 By default, \fB\-\-immediate\fR is off, and the command
@@ -433,7 +433,7 @@ Specify how stdin is to redirected. By default,
 .B srun
 redirects stdin from the terminal all tasks. See \fBIO Redirection\fR
 below for more options.
-For OS X, the poll() function does not support stdin, so input from 
+For OS X, the poll() function does not support stdin, so input from
 a terminal is not possible.
 
 .TP
@@ -455,10 +455,10 @@ Terminate a job if any task exits with a non\-zero exit code.
 .TP
 \fB\-k\fR, \fB\-\-no\-kill\fR
 Do not automatically terminate a job of one of the nodes it has been
-allocated fails.  This option is only recognized on a job allocation, 
-not for the submission of individual job steps. 
-The job will assume all responsibilities for fault\-tolerance. The 
-active job step (MPI job) will almost certainly suffer a fatal error, 
+allocated fails.  This option is only recognized on a job allocation,
+not for the submission of individual job steps.
+The job will assume all responsibilities for fault\-tolerance. The
+active job step (MPI job) will almost certainly suffer a fatal error,
 but subsequent job steps may be run if this option is specified. The
 default action is to terminate job upon node failure.
 
@@ -472,9 +472,9 @@ task id.
 
 .TP
 \fB\-L\fR, \fB\-\-licenses\fR=<\fBlicense\fR>
-Specification of licenses (or other resources available on all 
+Specification of licenses (or other resources available on all
 nodes of the cluster) which must be allocated to this job.
-License names can be followed by an asterisk and count 
+License names can be followed by an asterisk and count
 (the default count is one).
 Multiple license names should be comma separated (e.g.
 "\-\-licenses=foo*4,bar").
@@ -487,13 +487,13 @@ Specify an alternate distribution method for remote processes.
 .TP
 .B block
 The block method of distribution will allocate processes in\-order to
-the cpus on a node. If the number of processes exceeds the number of 
-cpus on all of the nodes in the allocation then all nodes will be 
-utilized. For example, consider an allocation of three nodes each with 
-two cpus. A four\-process block distribution request will distribute 
-those processes to the nodes with processes one and two on the first 
-node, process three on the second node, and process four on the third node.  
-Block distribution is the default behavior if the number of tasks 
+the cpus on a node. If the number of processes exceeds the number of
+cpus on all of the nodes in the allocation then all nodes will be
+utilized. For example, consider an allocation of three nodes each with
+two cpus. A four\-process block distribution request will distribute
+those processes to the nodes with processes one and two on the first
+node, process three on the second node, and process four on the third node.
+Block distribution is the default behavior if the number of tasks
 exceeds the number of nodes requested.
 .TP
 .B cyclic
@@ -505,7 +505,7 @@ if the number of tasks is no larger than the number of nodes requested.
 .B plane
 The tasks are distributed in blocks of a specified size.
 The options include a number representing the size of the task block.
-This is followed by an optional specification of the task distribution 
+This is followed by an optional specification of the task distribution
 scheme within a block of tasks and between the blocks of tasks.
 For more details (including examples and diagrams), please see
 .br
@@ -516,9 +516,9 @@ and
 https://computing.llnl.gov/linux/slurm/dist_plane.html.
 .TP
 .B arbitrary
-The arbitrary method of distribution will allocate processes in\-order as 
+The arbitrary method of distribution will allocate processes in\-order as
 listed in file designated by the environment variable SLURM_HOSTFILE.  If
+this variable is listed it will override any other method specified.
+this variable is listed it will over ride any other method specified.
 If not set the method will default to block.  Inside the hostfile must
 contain at minimum the number of hosts requested.  If requesting tasks
 (\-n) your tasks will be laid out on the nodes in the order of the file.
@@ -526,13 +526,13 @@ contain at minimum the number of hosts requested.  If requesting tasks
 
 .TP
 \fB\-\-mail\-type\fR=<\fItype\fR>
-Notify user by email when certain event types occur. 
-Valid \fItype\fR values are BEGIN, END, FAIL, ALL (any state change). 
-The user to be notified is indicated with \fB\-\-mail\-user\fR. 
+Notify user by email when certain event types occur.
+Valid \fItype\fR values are BEGIN, END, FAIL, ALL (any state change).
+The user to be notified is indicated with \fB\-\-mail\-user\fR.
 
 .TP
 \fB\-\-mail\-user\fR=<\fIuser\fR>
-User to receive email notification of state changes as defined by 
+User to receive email notification of state changes as defined by
 \fB\-\-mail\-type\fR.
 The default value is the submitting user.
 
@@ -551,7 +551,7 @@ Also see \fB\-\-mem\-per\-cpu\fR.
 \fB\-\-mem\-per\-cpu\fR=<\fIMB\fR>
 Mimimum memory required per allocated CPU in MegaBytes.
 Default value is \fBDefMemPerCPU\fR and the maximum value is
-\fBMaxMemPerCPU\fR. If configured, both of parameters can be 
+\fBMaxMemPerCPU\fR. If configured, both of these parameters can be
 seen using the \fBscontrol show config\fR command.
 This parameter would generally be used of individual processors
 are allocated to jobs (\fBSelectType=select/cons_res\fR).
@@ -560,16 +560,16 @@ Also see \fB\-\-mem\fR.
 
 .TP
 \fB\-\-mem_bind\fR=[{\fIquiet,verbose\fR},]\fItype\fR
-Bind tasks to memory. Used only when the task/affinity plugin is enabled 
+Bind tasks to memory. Used only when the task/affinity plugin is enabled
 and the NUMA memory functions are available.
-\fBNote that the resolution of CPU and memory binding 
-may differ on some architectures.\fR For example, CPU binding may be performed 
-at the level of the cores within a processor while memory binding will 
-be performed at the level of nodes, where the definition of "nodes" 
-may differ from system to system. \fBThe use of any type other than 
+\fBNote that the resolution of CPU and memory binding
+may differ on some architectures.\fR For example, CPU binding may be performed
+at the level of the cores within a processor while memory binding will
+be performed at the level of nodes, where the definition of "nodes"
+may differ from system to system. \fBThe use of any type other than
 "none" or "local" is not recommended.\fR
-If you want greater control, try running a simple test code with the 
-options "\-\-cpu_bind=verbose,none \-\-mem_bind=verbose,none" to determine 
+If you want greater control, try running a simple test code with the
+options "\-\-cpu_bind=verbose,none \-\-mem_bind=verbose,none" to determine
 the specific configuration.
 
 NOTE: To have SLURM always report on the selected memory binding for
@@ -643,12 +643,12 @@ Specify a minimum number of threads per core.
 .TP
 \fB\-\-msg\-timeout\fR=<\fIseconds\fR>
 Modify the job launch message timeout.
-The default value is \fBMessageTimeout\fR in the SLURM configuration file slurm.conf. 
+The default value is \fBMessageTimeout\fR in the SLURM configuration file slurm.conf.
 Changes to this are typically not recommended, but could be useful to diagnose problems.
 
 .TP
 \fB\-\-mpi\fR=<\fImpi_type\fR>
-Identify the type of MPI to be used. May result in unique initiation 
+Identify the type of MPI to be used. May result in unique initiation
 procedures.
 .RS
 .TP
@@ -674,17 +674,17 @@ For use with Infiniband.
 For use with OpenMPI.
 .TP
 .B none
-No special MPI processing. This is the default and works with 
+No special MPI processing. This is the default and works with
 many other versions of MPI.
 .RE
 
 .TP
 \fB\-\-multi\-prog\fR
-Run a job with different programs and different arguments for 
-each task. In this case, the executable program specified is 
-actually a configuration file specifying the executable and 
+Run a job with different programs and different arguments for
+each task. In this case, the executable program specified is
+actually a configuration file specifying the executable and
 arguments for each task. See \fBMULTIPLE PROGRAM CONFIGURATION\fR
-below for details on the configuration file contents. 
+below for details on the configuration file contents.
 
 .TP
 \fB\-N\fR, \fB\-\-nodes\fR=<\fIminnodes\fR[\-\fImaxnodes\fR]>
@@ -694,16 +694,16 @@ A limit on the maximum node count may be specified with \fImaxnodes\fR
 (e.g. "\-\-nodes=2\-4").  The minimum and maximum node count may be the
 same to specify a specific number of nodes (e.g. "\-\-nodes=2\-2" will ask
 for two and ONLY two nodes).
-The partition's node limits supersede those of the job. 
-If a job's node limits are outside of the range permitted for its 
-associated partition, the job will be left in a PENDING state. 
-This permits possible execution at a later time, when the partition 
+The partition's node limits supersede those of the job.
+If a job's node limits are outside of the range permitted for its
+associated partition, the job will be left in a PENDING state.
+This permits possible execution at a later time, when the partition
 limit is changed.
-If a job node limit exceeds the number of nodes configured in the 
+If a job node limit exceeds the number of nodes configured in the
 partition, the job will be rejected.
-Note that the environment 
-variable \fBSLURM_NNODES\fR will be set to the count of nodes actually 
-allocated to the job. See the \fBENVIRONMENT VARIABLES \fR section 
+Note that the environment
+variable \fBSLURM_NNODES\fR will be set to the count of nodes actually
+allocated to the job. See the \fBENVIRONMENT VARIABLES \fR section
 for more information.  If \fB\-N\fR is not specified, the default
 behavior is to allocate enough nodes to satisfy the requirements of
 the \fB\-n\fR and \fB\-c\fR options.
@@ -713,25 +713,25 @@ and without delaying the initiation of the job.
 .TP
 \fB\-n\fR, \fB\-\-ntasks\fR=<\fInumber\fR>
 Specify the number of tasks to run. Request that \fBsrun\fR
-allocate resources for \fIntasks\fR tasks.  
-The default is one task per socket or core (depending upon the value 
-of the \fISelectTypeParameters\fR parameter in slurm.conf), but note 
+allocate resources for \fIntasks\fR tasks.
+The default is one task per socket or core (depending upon the value
+of the \fISelectTypeParameters\fR parameter in slurm.conf), but note
 that the \fB\-\-cpus\-per\-task\fR option will change this default.
 
 .TP
 \fB\-\-network\fR=<\fItype\fR>
-Specify the communication protocol to be used. 
+Specify the communication protocol to be used.
 This option is supported on AIX systems.
-Since POE is used to launch tasks, this option is not normally used or 
+Since POE is used to launch tasks, this option is not normally used or
 is specified using the \fBSLURM_NETWORK\fR environment variable.
 The interpretation of \fItype\fR is system dependent.
-For systems with an IBM Federation switch, the following 
+For systems with an IBM Federation switch, the following
 comma\-separated and case insensitive types are recognized:
-\fBIP\fR (the default is user\-space), \fBSN_ALL\fR, \fBSN_SINGLE\fR, 
-\fBBULK_XFER\fR and adapter names  (e.g. \fBSNI0\fR and \fBSNI1\fR). 
+\fBIP\fR (the default is user\-space), \fBSN_ALL\fR, \fBSN_SINGLE\fR,
+\fBBULK_XFER\fR and adapter names  (e.g. \fBSNI0\fR and \fBSNI1\fR).
 For more information, on IBM systems see \fIpoe\fR documentation on
 the environment variables \fBMP_EUIDEVICE\fR and \fBMP_USE_BULK_XFER\fR.
-Note that only four jobs steps may be active at once on a node with the 
+Note that only four job steps may be active at once on a node with the
 \fBBULK_XFER\fR option due to limitations in the Federation switch driver.
 
 .TP
@@ -741,7 +741,7 @@ With no adjustment value the scheduling priority is decreased
 by 100. The adjustment range is from \-10000 (highest priority)
 to 10000 (lowest priority). Only privileged users can specify
 a negative adjustment. NOTE: This option is presently
-ignored if \fISchedulerType=sched/wiki\fR or 
+ignored if \fISchedulerType=sched/wiki\fR or
 \fISchedulerType=sched/wiki2\fR.
 
 .TP
@@ -793,7 +793,7 @@ Specify the mode for stdout redirection. By default in interactive mode,
 .B srun
 collects stdout from all tasks and line buffers this output to
 the attached terminal. With \fB\-\-output\fR stdout may be redirected
-to a file, to one file per task, or to /dev/null. See section 
+to a file, to one file per task, or to /dev/null. See section
 \fBIO Redirection\fR below for the various forms of \fImode\fR.
 If the specified file already exists, it will be overwritten.
 .br
@@ -869,7 +869,7 @@ The maximum stack size
 \fB\-\-pty\fR
 Execute task zero in pseudo terminal.
 Implicitly sets \fB\-\-unbuffered\fR.
-Implicitly sets \fB\-\-error\fR and \fB\-\-output\fR to /dev/null 
+Implicitly sets \fB\-\-error\fR and \fB\-\-output\fR to /dev/null
 for all tasks except task zero.
 Not currently supported on AIX platforms.
 
@@ -880,9 +880,9 @@ Suppress informational messages from srun. Errors will still be displayed.
 .TP
 \fB\-q\fR, \fB\-\-quit\-on\-interrupt\fR
 Quit immediately on single SIGINT (Ctrl\-C). Use of this option
-disables the status feature normally available when \fBsrun\fR receives 
+disables the status feature normally available when \fBsrun\fR receives
 a single Ctrl\-C and causes \fBsrun\fR to instead immediately terminate the
-running job. 
+running job.
 
 .TP
 \fB\-\-qos\fR=<\fIqos\fR>
@@ -894,16 +894,16 @@ the SLURM configuration parameter, AccountingStorageEnforce, includes
 
 .TP
 \fB\-r\fR, \fB\-\-relative\fR=<\fIn\fR>
-Run a job step relative to node \fIn\fR of the current allocation. 
+Run a job step relative to node \fIn\fR of the current allocation.
 This option may be used to spread several job steps out among the
 nodes of the current job. If \fB\-r\fR is used, the current job
 step will begin at node \fIn\fR of the allocated nodelist, where
-the first node is considered node 0.  The \fB\-r\fR option is not 
+the first node is considered node 0.  The \fB\-r\fR option is not
 permitted along with \fB\-w\fR or \fB\-x\fR, and will be silently
 ignored when not running within a prior allocation (i.e. when
-SLURM_JOB_ID is not set). The default for \fIn\fR is 0. If the 
-value of \fB\-\-nodes\fR exceeds the number of nodes identified 
-with the \fB\-\-relative\fR option, a warning message will be 
+SLURM_JOB_ID is not set). The default for \fIn\fR is 0. If the
+value of \fB\-\-nodes\fR exceeds the number of nodes identified
+with the \fB\-\-relative\fR option, a warning message will be
 printed and the \fB\-\-relative\fR option will take precedence.
 
 .TP
@@ -917,24 +917,24 @@ Allocate resources for the job from the named reservation.
 
 .TP
 \fB\-\-restart\-dir\fR=<\fIdirectory\fR>
-Specifies the directory from which the job or job step's checkpoint should 
+Specifies the directory from which the job or job step's checkpoint should
 be read (used by the checkpoint/blcrm and checkpoint/xlch plugins only).
 
 .TP
 \fB\-s\fR, \fB\-\-share\fR
-The job can share nodes with other running jobs. This may result in faster job 
+The job can share nodes with other running jobs. This may result in faster job
 initiation and higher system utilization, but lower application performance.
 
 .TP
 \fB\-\-signal\fR=<\fIsig_num\fR>[@<\fIsig_time\fR>]
-When a job is within \fIsig_time\fR seconds of its end time, 
-send it the signal \fIsig_num\fR. 
-Due to the resolution of event handling by SLURM, the signal may 
+When a job is within \fIsig_time\fR seconds of its end time,
+send it the signal \fIsig_num\fR.
+Due to the resolution of event handling by SLURM, the signal may
 be sent up to 60 seconds earlier than specified.
 Both \fIsig_time\fR and \fIsig_num\fR must have integer values
 between zero and 65535.
-By default, no signal is sent before the job's end time. 
-If a \fIsig_num\fR is specified without any \fIsig_time\fR, 
+By default, no signal is sent before the job's end time.
+If a \fIsig_num\fR is specified without any \fIsig_time\fR,
 the default time will be 60 seconds.
 
 .TP
@@ -947,16 +947,16 @@ the job. By default only errors are displayed.
 .TP
 \fB\-T\fR, \fB\-\-threads\fR=<\fInthreads\fR>
 Request that \fBsrun\fR
-use \fInthreads\fR to initiate and control the parallel job. The 
+use \fInthreads\fR to initiate and control the parallel job. The
 default value is the smaller of 60 or the number of nodes allocated.
-This should only be used to set a low thread count for testing on 
+This should only be used to set a low thread count for testing on
 very small memory computers.
 
 .TP
 \fB\-t\fR, \fB\-\-time\fR=<\fItime\fR>
 Set a limit on the total run time of the job or job step.  If the
-requested time limit for a job exceeds the partition's time limit, 
-the job will be left in a PENDING state (possibly indefinitely).  
+requested time limit for a job exceeds the partition's time limit,
+the job will be left in a PENDING state (possibly indefinitely).
 If the requested time limit for a job step exceeds the partition's
 time limit, the job step will not be initiated.  The default time
 limit is the partition's time limit.  When the time limit is reached,
@@ -980,8 +980,8 @@ killed along with any descendant processes.
 
 .TP
 \fB\-\-task\-prolog\fR=<\fIexecutable\fR>
-The \fBslurmstepd\fR daemon will run \fIexecutable\fR just before launching 
-each task. This will be executed after any TaskProlog parameter 
+The \fBslurmstepd\fR daemon will run \fIexecutable\fR just before launching
+each task. This will be executed after any TaskProlog parameter
 in slurm.conf is executed.
 Besides the normal environment variables, this has SLURM_TASK_PID
 available to identify the process ID of the task being started.
@@ -1076,8 +1076,8 @@ Default from \fIblugene.conf\fR if not set.
 
 .TP
 \fB\-\-conn\-type\fR=<\fItype\fR>
-Require the partition connection type to be of a certain type.  
-On Blue Gene the acceptable of \fItype\fR are MESH, TORUS and NAV.  
+Require the partition connection type to be of a certain type.
+On Blue Gene the acceptable values of \fItype\fR are MESH, TORUS and NAV.
 If NAV, or if not set, then SLURM will try to fit a TORUS else MESH.
 You should not normally set this option.
 SLURM will normally allocate a TORUS if possible for a given geometry.
@@ -1087,10 +1087,10 @@ for virtual node mode, and HTC_L for Linux mode.
 
 .TP
 \fB\-g\fR, \fB\-\-geometry\fR=<\fIXxYxZ\fR>
-Specify the geometry requirements for the job. The three numbers 
-represent the required geometry giving dimensions in the X, Y and 
-Z directions. For example "\-\-geometry=2x3x4", specifies a block 
-of nodes having 2 x 3 x 4 = 24 nodes (actually base partitions on 
+Specify the geometry requirements for the job. The three numbers
+represent the required geometry giving dimensions in the X, Y and
+Z directions. For example "\-\-geometry=2x3x4", specifies a block
+of nodes having 2 x 3 x 4 = 24 nodes (actually base partitions on
 Blue Gene).
 
 .TP
@@ -1110,7 +1110,7 @@ Default from \fIblugene.conf\fR if not set.
 
 .TP
 \fB\-R\fR, \fB\-\-no\-rotate\fR
-Disables rotation of the job's requested geometry in order to fit an 
+Disables rotation of the job's requested geometry in order to fit an
 appropriate partition.
 By default the specified geometry can rotate in three dimensions.
 
@@ -1172,13 +1172,13 @@ will consider this an error, as 15 processes cannot run across 16 nodes.
 .B "IO Redirection"
 .PP
 By default, stdout and stderr will be redirected from all tasks to the
-stdout and stderr of \fBsrun\fR, and stdin will be redirected from the 
-standard input of \fBsrun\fR to all remote tasks. 
-For OS X, the poll() function does not support stdin, so input from 
+stdout and stderr of \fBsrun\fR, and stdin will be redirected from the
+standard input of \fBsrun\fR to all remote tasks.
+For OS X, the poll() function does not support stdin, so input from
 a terminal is not possible.
-This behavior may be changed with the 
-\fB\-\-output\fR, \fB\-\-error\fR, and \fB\-\-input\fR 
-(\fB\-o\fR, \fB\-e\fR, \fB\-i\fR) options. Valid format specifications 
+This behavior may be changed with the
+\fB\-\-output\fR, \fB\-\-error\fR, and \fB\-\-input\fR
+(\fB\-o\fR, \fB\-e\fR, \fB\-i\fR) options. Valid format specifications
 for these options are
 .TP 10
 \fBall\fR
@@ -1187,7 +1187,7 @@ stdin is broadcast to all remote tasks.
 (This is the default behavior)
 .TP
 \fBnone\fR
-stdout and stderr is not received from any task. 
+stdout and stderr is not received from any task.
 stdin is not sent to any task (stdin is closed).
 .TP
 \fItaskid\fR
@@ -1198,7 +1198,7 @@ stdin is redirected from the stdin of \fBsrun\fR to this same task.
 This file will be written on the node executing the task.
 .TP
 \fIfilename\fR
-\fBsrun\fR will redirect stdout and/or stderr to the named file from 
+\fBsrun\fR will redirect stdout and/or stderr to the named file from
 all tasks.
 stdin will be redirected from the named file and broadcast to all
 tasks in the job.  \fIfilename\fR refers to a path on the host
@@ -1207,22 +1207,22 @@ this may result in the output appearing in different places depending
 on whether the job is run in batch mode.
 .TP
 format string
-\fBsrun\fR allows for a format string to be used to generate the 
-named IO file 
+\fBsrun\fR allows for a format string to be used to generate the
+named IO file
 described above. The following list of format specifiers may be
 used in the format string to generate a filename that will be
-unique to a given jobid, stepid, node, or task. In each case, 
+unique to a given jobid, stepid, node, or task. In each case,
 the appropriate number of files are opened and associated with
-the corresponding tasks. Note that any format string containing 
-%t, %n, and/or %N will be written on the node executing the task 
+the corresponding tasks. Note that any format string containing
+%t, %n, and/or %N will be written on the node executing the task
 rather than the node where \fBsrun\fR executes.
 .RS 10
 .TP
 %J
 jobid.stepid of the running job. (e.g. "128.0")
 .TP
-%j 
-jobid of the running job. 
+%j
+jobid of the running job.
 .TP
 %s
 stepid of the running job.
@@ -1239,7 +1239,7 @@ task identifier (rank) relative to current job. This will create a
 separate IO file per task.
 .PP
 A number placed between the percent character and format specifier may be
-used to zero\-pad the result in the IO filename. This number is ignored if 
+used to zero\-pad the result in the IO filename. This number is ignored if
 the format specifier corresponds to  non\-numeric data (%N for example).
 
 Some examples of how the format string may be used for a 4 task job step
@@ -1251,7 +1251,7 @@ job128.0.out
 job%4j.out
 job0128.out
 .TP
-job%j\-%2t.out 
+job%j\-%2t.out
 job128\-00.out, job128\-01.out, ...
 .PP
 .RS -10
@@ -1259,8 +1259,8 @@ job128\-00.out, job128\-01.out, ...
 
 .SH "INPUT ENVIRONMENT VARIABLES"
 .PP
-Some srun options may be set via environment variables. 
-These environment variables, along with their corresponding options, 
+Some srun options may be set via environment variables.
+These environment variables, along with their corresponding options,
 are listed below.
 Note: Command line options will always override these settings.
 .TP 22
@@ -1268,10 +1268,10 @@ Note: Command line options will always override these settings.
 This is used exclusively with PMI (MPICH2 and MVAPICH2) and
 controls the fanout of data communications. The srun command
 sends messages to application programs (via the PMI library)
-and those applications may be called upon to forward that 
+and those applications may be called upon to forward that
 data to up to this number of additional tasks. Higher values
-offload work from the srun command to the applications and 
-likely increase the vulnerability to failures. 
+offload work from the srun command to the applications and
+likely increase the vulnerability to failures.
 The default value is 32.
 .TP
 \fBPMI_FANOUT_OFF_HOST\fR
@@ -1279,23 +1279,23 @@ This is used exclusively with PMI (MPICH2 and MVAPICH2) and
 controls the fanout of data communications.  The srun command
 sends messages to application programs (via the PMI library)
 and those applications may be called upon to forward that
-data to additional tasks. By default, srun sends one message 
-per host and one task on that host forwards the data to other 
-tasks on that host up to \fBPMI_FANOUT\fR. 
-If \fBPMI_FANOUT_OFF_HOST\fR is defined, the user task 
+data to additional tasks. By default, srun sends one message
+per host and one task on that host forwards the data to other
+tasks on that host up to \fBPMI_FANOUT\fR.
+If \fBPMI_FANOUT_OFF_HOST\fR is defined, the user task
 may be required to forward the data to tasks on other hosts.
-Setting \fBPMI_FANOUT_OFF_HOST\fR may increase performance. 
+Setting \fBPMI_FANOUT_OFF_HOST\fR may increase performance.
 Since more work is performed by the PMI library loaded by
-the user application, failures also can be more common and 
+the user application, failures also can be more common and
 more difficult to diagnose.
 .TP
 \fBPMI_TIME\fR
-This is used exclusively with PMI (MPICH2 and MVAPICH2) and 
-controls how much the communications from the tasks to the 
-srun are spread out in time in order to avoid overwhelming the 
+This is used exclusively with PMI (MPICH2 and MVAPICH2) and
+controls how much the communications from the tasks to the
+srun are spread out in time in order to avoid overwhelming the
 srun command with work. The default value is 500 (microseconds)
-per task. On relatively slow processors or systems with very 
-large processor counts (and large PMI data sets), higher values 
+per task. On relatively slow processors or systems with very
+large processor counts (and large PMI data sets), higher values
 may be required.
 .TP
 \fBSLURM_CONF\fR
@@ -1350,15 +1350,15 @@ Same as \fB\-\-epilog\fR
 Same as \fB\-\-exclusive\fR
 .TP
 \fBSLURM_EXIT_ERROR\fR
-Specifies the exit code generated when a SLURM error occurs 
+Specifies the exit code generated when a SLURM error occurs
 (e.g. invalid options).
 This can be used by a script to distinguish application exit codes from
 various SLURM error conditions.
 Also see \fBSLURM_EXIT_IMMEDIATE\fR.
 .TP
 \fBSLURM_EXIT_IMMEDIATE\fR
-Specifies the exit code generated when the \fB\-\-immediate\fR option 
-is used and resources are not currently available. 
+Specifies the exit code generated when the \fB\-\-immediate\fR option
+is used and resources are not currently available.
 This can be used by a script to distinguish application exit codes from
 various SLURM error conditions.
 Also see \fBSLURM_EXIT_ERROR\fR.
@@ -1457,8 +1457,8 @@ Same as \fB\-W, \-\-wckey\fR
 
 .SH "OUTPUT ENVIRONMENT VARIABLES"
 .PP
-srun will set some environment variables in the environment 
-of the executing tasks on the remote compute nodes. 
+srun will set some environment variables in the environment
+of the executing tasks on the remote compute nodes.
 These environment variables are:
 
 .TP 22
@@ -1483,10 +1483,10 @@ if specified on the execute line.
 .TP
 \fBSLURM_CPUS_ON_NODE\fR
 Count of processors available to the job on this node.
-Note the select/linear plugin allocates entire nodes to 
+Note the select/linear plugin allocates entire nodes to
 jobs, so the value indicates the total count of CPUs on the node.
-The select/cons_res plugin allocates individual processors 
-to jobs, so this number indicates the number of processors 
+The select/cons_res plugin allocates individual processors
+to jobs, so this number indicates the number of processors
 on this node allocated to the job.
 
 .TP
@@ -1502,7 +1502,7 @@ Job id of the executing job
 
 .TP
 \fBSLURM_LAUNCH_NODE_IPADDR\fR
-IP address of the node from which the task launch was 
+IP address of the node from which the task launch was
 initiated (where the srun command ran from)
 .TP
 \fBSLURM_LOCALID\fR
@@ -1567,19 +1567,19 @@ Do not free a block on Blue Gene systems only.
 The block name on Blue Gene systems only.
 
 .SH "SIGNALS AND ESCAPE SEQUENCES"
-Signals sent to the \fBsrun\fR command are automatically forwarded to 
+Signals sent to the \fBsrun\fR command are automatically forwarded to
 the tasks it is controlling with a few exceptions. The escape sequence
-\fB<control\-c>\fR will report the state of all tasks associated with 
-the \fBsrun\fR command. If \fB<control\-c>\fR is entered twice within 
+\fB<control\-c>\fR will report the state of all tasks associated with
+the \fBsrun\fR command. If \fB<control\-c>\fR is entered twice within
 one second, then the associated SIGINT signal will be sent to all tasks
-and a termination sequence will be entered sending SIGCONT, SIGTERM, 
+and a termination sequence will be entered sending SIGCONT, SIGTERM,
 and SIGKILL to all spawned tasks.
-If a third \fB<control\-c>\fR is received, the srun program will be 
+If a third \fB<control\-c>\fR is received, the srun program will be
 terminated without waiting for remote tasks to exit or their I/O to
 complete.
 
-The escape sequence \fB<control\-z>\fR is presently ignored. Our intent 
-is for this put the \fBsrun\fR command into a mode where various special 
+The escape sequence \fB<control\-z>\fR is presently ignored. Our intent
+is for this to put the \fBsrun\fR command into a mode where various special
 actions may be invoked.
 
 .SH "MPI SUPPORT"
@@ -1593,7 +1593,7 @@ and some MPICH1 modes). For example: "srun \-n16 a.out".
 
 2. SLURM creates a resource allocation for the job and then
 mpirun launches tasks using SLURM's infrastructure (OpenMPI,
-LAM/MPI, HP-MPI and some MPICH1 modes). 
+LAM/MPI, HP-MPI and some MPICH1 modes).
 
 3. SLURM creates a resource allocation for the job and then
 mpirun launches tasks using some mechanism other than SLURM,
@@ -1602,8 +1602,8 @@ These tasks initiated outside of SLURM's monitoring
 or control. SLURM's epilog should be configured to purge
 these tasks when the job's allocation is relinquished.
 
-See \fIhttps://computing.llnl.gov/linux/slurm/mpi_guide.html\fR 
-for more information on use of these various MPI implementation 
+See \fIhttps://computing.llnl.gov/linux/slurm/mpi_guide.html\fR
+for more information on use of these various MPI implementation
 with SLURM.
 
 .SH "MULTIPLE PROGRAM CONFIGURATION"
@@ -1614,11 +1614,11 @@ space:
 Task rank
 One or more task ranks to use this configuration.
 Multiple values may be comma separated.
-Ranges may be indicated with two numbers separated with a '\-' with 
+Ranges may be indicated with two numbers separated with a '\-' with
 the smaller number first (e.g. "0\-4" and not "4\-0").
-To indicate all tasks, specify a rank of '*' (in which case you probably 
+To indicate all tasks, specify a rank of '*' (in which case you probably
 should not be using this option).
-If an attempt is made to initiate a task for which no executable 
+If an attempt is made to initiate a task for which no executable
 program is defined, the following error message will be produced
 "No executable program specified for this task".
 .TP
@@ -1661,8 +1661,8 @@ For example:
 
 .SH "EXAMPLES"
 This simple example demonstrates the execution of the command \fBhostname\fR
-in eight tasks. At least eight processors will be allocated to the job 
-(the same as the task count) on however many nodes are required to satisfy 
+in eight tasks. At least eight processors will be allocated to the job
+(the same as the task count) on however many nodes are required to satisfy
 the request. The output of each task will be proceeded with its task number.
 (The machine "dev" in the example below has a total of two CPUs per node)
 
@@ -1705,8 +1705,8 @@ dev[7\-10]
 
 .fi
 .PP
-The follwing script runs two job steps in parallel 
-within an allocated set of nodes. 
+The following script runs two job steps in parallel
+within an allocated set of nodes.
 
 .nf
 
@@ -1730,8 +1730,8 @@ STEPID     PARTITION     USER      TIME NODELIST
 .fi
 .PP
 This example demonstrates how one executes a simple MPICH job.
-We use \fBsrun\fR to build a list of machines (nodes) to be used by 
-\fBmpirun\fR in its required format. A sample command line and 
+We use \fBsrun\fR to build a list of machines (nodes) to be used by
+\fBmpirun\fR in its required format. A sample command line and
 the script to be executed follow.
 
 .nf
@@ -1752,11 +1752,11 @@ rm $MACHINEFILE
 
 > salloc \-N2 \-n4 test.sh
 
-.fi 
+.fi
 .PP
-This simple example demonstrates the execution of different jobs on different 
-nodes in the same srun.  You can do this for any number of nodes or any 
-number of jobs.  The executables are placed on the nodes sited by the 
+This simple example demonstrates the execution of different jobs on different
+nodes in the same srun.  You can do this for any number of nodes or any
+number of jobs.  The executables are placed on the nodes sited by the
 SLURM_NODEID env var.  Starting at 0 and going to the number specified on
 the srun commandline.
 
@@ -1773,23 +1773,23 @@ esac
 > srun \-N2 test.sh
 dev0
 is where I am running
-I am running on 
+I am running on
 dev1
 
 .fi
 .PP
-This example demonstrates use of multi\-core options to control layout 
-of tasks. 
-We request that four sockets per node and two cores per socket be 
-dedicated to the job. 
+This example demonstrates use of multi\-core options to control layout
+of tasks.
+We request that four sockets per node and two cores per socket be
+dedicated to the job.
 
 .nf
 
 > srun \-N2 \-B 4\-4:2\-2 a.out
 .fi
 .PP
-This example shows a script in which Slurm is used to provide resource 
-management for a job by executing the various job steps as processors 
+This example shows a script in which Slurm is used to provide resource
+management for a job by executing the various job steps as processors
 become available for their dedicated use.
 
 .nf
diff --git a/doc/man/man1/srun_cr.1 b/doc/man/man1/srun_cr.1
index ca7a320932aa34533d9b4cb71055a7b8f2e320c2..11378aa75966e14906119c8001bc963a7ef4f2fe 100644
--- a/doc/man/man1/srun_cr.1
+++ b/doc/man/man1/srun_cr.1
@@ -9,7 +9,7 @@ srun_cr \- run parallel jobs with checkpoint/restart support
 .SH DESCRIPTION
 The design of \fBsrun_cr\fR is inspired by \fBmpiexec_cr\fR from MVAPICH2 and
 \fBcr_restart\fR form BLCR.
-It is a wrapper around the \fBsrun\fR command to enable batch job 
+It is a wrapper around the \fBsrun\fR command to enable batch job
 checkpoint/restart support when used with SLURM's \fBcheckpoint/blcr\fR plugin.
 
 .SH "OPTIONS"
@@ -22,12 +22,12 @@ See "man srun" for details.
 After initialization, \fBsrun_cr\fR registers a thread context callback
 function.
 Then it forks a process and executes "cr_run \-\-omit srun" with its arguments.
-\fBcr_run\fR is employed to exclude the \fBsrun\fR process from being dumped 
+\fBcr_run\fR is employed to exclude the \fBsrun\fR process from being dumped
 upon checkpoint.
-All catchable signals except SIGCHLD sent to \fBsrun_cr\fR will be forwarded 
+All catchable signals except SIGCHLD sent to \fBsrun_cr\fR will be forwarded
 to the child \fBsrun\fR process.
 SIGCHLD will be captured to mimic the exit status of \fBsrun\fR when it exits.
-Then \fBsrun_cr\fR loops waiting for termination of tasks being launched 
+Then \fBsrun_cr\fR loops waiting for termination of tasks being launched
 from \fBsrun\fR.
 
 The step launch logic of SLURM is augmented to check if \fBsrun\fR is running
@@ -39,16 +39,16 @@ After launching the tasks, \fBsrun\fR tires to connect to the socket and sends
 the job ID, step ID and the nodes allocated to the step to \fBsrun_cr\fR.
 
 Upon checkpoint, \fRsrun_cr\fR checks to see if the tasks have been launched.
-If not \fRsrun_cr\fR first forwards the checkpoint request to the tasks by 
+If not \fRsrun_cr\fR first forwards the checkpoint request to the tasks by
 calling the SLURM API \fBslurm_checkpoint_tasks()\fR before dumping its process
 context.
 
-Upon restart, \fBsrun_cr\fR checks to see if the tasks have been previously 
-launched and checkpointed. 
+Upon restart, \fBsrun_cr\fR checks to see if the tasks have been previously
+launched and checkpointed.
 If true, the environment variable \fRSLURM_RESTART_DIR\fR is set to the directory
 of the checkpoint image files of the tasks.
-Then \fBsrun\fR is forked and executed again. 
-The environment variable will be used by the \fBsrun\fR command to restart 
+Then \fBsrun\fR is forked and executed again.
+The environment variable will be used by the \fBsrun\fR command to restart
 execution of the tasks from the previous checkpoint.
 
 .SH "COPYING"
diff --git a/doc/man/man1/sshare.1 b/doc/man/man1/sshare.1
index 2f8a890ba03bb115200fe7cd3a184aed7ebca95d..9d76c820d79fcc17e6f6c30f277ca9dbaecc7bdb 100644
--- a/doc/man/man1/sshare.1
+++ b/doc/man/man1/sshare.1
@@ -9,7 +9,7 @@ sshare \- Tool for listing the shares of associations to a cluster.
 .SH "DESCRIPTION"
 \fBsshare\fR is used to view SLURM share information.  This command is
 only viable when running with the priority/multifactor plugin.
-The sshare information is derived from a database with the interface 
+The sshare information is derived from a database with the interface
 being provided by \fBslurmdbd\fR (SLURM Database daemon) which is
 read in from the slurmctld and used to process the shares available
 to a given association.  sshare provides SLURM share information of
diff --git a/doc/man/man1/sstat.1 b/doc/man/man1/sstat.1
index 27d30348fbd63026a5f7e9bc58580eaaf43a1b74..3501dcdfe2cfd0ec4ca373218d5c78623f13e141 100644
--- a/doc/man/man1/sstat.1
+++ b/doc/man/man1/sstat.1
@@ -5,103 +5,103 @@ sstat \- Display various status information
 of a running job/step.
 
 .SH "SYNOPSIS"
-.BR "sstat " 
-[\fIOPTIONS\fR...] 
+.BR "sstat "
+[\fIOPTIONS\fR...]
 
 .SH "DESCRIPTION"
 .PP
 Status information for running jobs invoked with SLURM.
 .PP
-The 
+The
 .BR "sstat "
 command displays job status information for your analysis.
-The 
+The
 .BR "sstat "
 command displays information pertaining to CPU, Task, Node, Resident
 Set Size (RSS) and Virtual Memory (VM).
-You can tailor the output with the use of the 
-\f3\-\-fields=\fP 
+You can tailor the output with the use of the
+\f3\-\-fields=\fP
 option to specify the fields to be shown.
 .PP
-For the root user, the 
+For the root user, the
 .BR "sstat "
 command displays job status data for any job running on the system.
 .PP
-For the non\-root user, the 
+For the non\-root user, the
 .BR "sstat "
-command limits the display of job status data to jobs that were 
+command limits the display of job status data to jobs that were
 launched with their own user identifier (UID).
 
-.TP 
+.TP
 \f3\-a \fP\f3,\fP \f3\-\-allsteps\fP
 Print all steps for the given job(s) when no step is specified.
 
-.TP 
+.TP
 \f3\-e \fP\f3,\fP \f3\-\-helpformat\fP
 Print a list of fields that can be specified with the '\-\-format' option.
 
-.TP 
+.TP
 \f3\-h \fP\f3,\fP \f3\-\-help\fP
 Displays a general help message.
 
-.TP 
+.TP
 \f3\-j \fP\f3,\fP \f3\-\-jobs\fP
 Format is <job(.step)>. Stat this job step or comma-separated list of
 job steps. This option is required.  The step portion will default to
 step 0 if not specified, unless the \-\-allsteps flag is set where not
 specifing a step will result in all running steps to be displayed.
 
-.TP 
+.TP
 \f3\-n \fP\f3,\fP \f3\-\-noheader\fP
 No header will be added to the beginning of output. The default is to print a header.
 
-.TP 
+.TP
 \f3\-o \fP\f3,\fP \f3\-\-format\fP,\fP \f3\-\-fields\fP
 Comma seperated list of fields.
 (use '\-\-helpformat' for a list of available fields).
 
-.TP 
+.TP
 \f3\-p \fP\f3,\fP \f3\-\-parsable\fP
 output will be '|' delimited with a '|' at the end.
 
-.TP 
+.TP
 \f3\-P \fP\f3,\fP \f3\-\-parsable2\fP
 output will be '|' delimited without a '|' at the end
 
-.TP 
+.TP
 \f3\-\-usage\fP
 Display brief usage message.
 
-.TP 
+.TP
 \f3\-v\fP\f3,\fP \f3\-\-verbose\fP
 Primarily for debugging purposes, report the state of various variables during processing.
 
-.TP 
+.TP
 \f3\-V \fP\f3,\fP \f3\-\-version\fP
 Print version.
 
 
 .SS "Job Status Fields"
 The following are the field options:
-.RS 
+.RS
 .TP
 \f3AveCPU\fP
 
 
-.TP 
-\f3AvePages\fP 
+.TP
+\f3AvePages\fP
 
 
 .TP
-\f3AveRSS\fP 
+\f3AveRSS\fP
 
 
 .TP
 \f3AveVMSize\fP
 
 
-.TP 
-\f3JobID\fP 
+.TP
+\f3JobID\fP
 
 
 .TP
diff --git a/doc/man/man1/strigger.1 b/doc/man/man1/strigger.1
index a797e2399ad24aa6343317506ad059080981c2c8..46695b185aaeca7e422385fe8cac64105c2e67bb 100644
--- a/doc/man/man1/strigger.1
+++ b/doc/man/man1/strigger.1
@@ -12,37 +12,37 @@ strigger \- Used set, get or clear Slurm trigger information.
 
 .SH "DESCRIPTION"
 \fBstrigger\fR is used to set, get or clear Slurm trigger information.
-Triggers include events such as a node failing, a job reaching its 
+Triggers include events such as a node failing, a job reaching its
 time limit or a job terminating.
-These events can cause actions such as the execution of an arbitrary 
-script. 
-Typical uses include notifying system administrators of node failures 
+These events can cause actions such as the execution of an arbitrary
+script.
+Typical uses include notifying system administrators of node failures
 and gracefully terminating a job when it's time limit is approaching.
-A hostlist expression for the nodelist or job ID is passed as an argument 
+A hostlist expression for the nodelist or job ID is passed as an argument
 to the program.
 
-Trigger events are not processed instantly, but a check is performed for 
-trigger events on a periodic basis (currently every 15 seconds). 
-Any trigger events which occur within that interval will be compared 
-against the trigger programs set at the end of the time interval. 
+Trigger events are not processed instantly, but a check is performed for
+trigger events on a periodic basis (currently every 15 seconds).
+Any trigger events which occur within that interval will be compared
+against the trigger programs set at the end of the time interval.
 The trigger program will be executed once for any event occuring in
 that interval.
 The record of those events (e.g. nodes which went DOWN in the previous
 15 seconds) will then be cleared.
-The trigger program must set a new trigger before the end of the next 
+The trigger program must set a new trigger before the end of the next
 interval to insure that no trigger events are missed.
 If desired, multiple trigger programs can be set for the same event.
 
-\fBIMPORTANT NOTE:\fR This command can only set triggers if run by the 
+\fBIMPORTANT NOTE:\fR This command can only set triggers if run by the
 user \fISlurmUser\fR unless \fISlurmUser\fR is configured as user root.
 This is required for the \fIslurmctld\fR daemon to set the appropriate
-user and group IDs for the executed program. 
-Also note that the program is executed on the same node that the 
+user and group IDs for the executed program.
+Also note that the program is executed on the same node that the
 \fIslurmctld\fR daemon uses rather than some allocated compute node.
 To check the value of \fISlurmUser\fR, run the command:
 
-\fIscontrol show config | grep SlurmUser\fR 
- 
+\fIscontrol show config | grep SlurmUser\fR
+
 .SH "ARGUMENTS"
 .TP
 \fB\-\-block_err\fP
@@ -52,7 +52,7 @@ Trigger an event when a BlueGene block enters an ERROR state.
 \fB\-\-clear\fP
 Clear or delete a previously defined event trigger.
 The \fB\-\-id\fR, \fB\-\-jobid\fR or \fB\-\-userid\fR
-option must be specified to identify the trigger(s) to 
+option must be specified to identify the trigger(s) to
 be cleared.
 
 .TP
@@ -85,28 +85,28 @@ Trigger ID number.
 \fB\-I\fR, \fB\-\-idle\fR
 Trigger an event if the specified node remains in an IDLE state
 for at least the time period specified by the \fB\-\-offset\fR
-option. This can be useful to hibernate a node that remains idle, 
+option. This can be useful to hibernate a node that remains idle,
 thus reducing power consumption.
 
 .TP
 \fB\-j\fR, \fB\-\-jobid\fR=\fIid\fR
 Job ID of interest.
-\fBNOTE:\fR The \fB\-\-jobid\fR option can not be used in conjunction 
-with the \fB\-\-node\fR option. When the \fB\-\-jobid\fR option is 
-used in conjunction with the \fB\-\-up\fR or \fB\-\-down\fR option, 
-all nodes allocated to that job will considered the nodes used as a 
+\fBNOTE:\fR The \fB\-\-jobid\fR option can not be used in conjunction
+with the \fB\-\-node\fR option. When the \fB\-\-jobid\fR option is
+used in conjunction with the \fB\-\-up\fR or \fB\-\-down\fR option,
+all nodes allocated to that job will be considered the nodes used as a
 trigger event.
 
 .TP
 \fB\-n\fR, \fB\-\-node\fR[=\fIhost\fR]
-Host name(s) of interest. 
-By default, all nodes associated with the job (if \fB\-\-jobid\fR 
+Host name(s) of interest.
+By default, all nodes associated with the job (if \fB\-\-jobid\fR
 is specified) or on the system are considered for event triggers.
-\fBNOTE:\fR The \fB\-\-node\fR option can not be used in conjunction 
-with the \fB\-\-jobid\fR option. When the \fB\-\-jobid\fR option is 
-used in conjunction with the \fB\-\-up\fR, \fB\-\-down\fR or 
-\fB\-\-drained\fR option, 
-all nodes allocated to that job will considered the nodes used as a 
+\fBNOTE:\fR The \fB\-\-node\fR option can not be used in conjunction
+with the \fB\-\-jobid\fR option. When the \fB\-\-jobid\fR option is
+used in conjunction with the \fB\-\-up\fR, \fB\-\-down\fR or
+\fB\-\-drained\fR option,
+all nodes allocated to that job will be considered the nodes used as a
 trigger event.
 
 .TP
@@ -114,8 +114,8 @@ trigger event.
 The specified action should follow the event by this time interval.
 Specify a negative value if action should preceded the event.
 The default value is zero if no \fB\-\-offset\fR option is specified.
-The resolution of this time is about 20 seconds, so to execute 
-a script not less than five minutes prior to a job reaching its 
+The resolution of this time is about 20 seconds, so to execute
+a script not less than five minutes prior to a job reaching its
 time limit, specify \fB\-\-offset=320\fR (5 minutes plus 20 seconds).
 
 .TP
@@ -123,12 +123,12 @@ time limit, specify \fB\-\-offset=320\fR (5 minutes plus 20 seconds).
 Execute the program at the specified fully qualified pathname
 when the event occurs.
 The program will be executed as the user who sets the trigger.
-If the program fails to terminate within 5 minutes, it will 
+If the program fails to terminate within 5 minutes, it will
 be killed along with any spawned processes.
 
 .TP
 \fB\-Q\fR, \fB\-\-quiet\fR
-Do not report non\-fatal errors. 
+Do not report non\-fatal errors.
 This can be useful to clear triggers which may have already been purged.
 
 .TP
@@ -138,9 +138,9 @@ Trigger an event when the system configuration changes.
 .TP
 \fB\-\-set\fP
 Register an event trigger based upon the supplied options.
-NOTE: An event is only triggered once. A new event trigger 
-must be set established for future events of the same type 
-to be processed. 
+NOTE: An event is only triggered once. A new event trigger
+must be established for future events of the same type
+to be processed.
 
 .TP
 \fB\-t\fR, \fB\-\-time\fR
@@ -159,7 +159,7 @@ Specify either a user name or user ID.
 
 .TP
 \fB\-v\fR, \fB\-\-verbose\fR
-Print detailed event logging. This includes time\-stamps on data structures, 
+Print detailed event logging. This includes time\-stamps on data structures,
 record counts, etc.
 
 .TP
@@ -181,13 +181,13 @@ Resource ID: job ID or host names or "*" for any host
 
 .TP
 \fBTYPE\fP
-Trigger type: \fItime\fR or \fIfini\fR (for jobs only), 
+Trigger type: \fItime\fR or \fIfini\fR (for jobs only),
 \fIdown\fR or \fIup\fR (for jobs or nodes), or
 \fIdrained\fR, \fIidle\fR or \fIreconfig\fR (for nodes only)
 
 .TP
 \fBOFFSET\fP
-Time offset in seconds. Negative numbers indicated the action should 
+Time offset in seconds. Negative numbers indicate the action should
 occur before the event (if possible)
 
 .TP
@@ -199,7 +199,7 @@ Name of the user requesting the action
 Pathname of the program to execute when the event occurs
 
 .SH "EXAMPLES"
-Execute the program "/usr/sbin/slurm_admin_notify" whenever 
+Execute the program "/usr/sbin/slurm_admin_notify" whenever
 any node in the cluster goes down. The subject line will include
 the node names which have entered the down state (passed as an
 argument to the script by SLURM).
@@ -228,7 +228,7 @@ any node in the cluster remains in the idle state for at least
 .fi
 
 .PP
-Execute the program "/home/joe/clean_up" when job 1234 is within 
+Execute the program "/home/joe/clean_up" when job 1234 is within
 10 minutes of reaching its time limit.
 
 .nf
diff --git a/doc/man/man1/sview.1 b/doc/man/man1/sview.1
index 1f8f9f59b49b99df27671547303d13d9021d578c..fa63859e335f497a132fd666e4a1c66ff79ae5d7 100644
--- a/doc/man/man1/sview.1
+++ b/doc/man/man1/sview.1
@@ -1,45 +1,45 @@
 .TH "sview" "1" "SLURM 2.0" "July 2009" "SLURM Commands"
 .SH "NAME"
-.LP 
+.LP
 sview \- graphical user interface to view and modify SLURM state.
 
 .SH "SYNOPSIS"
-.LP 
+.LP
 sview
 
 .SH "DESCRIPTION"
-.LP 
-sview can be used to view SLURM configuration, job, 
-step, node and partitions state information. 
+.LP
+sview can be used to view SLURM configuration, job,
+step, node and partitions state information.
 Authorized users can also modify select information.
 .LP
 The primary display modes are \fIJobs\fR and \fIPartitions\fR, each with a selection tab.
-There is also an optional map of the nodes on the left side of the window which 
+There is also an optional map of the nodes on the left side of the window which
 will show the nodes associated with each job or partition.
-Left\-click on the tab of the display you would like to see. 
+Left\-click on the tab of the display you would like to see.
 Right\-click on the tab in order to control which fields will be displayed.
 .LP
-Within the display window, left\-click on the header to control the sort 
+Within the display window, left\-click on the header to control the sort
 order of entries (e.g. increasing or decreasing) in the diplay.
 You can also left\-click and drag the headers to move them right or left in the display.
-If a JobID has an arrow next to it, click on that arrow to display or hide 
-information about that job's steps. 
+If a JobID has an arrow next to it, click on that arrow to display or hide
+information about that job's steps.
 Right\-click on a line of the display to get more information about the record.
 .LP
-There is an \fIAdmin Mode\fR option which permits the user root to modify many of 
+There is an \fIAdmin Mode\fR option which permits the user root to modify many of
 the fields displayed, such as node state or job time limit.
 In the mode, a \fISLURM Reconfigure\fR Action is also available.
-It is recommended that \fIAdmin Mode\fR be used only while modifications are 
-actively being made. 
-Disable \fIAdmin Mode\fR immediately after the changes to avoid possibly making 
-unintended changes. 
+It is recommended that \fIAdmin Mode\fR be used only while modifications are
+actively being made.
+Disable \fIAdmin Mode\fR immediately after the changes to avoid possibly making
+unintended changes.
 
 .SH "NOTES"
-The sview command can only be build if \fIgtk+\-2.0\fR is installed. 
+The sview command can only be built if \fIgtk+\-2.0\fR is installed.
 Systems lacking these libraries will have SLURM installed without
 the sview command.
 
-On larger systems (2000+ nodes) some gtk themes can considerably slow down 
+On larger systems (2000+ nodes) some gtk themes can considerably slow down
 the grid display.  If you think this is happening you may
 try defining SVIEW_GRID_SPEEDUP=1 in your environment.  This will use
 a code path to try to avoid functions that typically take a
@@ -68,6 +68,6 @@ FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 .SH "SEE ALSO"
-.LP 
-sinfo(1), squeue(1), scontrol(1), slurm.conf(5), 
+.LP
+sinfo(1), squeue(1), scontrol(1), slurm.conf(5),
 sched_setaffinity(2), numa(3)
diff --git a/doc/man/man3/slurm_allocate_resources.3 b/doc/man/man3/slurm_allocate_resources.3
index 6f18756e27031c39fb0d91c0764957af9ad0f242..8f227ae1fa5c07698a05cb7d32d58df755f969ee 100644
--- a/doc/man/man3/slurm_allocate_resources.3
+++ b/doc/man/man3/slurm_allocate_resources.3
@@ -3,44 +3,44 @@
 slurm_allocate_resources, slurm_allocate_resources_blocking,
 slurm_allocation_msg_thr_create, slurm_allocation_msg_thr_destroy,
 slurm_allocation_lookup, slurm_allocation_lookup_lite,
-slurm_confirm_allocation, 
-slurm_free_submit_response_response_msg, slurm_init_job_desc_msg, 
+slurm_confirm_allocation,
+slurm_free_submit_response_response_msg, slurm_init_job_desc_msg,
 slurm_job_will_run, slurm_read_hostfile, slurm_submit_batch_job
 \- Slurm job initiation functions
 .SH "SYNTAX"
-.LP 
+.LP
 #include <slurm/slurm.h>
-.LP 
+.LP
 int \fBslurm_allocate_resources\fR (
-.br 
+.br
 	job_desc_msg_t *\fIjob_desc_msg_ptr\fP,
-.br 
+.br
 	resource_allocation_response_msg_t **\fIslurm_alloc_msg_pptr\fP
-.br 
+.br
 );
-.LP 
+.LP
 resource_allocation_response_msg_t *\fBslurm_allocate_resources_blocking\fR (
-.br 
+.br
 	job_desc_msg_t *\fIjob_desc_msg_ptr\fP,
-.br 
+.br
 	time_t \fItimeout\fP, void \fI(*pending_callback)(uint32_t job_id)\fP
-.br 
+.br
 );
-.LP 
+.LP
 allocation_msg_thread_t *\fBslurm_allocation_msg_thr_create\fR (
-.br 
+.br
 	uint16_t *\fIport\fP,
-.br 
+.br
 	slurm_allocation_callbacks_t *\fIcallbacks\fP
-.br 
+.br
 );
-.LP 
+.LP
 void *\fBslurm_allocation_msg_thr_destroy\fR (
-.br 
+.br
 	allocation_msg_thread_t *\fIslurm_alloc_msg_thr_ptr\fP
-.br 
+.br
 );
-.LP 
+.LP
 int \fBslurm_allocation_lookup\fR {
 .br
 	uint32_t \fIjobid\fP,
@@ -48,7 +48,7 @@ int \fBslurm_allocation_lookup\fR {
 	resource_allocation_response_msg_t **\fIslurm_alloc_msg_pptr\fP
 .br
 );
-.LP 
+.LP
 int \fBslurm_allocation_lookup_lite\fR {
 .br
 	uint32_t \fIjobid\fP,
@@ -56,106 +56,106 @@ int \fBslurm_allocation_lookup_lite\fR {
 	resource_allocation_response_msg_t **\fIslurm_alloc_msg_pptr\fP
 .br
 );
-.LP 
+.LP
 int \fBslurm_confirm_allocation\fR (
-.br 
+.br
 	old_job_alloc_msg_t *\fIold_job_desc_msg_ptr\fP,
-.br 
+.br
 	resource_allocation_response_msg_t **\fIslurm_alloc_msg_pptr\fP
-.br 
+.br
 );
 .LP
-void \fBslurm_free_resource_allocation_response_msg\fR ( 
-.br 
-	resource_allocation_response_msg_t *\fIslurm_alloc_msg_ptr\fP 
-.br 
+void \fBslurm_free_resource_allocation_response_msg\fR (
+.br
+	resource_allocation_response_msg_t *\fIslurm_alloc_msg_ptr\fP
+.br
 );
 .LP
-void \fBslurm_free_submit_response_response_msg\fR ( 
-.br 
-	submit_response_msg_t *\fIslurm_submit_msg_ptr\fP 
-.br 
+void \fBslurm_free_submit_response_response_msg\fR (
+.br
+	submit_response_msg_t *\fIslurm_submit_msg_ptr\fP
+.br
 );
 .LP
 void \fBslurm_init_job_desc_msg\fR (
-.br 
+.br
 	job_desc_msg_t *\fIjob_desc_msg_ptr\fP
-.br 
+.br
 );
 .LP
 int \fBslurm_job_will_run\fR (
-.br 
+.br
 	job_desc_msg_t *\fIjob_desc_msg_ptr\fP,
-.br 
+.br
 );
 .LP
 int \fBslurm_read_hostfile\fR (
-.br 
+.br
 	char *\fIfilename\fP, int \fIn\fP
-.br 
+.br
 );
 .LP
 int \fBslurm_submit_batch_job\fR (
-.br 
+.br
 	job_desc_msg_t *\fIjob_desc_msg_ptr\fP,
-.br 
-	submit_response_msg_t **\fIslurm_submit_msg_pptr\fP 
-.br 
+.br
+	submit_response_msg_t **\fIslurm_submit_msg_pptr\fP
+.br
 );
 .SH "ARGUMENTS"
-.LP 
-.TP 
+.LP
+.TP
 \fIjob_desc_msg_ptr\fP
-Specifies the pointer to a job request specification. See slurm.h for full details 
-on the data structure's contents. 
+Specifies the pointer to a job request specification. See slurm.h for full details
+on the data structure's contents.
 .TP
 \fIcallbacks\fP
 Specifies the pointer to a allocation callbacks structure.  See
 slurm.h for full details on the data structure's contents.
-.TP 
+.TP
 \fIold_job_desc_msg_ptr\fP
-Specifies the pointer to a description of an existing job. See slurm.h for 
-full details on the data structure's contents. 
-.TP 
+Specifies the pointer to a description of an existing job. See slurm.h for
+full details on the data structure's contents.
+.TP
 \fIslurm_alloc_msg_pptr\fP
-Specifies the double pointer to the structure to be created and filled with a 
-description of the created resource allocation (job): job ID, list of allocated nodes, 
-processor count per allocated node, etc. See slurm.h for full details on the data 
-structure's contents. 
-.TP 
+Specifies the double pointer to the structure to be created and filled with a
+description of the created resource allocation (job): job ID, list of allocated nodes,
+processor count per allocated node, etc. See slurm.h for full details on the data
+structure's contents.
+.TP
 \fIslurm_alloc_msg_ptr\fP
-Specifies the pointer to the structure to be created and filled in by the function 
+Specifies the pointer to the structure to be created and filled in by the function
 \fIslurm_allocate_resources\fP,
 \fIslurm_allocate_resources_blocking\fP,
-\fIslurm_allocation_lookup\fP, \fIslurm_allocation_lookup_lite\fP, 
+\fIslurm_allocation_lookup\fP, \fIslurm_allocation_lookup_lite\fP,
 \fIslurm_confirm_allocation\fP or \fIslurm_job_will_run\fP.
-.TP 
+.TP
 \fIslurm_alloc_msg_thr_ptr\fP
 Specigies the pointer to the structure created and returned by the
 function \fIslurm_allocation_msg_thr_create\fP.  Must be destroyed
 with function \fIslurm_allocation_msg_thr_destroy\fP.
-.TP 
+.TP
 \fIslurm_submit_msg_pptr\fP
-Specifies the double pointer to the structure to be created and filled with a description 
+Specifies the double pointer to the structure to be created and filled with a description
 of the created job: job ID, etc. See slurm.h for full details on the
-data structure's contents. 
+data structure's contents.
 .TP
 \fIslurm_submit_msg_ptr\fP
 Specifies the pointer to the structure to be created and filled in by the function \fIslurm_submit_batch_job\fP.
 .SH "DESCRIPTION"
-.LP 
-\fBslurm_allocate_resources\fR Request a resource allocation for a job. If 
-successful, a job entry is created. Note that if the job's requested node 
-count or time allocation are outside of the partition's limits then a job 
-entry will be created, a warning indication will be placed in the \fIerror_code\fP field of the response message, and the job will be left 
+.LP
+\fBslurm_allocate_resources\fR Request a resource allocation for a job. If
+successful, a job entry is created. Note that if the job's requested node
+count or time allocation are outside of the partition's limits then a job
+entry will be created, a warning indication will be placed in the \fIerror_code\fP field of the response message, and the job will be left
 queued until the partition's limits are changed.
-Always release the response message when no longer required using 
+Always release the response message when no longer required using
 the function \fBslurm_free_resource_allocation_response_msg\fR.  This
 function only makes the request once.  If the allocation is not
 avaliable immediately the node_cnt variable in the resp will be 0.  If
 you want a function that will block until either an error is recieved
 or an allocation is granted you can use the
-\fIslurm_allocate_resources_blocking\fP function described below. 
+\fIslurm_allocate_resources_blocking\fP function described below.
 .LP
 \fBslurm_allocate_resources_blocking\fR Request a resource allocation for a
 job.  This call will block until the allocation is granted, an error
@@ -173,38 +173,38 @@ message is recieved from the controller.  This message thread is
 needed to receive messages from the controller about node failure in
 an allocation and other important messages.  Although technically not
 required, it could be very helpful to inform about problems with the
-allocation. 
+allocation.
 .LP
 \fBslurm_allocation_msg_thr_destroy\fR Shutdown the message handler
  talking with the controller dealing with messages from the controller during
- an allocation. 
+ an allocation.
 .LP
-\fBslurm_confirm_allocation\fR Return detailed information on a specific 
+\fBslurm_confirm_allocation\fR Return detailed information on a specific
 existing job allocation. \fBOBSOLETE FUNCTION: Use slurm_allocation_lookup
-instead.\fR This function may only be successfully executed by the job's 
+instead.\fR This function may only be successfully executed by the job's
 owner or user root.
-.LP 
-\fBslurm_free_resource_allocation_response_msg\fR Release the storage generated in response 
-to a call of the function \fBslurm_allocate_resources\fR, 
+.LP
+\fBslurm_free_resource_allocation_response_msg\fR Release the storage generated in response
+to a call of the function \fBslurm_allocate_resources\fR,
 \fBslurm_allocation_lookup\fR, or \fBslurm_allocation_lookup_lite\fR.
-.LP 
-\fBslurm_free_submit_response_msg\fR Release the storage generated in response 
+.LP
+\fBslurm_free_submit_response_msg\fR Release the storage generated in response
 to a call of the function \fBslurm_submit_batch_job\fR.
-.LP 
-\fBslurm_init_job_desc_msg\fR Initialize the contents of a job descriptor with default values. 
+.LP
+\fBslurm_init_job_desc_msg\fR Initialize the contents of a job descriptor with default values.
 Execute this function before issuing a request to submit or modify a job.
-.LP 
-\fBslurm_job_will_run\fR Determine if the supplied job description could be executed immediately. 
-.LP 
+.LP
+\fBslurm_job_will_run\fR Determine if the supplied job description could be executed immediately.
+.LP
 \fBslurm_read_hostfile\fR Read a SLURM hostfile specified by
 "filename".  "filename" must contain a list of SLURM NodeNames, one
 per line.  Reads up to "n" number of hostnames from the file. Returns
 a string representing a hostlist ranged string of the contents
 of the file.  This is a helper function, it does not contact any SLURM
-daemons.   
-.LP 
-\fBslurm_submit_batch_job\fR Submit a job for later execution. Note that if 
-the job's requested node count or time allocation are outside of the partition's limits then a job entry will be created, a warning indication will be placed in the \fIerror_code\fP field of the response message, and the job will be left queued until the partition's limits are changed and resources are available.  Always release the response message when no 
+daemons.
+.LP
+\fBslurm_submit_batch_job\fR Submit a job for later execution. Note that if
+the job's requested node count or time allocation are outside of the partition's limits then a job entry will be created, a warning indication will be placed in the \fIerror_code\fP field of the response message, and the job will be left queued until the partition's limits are changed and resources are available.  Always release the response message when no
 longer required using the function \fBslurm_free_submit_response_msg\fR.
 .SH "RETURN VALUE"
 .LP
@@ -225,53 +225,53 @@ On success, zero is returned. On error, \-1 is returned, and Slurm error code is
 .LP
 \fBESLURM_TOO_MANY_REQUESTED_NODES\fR the job requested use of more nodes than can be made available to in the requested (or default) partition.
 .LP
-\fBESLURM_ERROR_ON_DESC_TO_RECORD_COPY\fR unable to create the job due to internal resources being exhausted. Try again later. 
+\fBESLURM_ERROR_ON_DESC_TO_RECORD_COPY\fR unable to create the job due to internal resources being exhausted. Try again later.
 .LP
-\fBESLURM_JOB_MISSING_SIZE_SPECIFICATION\fR the job failed to specify some size specification. At least one of the following must be supplied: required processor count, required node count, or required node list. 
+\fBESLURM_JOB_MISSING_SIZE_SPECIFICATION\fR the job failed to specify some size specification. At least one of the following must be supplied: required processor count, required node count, or required node list.
 .LP
-\fBESLURM_JOB_SCRIPT_MISSING\fR failed to identify executable program to be queued. 
+\fBESLURM_JOB_SCRIPT_MISSING\fR failed to identify executable program to be queued.
 .LP
-\fBESLURM_USER_ID_MISSING\fR identification of the job's owner was not provided. 
+\fBESLURM_USER_ID_MISSING\fR identification of the job's owner was not provided.
 .LP
-\fBESLURM_DUPLICATE_JOB_ID\fR the requested job id is already in use. 
+\fBESLURM_DUPLICATE_JOB_ID\fR the requested job id is already in use.
 .LP
-\fBESLURM_NOT_TOP_PRIORITY\fR job can not be started immediately because higher priority jobs are waiting to use this partition. 
+\fBESLURM_NOT_TOP_PRIORITY\fR job can not be started immediately because higher priority jobs are waiting to use this partition.
 .LP
-\fBESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE\fR the requested node configuration is not available (at least not in sufficient quantity) to satisfy the request. 
+\fBESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE\fR the requested node configuration is not available (at least not in sufficient quantity) to satisfy the request.
 .LP
-\fBESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE\fR the requested partition 
-configuration is not available to satisfy the request. This is not a fatal 
-error, but indicates that the job will be left queued until the partition's 
-configuration is changed. This typically indicates that the job's requested 
-node count is outside of the node count range its partition is configured 
-to support (e.g. the job wants 64 nodes and the partition will only schedule 
-jobs using between 1 and 32 nodes). Alternately, the job's time limit exceeds 
-the partition's time limit. 
+\fBESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE\fR the requested partition
+configuration is not available to satisfy the request. This is not a fatal
+error, but indicates that the job will be left queued until the partition's
+configuration is changed. This typically indicates that the job's requested
+node count is outside of the node count range its partition is configured
+to support (e.g. the job wants 64 nodes and the partition will only schedule
+jobs using between 1 and 32 nodes). Alternately, the job's time limit exceeds
+the partition's time limit.
 .LP
-\fBESLURM_NODES_BUSY\fR the requested nodes are already in use. 
+\fBESLURM_NODES_BUSY\fR the requested nodes are already in use.
 .LP
-\fBESLURM_INVALID_FEATURE\fR the requested feature(s) does not exist. 
+\fBESLURM_INVALID_FEATURE\fR the requested feature(s) does not exist.
 .LP
-\fBESLURM_INVALID_JOB_ID\fR the requested job id does not exist. 
+\fBESLURM_INVALID_JOB_ID\fR the requested job id does not exist.
 .LP
-\fBESLURM_INVALID_NODE_NAME\fR the requested node name(s) is/are not valid. 
+\fBESLURM_INVALID_NODE_NAME\fR the requested node name(s) is/are not valid.
 .LP
-\fBESLURM_INVALID_PARTITION_NAME\fR the requested partition name is not valid. 
+\fBESLURM_INVALID_PARTITION_NAME\fR the requested partition name is not valid.
 .LP
-\fBESLURM_TRANSITION_STATE_NO_UPDATE\fR the requested job configuration change can not take place at this time. Try again later. 
+\fBESLURM_TRANSITION_STATE_NO_UPDATE\fR the requested job configuration change can not take place at this time. Try again later.
 .LP
-\fBESLURM_ALREADY_DONE\fR the specified job has already completed and can not be modified. 
+\fBESLURM_ALREADY_DONE\fR the specified job has already completed and can not be modified.
 .LP
-\fBESLURM_ACCESS_DENIED\fR the requesting user lacks authorization for the requested action (e.g. trying to delete or modify another user's job). 
+\fBESLURM_ACCESS_DENIED\fR the requesting user lacks authorization for the requested action (e.g. trying to delete or modify another user's job).
 .LP
-\fBESLURM_INTERCONNECT_FAILURE\fR failed to configure the node interconnect. 
+\fBESLURM_INTERCONNECT_FAILURE\fR failed to configure the node interconnect.
 .LP
-\fBESLURM_BAD_DIST\fR task distribution specification is invalid. 
+\fBESLURM_BAD_DIST\fR task distribution specification is invalid.
 .LP
-\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with 
+\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
 SLURM controller.
 .SH "NON-BLOCKING EXAMPLE"
-.LP 
+.LP
 #include <stdio.h>
 .br
 #include <stdlib.h>
@@ -281,25 +281,25 @@ SLURM controller.
 #include <slurm/slurm.h>
 .br
 #include <slurm/slurm_errno.h>
-.LP 
+.LP
 int main (int argc, char *argv[])
-.br 
+.br
 {
-.br 
+.br
 	job_desc_msg_t job_desc_msg;
-.br 
+.br
 	resource_allocation_response_msg_t* slurm_alloc_msg_ptr ;
-.LP 
+.LP
 	slurm_init_job_desc_msg( &job_desc_msg );
-.br 
+.br
 	job_desc_msg. name = ("job01\0");
-.br 
+.br
 	job_desc_msg. job_min_memory = 1024;
-.br 
+.br
 	job_desc_msg. time_limit = 200;
-.br 
+.br
 	job_desc_msg. min_nodes = 400;
-.br 
+.br
 	job_desc_msg. user_id = getuid();
 .br
 	job_desc_msg. group_id = getgid();
@@ -309,39 +309,39 @@ int main (int argc, char *argv[])
 	                             &slurm_alloc_msg_ptr)) {
 .br
 		slurm_perror ("slurm_allocate_resources error");
-.br 
+.br
 		exit (1);
 .br
 	}
-.br 
-	printf ("Allocated nodes %s to job_id %u\\n", 
-.br 
-	        slurm_alloc_msg_ptr\->node_list, 
+.br
+	printf ("Allocated nodes %s to job_id %u\\n",
+.br
+	        slurm_alloc_msg_ptr\->node_list,
 .br
 	        slurm_alloc_msg_ptr\->job_id );
-.br 
+.br
 	if (slurm_kill_job(slurm_alloc_msg_ptr\->job_id, SIGKILL, 0)) {
-.br 
+.br
 		printf ("kill errno %d\\n", slurm_get_errno());
-.br 
+.br
 		exit (1);
-.br 
+.br
 	}
 .br
-	printf ("canceled job_id %u\\n", 
+	printf ("canceled job_id %u\\n",
 .br
 	        slurm_alloc_msg_ptr\->job_id );
-.br 
+.br
 	slurm_free_resource_allocation_response_msg(
 .br
 			slurm_alloc_msg_ptr);
-.br 
+.br
 	exit (0);
-.br 
+.br
 }
 
 .SH "BLOCKING EXAMPLE"
-.LP 
+.LP
 #include <stdio.h>
 .br
 #include <stdlib.h>
@@ -351,30 +351,30 @@ int main (int argc, char *argv[])
 #include <slurm/slurm.h>
 .br
 #include <slurm/slurm_errno.h>
-.LP 
+.LP
 int main (int argc, char *argv[])
-.br 
+.br
 {
-.br 
+.br
 	job_desc_msg_t job_desc_msg;
-.br 
+.br
 	resource_allocation_response_msg_t* slurm_alloc_msg_ptr ;
-.LP 
+.LP
 	slurm_init_job_desc_msg( &job_desc_msg );
-.br 
+.br
 	job_desc_msg. name = ("job01\0");
-.br 
+.br
 	job_desc_msg. job_min_memory = 1024;
-.br 
+.br
 	job_desc_msg. time_limit = 200;
-.br 
+.br
 	job_desc_msg. min_nodes = 400;
-.br 
+.br
 	job_desc_msg. user_id = getuid();
 .br
 	job_desc_msg. group_id = getgid();
 .br
-	if (!(slurm_alloc_msg_ptr = 
+	if (!(slurm_alloc_msg_ptr =
 .br
 	      slurm_allocate_resources_blocking(&job_desc_msg, 0, NULL))) {
 .br
@@ -383,35 +383,35 @@ int main (int argc, char *argv[])
 		exit (1);
 .br
 	}
-.br 
-	printf ("Allocated nodes %s to job_id %u\\n", 
-.br 
-	        slurm_alloc_msg_ptr\->node_list, 
+.br
+	printf ("Allocated nodes %s to job_id %u\\n",
+.br
+	        slurm_alloc_msg_ptr\->node_list,
 .br
 	        slurm_alloc_msg_ptr\->job_id );
-.br 
+.br
 	if (slurm_kill_job(slurm_alloc_msg_ptr\->job_id, SIGKILL, 0)) {
-.br 
+.br
 		printf ("kill errno %d\\n", slurm_get_errno());
-.br 
+.br
 		exit (1);
-.br 
+.br
 	}
 .br
-	printf ("canceled job_id %u\\n", 
+	printf ("canceled job_id %u\\n",
 .br
 	        slurm_alloc_msg_ptr\->job_id );
-.br 
+.br
 	slurm_free_resource_allocation_response_msg(
 .br
 			slurm_alloc_msg_ptr);
-.br 
+.br
 	exit (0);
-.br 
+.br
 }
 
 .SH "NOTE"
-These functions are included in the libslurm library, 
+These functions are included in the libslurm library,
 which must be linked to your process for use
 (e.g. "cc \-lslurm myprog.c").
 
@@ -433,8 +433,8 @@ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 .SH "SEE ALSO"
-.LP 
-\fBhostlist_create\fR(3), \fBhostlist_shift\fR(3), \fBhostlist_destroy\fR(3), 
-\fBscancel\fR(1), \fBsrun\fR(1), \fBslurm_free_job_info_msg\fR(3), 
-\fBslurm_get_errno\fR(3), \fBslurm_load_jobs\fR(3), 
+.LP
+\fBhostlist_create\fR(3), \fBhostlist_shift\fR(3), \fBhostlist_destroy\fR(3),
+\fBscancel\fR(1), \fBsrun\fR(1), \fBslurm_free_job_info_msg\fR(3),
+\fBslurm_get_errno\fR(3), \fBslurm_load_jobs\fR(3),
 \fBslurm_perror\fR(3), \fBslurm_strerror\fR(3)
diff --git a/doc/man/man3/slurm_checkpoint_error.3 b/doc/man/man3/slurm_checkpoint_error.3
index a99912742cddf2a1ea1f87c92547a28bf48e1672..6c8c01e1ac29452b4e752fa93e9faa5a766f3aa5 100644
--- a/doc/man/man3/slurm_checkpoint_error.3
+++ b/doc/man/man3/slurm_checkpoint_error.3
@@ -2,11 +2,11 @@
 
 .SH "NAME"
 slurm_checkpoint_able, slurm_checkpoint_complete, slurm_checkpoint_create,
-slurm_checkpoint_disable, slurm_checkpoint_enable, slurm_checkpoint_error, 
+slurm_checkpoint_disable, slurm_checkpoint_enable, slurm_checkpoint_error,
 slurm_checkpoint_restart, slurm_checkpoint_vacate \- Slurm checkpoint functions
 
 .SH "SYNTAX"
-.LP 
+.LP
 #include <slurm/slurm.h>
 .LP
 .LP
@@ -117,7 +117,7 @@ int \fBslurm_checkpoint_vacate\fR (
 );
 
 .SH "ARGUMENTS"
-.LP 
+.LP
 .TP
 \fIbegin_time\fP
 When to begin the operation.
@@ -126,11 +126,11 @@ When to begin the operation.
 Error code for checkpoint operation. Only the highest value is preserved.
 .TP
 \fIerror_msg\fP
-Error message for checkpoint operation. Only the \fIerror_msg\fP value for the highest 
+Error message for checkpoint operation. Only the \fIerror_msg\fP value for the highest
 \fIerror_code\fP is preserved.
 .TP
 \fIimage_dir\fP
-Directory specification for where the checkpoint file should be read from or 
+Directory specification for where the checkpoint file should be read from or
 written to. The default value is specified by the \fIJobCheckpointDir\fP
 SLURM configuration parameter.
 .TP
@@ -147,7 +147,7 @@ Nodes to send the request.
 Time at which last checkpoint operation began (if one is in progress), otherwise zero.
 .TP
 \fIstep_id\fP
-SLURM job step ID to perform the operation upon. 
+SLURM job step ID to perform the operation upon.
 May be NO_VAL if the operation is to be performed on all steps of the specified job.
 Specify SLURM_BATCH_SCRIPT to checkpoint a batch job.
 .TP
@@ -158,19 +158,19 @@ If non\-zero then restart the job on the same nodes that it was checkpointed fro
 .LP
 \fBslurm_checkpoint_able\fR
 Report if checkpoint operations can presently be issued for the specified job step.
-If yes, returns SLURM_SUCCESS and sets \fIstart_time\fP if checkpoint operation is 
+If yes, returns SLURM_SUCCESS and sets \fIstart_time\fP if checkpoint operation is
 presently active. Returns ESLURM_DISABLED if checkpoint operation is disabled.
 .LP
 \fBslurm_checkpoint_complete\fR
 Note that a requested checkpoint has been completed.
 .LP
 \fBslurm_checkpoint_create\fR
-Request a checkpoint for the identified job step. 
+Request a checkpoint for the identified job step.
 Continue its execution upon completion of the checkpoint.
 .LP
 \fBslurm_checkpoint_disable\fR
-Make the identified job step non\-checkpointable. 
-This can be issued as needed to prevent checkpointing while 
+Make the identified job step non\-checkpointable.
+This can be issued as needed to prevent checkpointing while
 a job step is in a critical section or for other reasons.
 .LP
 \fBslurm_checkpoint_enable\fR
@@ -181,9 +181,9 @@ Get error information about the last checkpoint operation for a given job step.
 .LP
 \fBslurm_checkpoint_restart\fR
 Request that a previously checkpointed job resume execution.
-It may continue execution on different nodes than were 
+It may continue execution on different nodes than were
 originally used.
-Execution may be delayed if resources are not immediately 
+Execution may be delayed if resources are not immediately
 available.
 .LP
 \fBslurm_checkpoint_vacate\fR
@@ -193,14 +193,14 @@ Terminate its execution upon completion of the checkpoint.
 
 .SH "RETURN VALUE"
 .LP
-Zero is returned upon success. 
+Zero is returned upon success.
 On error, \-1 is returned, and the Slurm error code is set appropriately.
 .SH "ERRORS"
 .LP
-\fBESLURM_INVALID_JOB_ID\fR the requested job or job step id does not exist. 
+\fBESLURM_INVALID_JOB_ID\fR the requested job or job step id does not exist.
 .LP
-\fBESLURM_ACCESS_DENIED\fR the requesting user lacks authorization for the requested 
-action (e.g. trying to delete or modify another user's job). 
+\fBESLURM_ACCESS_DENIED\fR the requesting user lacks authorization for the requested
+action (e.g. trying to delete or modify another user's job).
 .LP
 \fBESLURM_JOB_PENDING\fR the requested job is still pending.
 .LP
@@ -212,7 +212,7 @@ This will occur when a request for checkpoint is issued when they have been disa
 \fBESLURM_NOT_SUPPORTED\fR the requested operation is not supported on this system.
 
 .SH "EXAMPLE"
-.LP 
+.LP
 #include <stdio.h>
 .br
 #include <stdlib.h>
@@ -220,11 +220,11 @@ This will occur when a request for checkpoint is issued when they have been disa
 #include <slurm/slurm.h>
 .br
 #include <slurm/slurm_errno.h>
-.LP 
+.LP
 int main (int argc, char *argv[])
-.br 
+.br
 {
-.br 
+.br
 	uint32_t job_id, step_id;
 .LP
 	if (argc < 3) {
@@ -242,17 +242,17 @@ int main (int argc, char *argv[])
 	if (slurm_checkpoint_disable(job_id, step_id)) {
 .br
 		slurm_perror ("slurm_checkpoint_error:");
-.br 
+.br
 		exit (1);
 .br
 	}
-.br 
+.br
 	exit (0);
-.br 
+.br
 }
 
 .SH "NOTE"
-These functions are included in the libslurm library, 
+These functions are included in the libslurm library,
 which must be linked to your process for use
 (e.g. "cc \-lslurm myprog.c").
 
@@ -276,5 +276,5 @@ FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 .SH "SEE ALSO"
-.LP 
+.LP
 \fBsrun\fR(1), \fBsqueue\fR(1), \fBfree\fR(3), \fBslurm.conf\fR(5)
diff --git a/doc/man/man3/slurm_complete_job.3 b/doc/man/man3/slurm_complete_job.3
index 882d926c0f822087af3072404945e6c20e786212..611cd1281e22f92a524dea9c383946d09d76480f 100644
--- a/doc/man/man3/slurm_complete_job.3
+++ b/doc/man/man3/slurm_complete_job.3
@@ -2,53 +2,53 @@
 .SH "NAME"
 slurm_complete_job \- Slurm job completion call
 .SH "SYNTAX"
-.LP 
+.LP
 #include <slurm/slurm.h>
 .LP
 int \fBslurm_complete_job\fR (
-.br 
-	uint32_t \fIjob_id\fP, 
+.br
+	uint32_t \fIjob_id\fP,
 .br
 	uint32_t \fIjob_return_code\fP
-.br 
+.br
 );
 .SH "ARGUMENTS"
-.LP 
-.TP 
+.LP
+.TP
 \fIjob_id\fP
 Slurm job id number.
-.TP 
+.TP
 \fIjob_return_code\fP
 Exit code of the program executed.
 
 .SH "DESCRIPTION"
-.LP 
-\fBslurm_complete_job\fR Note the termination of a job. This function may only be 
+.LP
+\fBslurm_complete_job\fR Note the termination of a job. This function may only be
 successfully executed by the job's owner or user root.
 
 .SH "RETURN VALUE"
 .LP
-On success, zero is returned. On error, \-1 is returned, and Slurm error code 
+On success, zero is returned. On error, \-1 is returned, and Slurm error code
 is set appropriately.
 
 .SH "ERRORS"
 .LP
 \fBSLURM_PROTOCOL_VERSION_ERROR\fR Protocol version has changed, re\-link your code.
 .LP
-\fBESLURM_INVALID_JOB_ID\fR the requested job id does not exist. 
+\fBESLURM_INVALID_JOB_ID\fR the requested job id does not exist.
 .LP
-\fBESLURM_ALREADY_DONE\fR the specified job has already completed and can 
-not be modified. 
+\fBESLURM_ALREADY_DONE\fR the specified job has already completed and can
+not be modified.
 .LP
-\fBESLURM_ACCESS_DENIED\fR the requesting user lacks authorization for the requested action (e.g. trying to delete or modify another user's job). 
+\fBESLURM_ACCESS_DENIED\fR the requesting user lacks authorization for the requested action (e.g. trying to delete or modify another user's job).
 .LP
-\fBESLURM_INTERCONNECT_FAILURE\fR failed to configure the node interconnect. 
+\fBESLURM_INTERCONNECT_FAILURE\fR failed to configure the node interconnect.
 .LP
-\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with 
+\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
 SLURM controller.
 
 .SH "NOTE"
-These functions are included in the libslurm library, 
+These functions are included in the libslurm library,
 which must be linked to your process for use
 (e.g. "cc \-lslurm myprog.c").
 
@@ -71,5 +71,5 @@ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 .SH "SEE ALSO"
-.LP 
+.LP
 \fBslurm_get_errno\fR(3), \fBslurm_perror\fR(3), \fBslurm_strerror\fR(3)
diff --git a/doc/man/man3/slurm_free_ctl_conf.3 b/doc/man/man3/slurm_free_ctl_conf.3
index aa9091b80dac74072e6899385899f23da7848e0b..6d1ec7acb918d0dacaa436d011f3db5d24907c3c 100644
--- a/doc/man/man3/slurm_free_ctl_conf.3
+++ b/doc/man/man3/slurm_free_ctl_conf.3
@@ -1,74 +1,74 @@
 .TH "Slurm API" "3" "April 2007" "Morris Jette" "Slurm informational calls"
 
 .SH "NAME"
-slurm_free_ctl_conf, slurm_load_ctl_conf, 
+slurm_free_ctl_conf, slurm_load_ctl_conf,
 slurm_print_ctl_conf \- Slurm information reporting functions
 
 .SH "SYNTAX"
-.LP 
+.LP
 #include <stdio.h>
 .br
 #include <slurm/slurm.h>
 .LP
 long \fBslurm_api_version\fR ();
-.LP 
+.LP
 void \fBslurm_free_ctl_conf\fR (
-.br 
+.br
 	slurm_ctl_conf_t *\fIconf_info_msg_ptr\fP
-.br 
+.br
 );
 .LP
 int \fBslurm_load_ctl_conf\fR (
-.br 
+.br
 	time_t \fIupdate_time\fP,
-.br 
+.br
 	slurm_ctl_conf_t **\fIconf_info_msg_pptr\fP
-.br 
+.br
 );
-.LP 
+.LP
 void \fBslurm_print_ctl_conf\fR (
 .br
 	FILE *\fIout_file\fp,
 .br
 	slurm_ctl_conf_t *\fIconf_info_msg_ptr\fP
-.br 
+.br
 );
 .SH "ARGUMENTS"
-.LP 
-.TP 
+.LP
+.TP
 \fIconf_info_msg_pptr\fP
-Specifies the double pointer to the structure to be created and filled with the 
-time of the last configuration update and detailed configuration information. 
-Configuration information includes control machine names, file names, timer 
-values, etc. See slurm.h for full details on the data structure's contents. 
-.TP 
+Specifies the double pointer to the structure to be created and filled with the
+time of the last configuration update and detailed configuration information.
+Configuration information includes control machine names, file names, timer
+values, etc. See slurm.h for full details on the data structure's contents.
+.TP
 \fIconf_info_msg_ptr\fP
-Specifies the pointer to the structure created by \fBslurm_load_ctl_conf\fR. 
-.TP 
+Specifies the pointer to the structure created by \fBslurm_load_ctl_conf\fR.
+.TP
 \fIout_file\fP
 Specifies the file to print data to.
-.TP 
+.TP
 \fIupdate_time\fP
 For all of the following informational calls, if update_time is equal to or
 greater than the last time changes where made to that information, new information
-is not returned.  Otherwise all the configuration. job, node, or partition records 
+is not returned.  Otherwise all the configuration, job, node, or partition records
 are returned.
 .SH "DESCRIPTION"
 .LP
 \fBslurm_api_version\fR Return the SLURM API version number.
-.LP 
-\fBslurm_free_ctl_conf\fR Release the storage generated by the 
+.LP
+\fBslurm_free_ctl_conf\fR Release the storage generated by the
 \fBslurm_load_ctl_conf\fR function.
-.LP 
+.LP
 \fBslurm_load_ctl_conf\fR Returns a slurm_ctl_conf_t that contains
 Slurm configuration records.
-.LP 
+.LP
 \fBslurm_print_ctl_conf\fR Prints the contents of the data structure loaded by the
 \fBslurm_load_ctl_conf\fR function.
 .SH "RETURN VALUE"
 .LP
-For \fBslurm_api_version\fR the SLURM API version number is returned. 
-All other functions return zero on success and \-1 on error with the 
+For \fBslurm_api_version\fR the SLURM API version number is returned.
+All other functions return zero on success and \-1 on error with the
 SLURM error code set appropriately.
 .SH "ERRORS"
 .LP
@@ -76,25 +76,25 @@ SLURM error code set appropriately.
 .LP
 \fBSLURM_PROTOCOL_VERSION_ERROR\fR Protocol version has changed, re\-link your code.
 .LP
-\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with 
+\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
 SLURM controller.
 .SH "EXAMPLE"
-.LP 
+.LP
 #include <stdio.h>
 .br
 #include <slurm/slurm.h>
 .br
 #include <slurm/slurm_errno.h>
-.LP 
+.LP
 int main (int argc, char *argv[])
-.br 
+.br
 {
 .br
 	slurm_ctl_conf_t * conf_info_msg_ptr = NULL;
 .br
 	long version = slurm_api_version;
 .LP
-	/* We can use the SLURM version number to determine how 
+	/* We can use the SLURM version number to determine how
 .br
 	 * API should be used */
 .br
@@ -120,28 +120,28 @@ int main (int argc, char *argv[])
 .br
 	/* The easy way to print */
 .br
-	slurm_print_ctl_conf (stdout, 
+	slurm_print_ctl_conf (stdout,
 .br
 	                      conf_info_msg_ptr);
 .LP
 	/* The hard way */
 .br
-	printf ("control_machine = %s\\n", 
+	printf ("control_machine = %s\\n",
 .br
 	        slurm_ctl_conf_ptr\->control_machine);
 .br
-	printf ("server_timeout = %u\\n", 
+	printf ("server_timeout = %u\\n",
 .br
 	        slurm_ctl_conf_ptr\->server_timeout);
 .LP
 	slurm_free_ctl_conf (conf_info_msg_ptr);
 .br
 	exit (0);
-.br 
+.br
 }
 
 .SH "NOTE"
-These functions are included in the libslurm library, 
+These functions are included in the libslurm library,
 which must be linked to your process for use
 (e.g. "cc \-lslurm myprog.c").
 
@@ -163,7 +163,7 @@ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 .SH "SEE ALSO"
-.LP 
-\fBscontrol\fR(1), 
+.LP
+\fBscontrol\fR(1),
 \fBslurm_get_errno\fR(3), \fBslurm_perror\fR(3), \fBslurm_strerror\fR(3)
 
diff --git a/doc/man/man3/slurm_free_job_info_msg.3 b/doc/man/man3/slurm_free_job_info_msg.3
index e8ec50e9e97012b2d3d45420420c70346e106b9a..7f8188c4a574203a659541a43755da2e430536ac 100644
--- a/doc/man/man3/slurm_free_job_info_msg.3
+++ b/doc/man/man3/slurm_free_job_info_msg.3
@@ -1,8 +1,8 @@
 .TH "Slurm API" "3" "September 2006" "Morris Jette" "Slurm job information reporting functions"
 .SH "NAME"
-slurm_free_job_alloc_info_response_msg, slurm_free_job_info_msg, 
+slurm_free_job_alloc_info_response_msg, slurm_free_job_info_msg,
 slurm_get_end_time, slurm_get_rem_time, slurm_get_select_jobinfo,
-slurm_load_jobs, slurm_pid2jobid, 
+slurm_load_jobs, slurm_pid2jobid,
 slurm_print_job_info, slurm_print_job_info_msg
 \- Slurm job information reporting functions
 .LP
@@ -10,7 +10,7 @@ ISLURM_GET_REM_TIME, ISLURM_GET_REM_TIME2
 \- Fortran callable extensions
 
 .SH "SYNTAX"
-.LP 
+.LP
 #include <stdio.h>
 .br
 #include <time.h>
@@ -20,47 +20,47 @@ ISLURM_GET_REM_TIME, ISLURM_GET_REM_TIME2
 #include <sys/types.h>
 .LP
 void \fBslurm_free_job_alloc_info_response_msg\fR (
-.br 
+.br
 	job_alloc_info_response_msg_t *\fIjob_alloc_info_msg_ptr\fP
-.br 
+.br
 );
-.LP 
+.LP
 void \fBslurm_free_job_info_msg\fR (
-.br 
+.br
 	job_info_msg_t *\fIjob_info_msg_ptr\fP
-.br 
+.br
 );
-.LP 
+.LP
 int \fBslurm_load_job\fR (
-.br 
+.br
 	job_info_msg_t **\fIjob_info_msg_pptr\fP,
 .br
 	uint32_t \fIjob_id\fP
-.br 
+.br
 );
-.LP 
-.LP 
+.LP
+.LP
 int \fBslurm_load_jobs\fR (
-.br 
+.br
 	time_t \fIupdate_time\fP,
-.br 
+.br
 	job_info_msg_t **\fIjob_info_msg_pptr\fP,
 .br
 	uint16_t \fIshow_flags\fP
-.br 
+.br
 );
-.LP 
+.LP
 int \fBslurm_pid2jobid\fR (
 .br
 	pid_t \fIjob_pid\fP,
 .br
 	uint32_t *\fIjob_id_ptr\fP
-.br 
+.br
 );
 .LP
 int \fBslurm_get_end_time\fR (
 .br
-	uint32_t \fIjobid\fP, 
+	uint32_t \fIjobid\fP,
 .br
 	time_t *\fIend_time_ptr\fP
 .br
@@ -71,7 +71,7 @@ long \fBslurm_get_rem_time\fR (
 	uint32_t \fIjob_id\fP
 .br
 );
-.LP 
+.LP
 void \fBslurm_print_job_info\fR (
 .br
 	FILE *\fIout_file\fP,
@@ -79,9 +79,9 @@ void \fBslurm_print_job_info\fR (
 	job_info_t *\fIjob_ptr\fP,
 .br
 	int \fIone_liner\fP
-.br 
+.br
 );
-.LP 
+.LP
 void \fBslurm_print_job_info_msg\fR (
 .br
 	FILE *\fIout_file\fP,
@@ -89,14 +89,14 @@ void \fBslurm_print_job_info_msg\fR (
 	job_info_msg_t *\fIjob_info_msg_ptr\fP,
 .br
 	int \fIone_liner\fP
-.br 
+.br
 );
 .LP
 int \fBslurm_get_select_jobinfo\fR (
 .br
 	select_jobinfo_t \fIjobinfo\fP,
 .br
-	enum select_data_type \fIdata_type\fP, 
+	enum select_data_type \fIdata_type\fP,
 .br
 	void *\fIdata\fP
 );
@@ -109,139 +109,139 @@ REM_TIME = ISLURM_GET_REM_TIME(JOBID)
 .br
 REM_TIME = ISLURM_GET_REM_TIME2()
 .LP
-ISLURM_GET_REM_TIME2() is equivalent to ISLURM_GET_REM_TIME() except 
-that the JOBID is taken from the SLURM_JOB_ID environment variable, 
+ISLURM_GET_REM_TIME2() is equivalent to ISLURM_GET_REM_TIME() except
+that the JOBID is taken from the SLURM_JOB_ID environment variable,
 which is set by SLURM for tasks which it launches.
-Both functions return the number of seconds remaining before the job 
+Both functions return the number of seconds remaining before the job
 reaches the end of it's allocated time.
 
 .SH "ARGUMENTS"
-.TP 
+.TP
 \fIdata_type\fP
-Identifies the type of data to retrieve \fIjobinfo\fP. Note that different types of 
-data are associated with different computer types and different configurations. 
+Identifies the type of data to retrieve \fIjobinfo\fP. Note that different types of
+data are associated with different computer types and different configurations.
 .TP
 \fIdata\fP
 The data value identified with \fIdata_type\fP is returned in the location specified
-by \fIdata\fP. If a type of data is requested that does not exist on a particular 
+by \fIdata\fP. If a type of data is requested that does not exist on a particular
 computer type or configuration, \fBslurm_get_select_jobinfo\fR returns an error.
-See the slurm.h header file for identification of the data types associated 
+See the slurm.h header file for identification of the data types associated
 with each value of \fIdata_type\fP.
 .TP
 \fIend_time_ptr\fP
-Specified a pointer to a storage location into which the expected termination 
+Specified a pointer to a storage location into which the expected termination
 time of a job is placed.
-.TP 
+.TP
 \fIjob_info_msg_pptr\fP
-Specifies the double pointer to the structure to be created and filled with 
-the time of the last job update, a record count, and detailed information 
-about each job. Detailed job information is written to fixed sized records 
-and includes: ID number, name, user ID, state, assigned or requested node 
-names, indexes into the node table, etc. In the case of indexes into the 
-node table, this is an array of integers with pairs of start and end index 
-number into the node information records and the data is terminated with a 
-value of \-1. See slurm.h for full details on the data structure's contents. 
-.TP 
+Specifies the double pointer to the structure to be created and filled with
+the time of the last job update, a record count, and detailed information
+about each job. Detailed job information is written to fixed sized records
+and includes: ID number, name, user ID, state, assigned or requested node
+names, indexes into the node table, etc. In the case of indexes into the
+node table, this is an array of integers with pairs of start and end index
+number into the node information records and the data is terminated with a
+value of \-1. See slurm.h for full details on the data structure's contents.
+.TP
 \fIjob_id\fP
 Specifies a slurm job id. If zero, use the SLURM_JOB_ID environment variable
 to get the jobid.
-.TP 
+.TP
 \fIjob_id_ptr\fP
-Specifies a pointer to a storage location into which a Slurm job id may be 
+Specifies a pointer to a storage location into which a Slurm job id may be
 placed.
-.TP 
+.TP
 \fIjob_info_msg_ptr\fP
 Specifies the pointer to the structure created by \fBslurm_load_job\fR
-or \fBslurm_load_jobs\fR. 
+or \fBslurm_load_jobs\fR.
 .TP
 \fIjobinfo\fP
 Job\-specific information as constructed by Slurm's NodeSelect plugin.
 This data object is returned for each job by the \fBslurm_load_job\fR or
 \fBslurm_load_jobs\fR function.
-.TP 
+.TP
 \fIjob_pid\fP
 Specifies a process id of some process on the current node.
 .TP
 \fIjob_ptr\fP
-Specifies a pointer to a single job records from the \fIjob_info_msg_ptr\fP 
+Specifies a pointer to a single job record from the \fIjob_info_msg_ptr\fP
 data structure.
-.TP 
+.TP
 \fIone_liner\fP
 Print one record per line if non\-zero.
-.TP 
+.TP
 \fIout_file\fP
 Specifies the file to print data to.
-.TP 
+.TP
 \fIshow_flags\fP
-Job filtering flags, may be ORed. 
-Information about jobs in partitions that are configured as 
-hidden and partitions that the user's group is unable to utilize 
+Job filtering flags, may be ORed.
+Information about jobs in partitions that are configured as
+hidden and partitions that the user's group is unable to utilize
 are not reported by default.
-The \fBSHOW_ALL\fP flag will cause information about jobs in all 
+The \fBSHOW_ALL\fP flag will cause information about jobs in all
 partitions to be displayed.
-.TP 
+.TP
 \fIupdate_time\fP
-For all of the following informational calls, if update_time is equal to or 
-greater than the last time changes where made to that information, new 
-information is not returned.  Otherwise all the configuration. job, node, 
+For all of the following informational calls, if update_time is equal to or
+greater than the last time changes were made to that information, new
+information is not returned.  Otherwise all the configuration, job, node,
 or partition records are returned.
 .SH "DESCRIPTION"
 .LP
 \fBslurm_free_resource_allocation_response_msg\fR Free slurm resource
 allocation response message.
-.LP 
-\fBslurm_free_job_info_msg\fR Release the storage generated by the 
+.LP
+\fBslurm_free_job_info_msg\fR Release the storage generated by the
 \fBslurm_load_jobs\fR function.
-.LP 
-\fBslurm_get_end_time\fR Returns the expected termination time of a specified 
-SLURM job. The time corresponds to the exhaustion of the job\'s or partition\'s 
-time limit. NOTE: The data is cached locally and only retrieved from the 
+.LP
+\fBslurm_get_end_time\fR Returns the expected termination time of a specified
+SLURM job. The time corresponds to the exhaustion of the job\'s or partition\'s
+time limit. NOTE: The data is cached locally and only retrieved from the
 SLURM controller once per minute.
 .LP
-\fBslurm_get_rem_time\fR Returns the number of seconds remaining before the 
-expected termination time of a specified SLURM job id. The time corresponds 
-to the exhaustion of the job\'s or partition\'s time limit. NOTE: The data is 
+\fBslurm_get_rem_time\fR Returns the number of seconds remaining before the
+expected termination time of a specified SLURM job id. The time corresponds
+to the exhaustion of the job\'s or partition\'s time limit. NOTE: The data is
 cached locally and only retrieved from the SLURM controller once per minute.
-.LP 
-\fBslurm_load_job\fR Returns a job_info_msg_t that contains an update time, 
+.LP
+\fBslurm_load_job\fR Returns a job_info_msg_t that contains an update time,
 record count, and array of job_table records for some specific job ID.
-.LP 
-\fBslurm_load_jobs\fR Returns a job_info_msg_t that contains an update time, 
+.LP
+\fBslurm_load_jobs\fR Returns a job_info_msg_t that contains an update time,
 record count, and array of job_table records for all jobs.
-.LP 
-\fBslurm_pid2jobid\fR Returns a Slurm job id corresponding to the supplied 
-local process id. This only works for processes which Slurm spawns and their 
+.LP
+\fBslurm_pid2jobid\fR Returns a Slurm job id corresponding to the supplied
+local process id. This only works for processes which Slurm spawns and their
 descendants.
-.LP 
-\fBslurm_print_job_info\fR Prints the contents of the data structure 
-describing a single job records from the data loaded by the 
+.LP
+\fBslurm_print_job_info\fR Prints the contents of the data structure
+describing a single job record from the data loaded by the
 \fBslurm_load_node\fR function.
-.LP 
-\fBslurm_print_job_info_msg\fR Prints the contents of the data structure 
+.LP
+\fBslurm_print_job_info_msg\fR Prints the contents of the data structure
 describing all job records loaded by the \fBslurm_load_node\fR function.
 
 .SH "RETURN VALUE"
 .LP
 For \fBslurm_get_rem_time\fR on success a number of seconds is returned.
-For all other functions zero is returned on success. 
+For all other functions zero is returned on success.
 On error, \-1 is returned, and Slurm error code is set appropriately.
 
 .SH "ERRORS"
 .LP
 \fBSLURM_NO_CHANGE_IN_DATA\fR Data has not changed since \fBupdate_time\fR.
 .LP
-\fBSLURM_PROTOCOL_VERSION_ERROR\fR Protocol version has changed, re\-link 
+\fBSLURM_PROTOCOL_VERSION_ERROR\fR Protocol version has changed, re\-link
 your code.
 .LP
 \fBESLURM_INVALID_JOB_ID\fR Request for information about a non\-existent job.
 .LP
-\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with 
+\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
 SLURM controller.
 .LP
 \fBINVAL\fR Invalid function argument.
 
 .SH "EXAMPLE"
-.LP 
+.LP
 #include <stdio.h>
 .br
 #include <slurm/slurm.h>
@@ -249,11 +249,11 @@ SLURM controller.
 #include <slurm/slurm_errno.h>
 .br
 #include <sys/types.h>
-.LP 
+.LP
 int main (int argc, char *argv[])
-.br 
+.br
 {
-.br 
+.br
 	int i;
 .br
 	job_info_msg_t	* job_info_msg = NULL;
@@ -264,7 +264,7 @@ int main (int argc, char *argv[])
 .LP
 	/* get and dump some job information */
 .br
-	if ( slurm_load_jobs ((time_t) NULL, 
+	if ( slurm_load_jobs ((time_t) NULL,
 .br
 	                      &job_buffer_ptr, SHOW_ALL) ) {
 .br
@@ -292,15 +292,15 @@ int main (int argc, char *argv[])
 .br
 	printf ("Jobs updated at %lx, record count %d\\n",
 .br
-	        job_buffer_ptr\->last_update, 
+	        job_buffer_ptr\->last_update,
 .br
 	        job_buffer_ptr\->record_count);
 .br
 	for (i = 0; i < job_buffer_ptr\->record_count; i++) {
 .br
-		printf ("JobId=%u UserId=%u\\n", 
+		printf ("JobId=%u UserId=%u\\n",
 .br
-			job_buffer_ptr\->job_array[i].job_id, 
+			job_buffer_ptr\->job_array[i].job_id,
 .br
 			job_buffer_ptr\->job_array[i].user_id);
 .br
@@ -320,7 +320,7 @@ int main (int argc, char *argv[])
 .br
 			printf("JobId=%u Rotate=%u\\n",
 .br
-				job_buffer_ptr\->job_array[0].job_id, 
+				job_buffer_ptr\->job_array[0].job_id,
 .br
 				rotate);
 .br
@@ -337,16 +337,16 @@ int main (int argc, char *argv[])
 		printf ("Slurm job id = %u\\n", job_id);
 .br
 	exit (0);
-.br 
+.br
 }
 
 .SH "NOTES"
-These functions are included in the libslurm library, 
+These functions are included in the libslurm library,
 which must be linked to your process for use
 (e.g. "cc \-lslurm myprog.c").
 .LP
-Some data structures contain index values to cross\-reference each other. 
-If the \fIshow_flags\fP argument is not set to SHOW_ALL when getting this 
+Some data structures contain index values to cross\-reference each other.
+If the \fIshow_flags\fP argument is not set to SHOW_ALL when getting this
 data, these index values will be invalid.
 .LP
 The \fBslurm_hostlist_\fR functions can be used to convert SLURM node list
@@ -372,10 +372,10 @@ FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 .SH "SEE ALSO"
-.LP 
-\fBscontrol\fR(1), \fBsqueue\fR(1), 
-\fBslurm_hostlist_create\fR(3), \fBslurm_hostlist_shift\fR(3), 
+.LP
+\fBscontrol\fR(1), \fBsqueue\fR(1),
+\fBslurm_hostlist_create\fR(3), \fBslurm_hostlist_shift\fR(3),
 \fBslurm_hostlist_destroy\fR(3),
-\fBslurm_allocation_lookup\fR(3), 
+\fBslurm_allocation_lookup\fR(3),
 \fBslurm_get_errno\fR(3), \fBslurm_perror\fR(3), \fBslurm_strerror\fR(3)
 
diff --git a/doc/man/man3/slurm_free_job_step_info_response_msg.3 b/doc/man/man3/slurm_free_job_step_info_response_msg.3
index a393e79bf04d1e14746841feeca564ec9aaedbea..448d0b49b51e51260d4b8af32750bda7e3bee1cc 100644
--- a/doc/man/man3/slurm_free_job_step_info_response_msg.3
+++ b/doc/man/man3/slurm_free_job_step_info_response_msg.3
@@ -6,31 +6,31 @@ slurm_print_job_step_info, slurm_print_job_step_info_msg
 \- Slurm job step information reporting functions
 
 .SH "SYNTAX"
-.LP 
+.LP
 #include <stdio.h>
 .br
 #include <slurm/slurm.h>
 .LP
 void \fBslurm_free_job_step_info_response_msg\fR (
-.br 
+.br
 	job_step_info_response_msg_t *\fIjob_step_info_msg_ptr\fP
-.br 
+.br
 );
 .LP
 void \fBslurm_get_job_steps\fR (
-.br 
+.br
 	time_t *\fIupdate_time\fP,
 .br
-	uint32_t \fIjob_id\fP, 
+	uint32_t \fIjob_id\fP,
 .br
-	uint32_t \fIstep_id\fP, 
+	uint32_t \fIstep_id\fP,
 .br
 	job_step_info_response_msg_t **\fIjob_step_info_msg_pptr\fP,
 .br
 	uint16_t \fIshow_flags\fP
-.br 
+.br
 );
-.LP 
+.LP
 void \fBslurm_print_job_step_info\fR (
 .br
 	FILE *\fIout_file\fp,
@@ -38,9 +38,9 @@ void \fBslurm_print_job_step_info\fR (
 	job_step_info_t *\fIjob_step_ptr\fP,
 .br
 	int \fIone_liner\fP
-.br 
+.br
 );
-.LP 
+.LP
 void \fBslurm_print_job_step_info_msg\fR (
 .br
 	FILE *\fIout_file\fp,
@@ -48,39 +48,39 @@ void \fBslurm_print_job_step_info_msg\fR (
 	job_step_info_response_msg_t *\fIjob_step_info_msg_ptr\fP,
 .br
 	int \fIone_liner\fP
-.br 
+.br
 );
 .SH "ARGUMENTS"
-.LP 
+.LP
 .TP
 \fIjob_id\fP
 Specifies a slurm job ID. A value of zero implies all jobs.
-.TP 
+.TP
 \fIjob_step_info_msg_pptr\fP
-Specifies the double pointer to the structure to be created and filled 
-with the time of the last node update, a record count, and detailed 
-information about each job step specified. Detailed job step information 
+Specifies the double pointer to the structure to be created and filled
+with the time of the last node update, a record count, and detailed
+information about each job step specified. Detailed job step information
 is written to fixed sized records and includes: job_id, step_id, node
-names, etc. See slurm.h for full details on the data structure's contents. 
-.TP 
+names, etc. See slurm.h for full details on the data structure's contents.
+.TP
 \fIjob_step_info_msg_ptr\fP
-Specifies the pointer to the structure created by the function 
-\fBslurm_get_job_steps\fP. 
+Specifies the pointer to the structure created by the function
+\fBslurm_get_job_steps\fP.
 .TP
 \fIjob_step_ptr\fP
-Specifies a pointer to a single job step records from the \fIjob_step_info_msg_pptr\fP 
+Specifies a pointer to a single job step record from the \fIjob_step_info_msg_pptr\fP
 data structure.
-.TP 
+.TP
 \fIone_liner\fP
 Print one record per line if non\-zero.
-.TP 
+.TP
 \fIout_file\fP
 Specifies the file to print data to.
-.TP 
+.TP
 \fIshow_flags\fP
 Job filtering flags, may be ORed.
-Information about job steps in partitions that are configured as 
-hidden and partitions that the user's group is unable to utilize 
+Information about job steps in partitions that are configured as
+hidden and partitions that the user's group is unable to utilize
 are not reported by default.
 The \fBSHOW_ALL\fP flag will cause information about job steps in all
 partitions to be displayed.
@@ -88,28 +88,28 @@ partitions to be displayed.
 .TP
 \fIstep_id\fP
 Specifies a slurm job step ID. A value of zero implies all job steps.
-.TP 
+.TP
 \fIupdate_time\fP
 For all of the following informational calls, if update_time is equal to or greater than the last time changes where made to that information, new information is not returned.  Otherwise all the configuration. job, node, or partition records are returned.
 .SH "DESCRIPTION"
-.LP 
-\fBslurm_free_job_step_info_response_msg\fR Release the storage generated by 
+.LP
+\fBslurm_free_job_step_info_response_msg\fR Release the storage generated by
 the \fBslurm_get_job_steps\fR function.
 .LP
-\fBslurm_get_job_steps\fR Loads into details about job steps that satisfy 
-the \fIjob_id\fP and/or \fIstep_id\fP specifications provided if the data 
+\fBslurm_get_job_steps\fR Loads details about job steps that satisfy
+the \fIjob_id\fP and/or \fIstep_id\fP specifications provided if the data
 has been updated since the \fIupdate_time\fP specified.
-.LP 
-\fBslurm_print_job_step_info\fR Prints the contents of the data structure 
-describing a single job step records from the data loaded by the 
+.LP
+\fBslurm_print_job_step_info\fR Prints the contents of the data structure
+describing a single job step record from the data loaded by the
 \fslurm_get_job_steps\fR function.
-.LP 
-\fBslurm_print_job_step_info_msg\fR Prints the contents of the data 
+.LP
+\fBslurm_print_job_step_info_msg\fR Prints the contents of the data
 structure describing all job step records loaded by the
 \fslurm_get_job_steps\fR function.
 .SH "RETURN VALUE"
 .LP
-On success, zero is returned. On error, \-1 is returned, and Slurm error 
+On success, zero is returned. On error, \-1 is returned, and Slurm error
 code is set appropriately.
 .SH "ERRORS"
 .LP
@@ -117,20 +117,20 @@ code is set appropriately.
 .LP
 \fBSLURM_PROTOCOL_VERSION_ERROR\fR Protocol version has changed, re\-link your code.
 .LP
-\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with 
+\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
 SLURM controller.
 .SH "EXAMPLE"
-.LP 
+.LP
 #include <stdio.h>
 .br
 #include <slurm/slurm.h>
 .br
 #include <slurm/slurm_errno.h>
-.LP 
+.LP
 int main (int argc, char *argv[])
-.br 
+.br
 {
-.br 
+.br
 	int i;
 .br
 	job_step_info_response_msg_t * step_info_ptr = NULL;
@@ -151,7 +151,7 @@ int main (int argc, char *argv[])
 .LP
 	/* The easy way to print... */
 .br
-	slurm_print_job_step_info_msg (stdout, 
+	slurm_print_job_step_info_msg (stdout,
 .br
 	                               step_info_ptr);
 .LP
@@ -169,36 +169,36 @@ int main (int argc, char *argv[])
 .br
 	printf ("Steps updated at %lx, record count %d\\n",
 .br
-	        step_info_ptr\->last_update, 
+	        step_info_ptr\->last_update,
 .br
 	        step_info_ptr\->job_step_count);
 .br
 	for (i = 0; i < step_info_ptr\->job_step_count; i++) {
 .br
-		printf ("JobId=%u StepId=%u\\n", 
+		printf ("JobId=%u StepId=%u\\n",
 .br
-			step_info_ptr\->job_steps[i].job_id, 
+			step_info_ptr\->job_steps[i].job_id,
 .br
 			step_info_ptr\->job_steps[i].step_id);
 .br
-	}			
+	}
 .LP
 	slurm_free_job_step_info_response_msg(step_info_ptr);
 .br
 	exit (0);
-.br 
+.br
 }
 
 .SH "NOTES"
-These functions are included in the libslurm library, 
+These functions are included in the libslurm library,
 which must be linked to your process for use
 (e.g. "cc \-lslurm myprog.c").
 .LP
-Some data structures contain index values to cross\-reference each other. 
+Some data structures contain index values to cross\-reference each other.
 If the \fIshow_flags\fP argument is not set to SHOW_ALL when getting this
 data, these index values will be invalid.
 .LP
-The \fBslurm_hostlist_\fR functions can be used to convert SLURM node list 
+The \fBslurm_hostlist_\fR functions can be used to convert SLURM node list
 expressions into a collection of individual node names.
 
 .SH "COPYING"
@@ -220,10 +220,10 @@ FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 .SH "SEE ALSO"
-.LP 
-\fBscontrol\fR(1), \fBsqueue\fR(1), 
-\fBslurm_hostlist_create\fR(3), \fBslurm_hostlist_shift\fR(3), 
+.LP
+\fBscontrol\fR(1), \fBsqueue\fR(1),
+\fBslurm_hostlist_create\fR(3), \fBslurm_hostlist_shift\fR(3),
 \fBslurm_hostlist_destroy\fR(3),
-\fBslurm_get_errno\fR(3), \fBslurm_load_jobs\fR(3), 
+\fBslurm_get_errno\fR(3), \fBslurm_load_jobs\fR(3),
 \fBslurm_perror\fR(3), \fBslurm_strerror\fR(3)
 
diff --git a/doc/man/man3/slurm_free_node_info.3 b/doc/man/man3/slurm_free_node_info.3
index 259d08f89ebf1386502f6e423958524d0bfbcb0c..452258354c93d154853fd0b56f4636da0e9a0e77 100644
--- a/doc/man/man3/slurm_free_node_info.3
+++ b/doc/man/man3/slurm_free_node_info.3
@@ -1,30 +1,30 @@
 .TH "Slurm API" "3" "January 2006" "Morris Jette" "Slurm node informational calls"
 .SH "NAME"
-slurm_free_node_info, slurm_load_node, 
+slurm_free_node_info, slurm_load_node,
 slurm_print_node_info_msg, slurm_print_node_table, slurm_sprint_node_table
 \- Slurm node information reporting functions
 .SH "SYNTAX"
-.LP 
+.LP
 #include <stdio.h>
 .br
 #include <slurm/slurm.h>
 .LP
 void \fBslurm_free_node_info\fR (
-.br 
+.br
 	node_info_msg_t *\fInode_info_msg_ptr\fP
-.br 
+.br
 );
-.LP 
+.LP
 int \fBslurm_load_node\fR (
-.br 
-	time_t \fIupdate_time\fP, 
-.br 
+.br
+	time_t \fIupdate_time\fP,
+.br
 	node_info_msg_t **\fInode_info_msg_pptr\fP,
 .br
 	uint16_t \fIshow_flags\fP
-.br 
+.br
 );
-.LP 
+.LP
 void \fBslurm_print_node_info_msg\fR (
 .br
 	FILE *\fIout_file\fp,
@@ -32,9 +32,9 @@ void \fBslurm_print_node_info_msg\fR (
 	node_info_msg_t *\fInode_info_msg_ptr\fP,
 .br
 	int \fIone_liner\fP
-.br 
+.br
 );
-.LP 
+.LP
 void \fBslurm_print_node_table\fR (
 .br
 	FILE *\fIout_file\fp,
@@ -44,9 +44,9 @@ void \fBslurm_print_node_table\fR (
 	int \fInode_scaling\fP
 .br
 	int \fIone_liner\fP
-.br 
+.br
 );
-.LP 
+.LP
 char *\fBslurm_sprint_node_table\fR (
 .br
 	node_info_t *\fInode_ptr\fP,
@@ -54,90 +54,90 @@ char *\fBslurm_sprint_node_table\fR (
 	int \fInode_scaling\fP
 .br
 	int \fIone_liner\fP
-.br 
+.br
 );
 .SH "ARGUMENTS"
-.LP 
-.TP 
+.LP
+.TP
 \fInode_info_msg_ptr\fP
-Specifies the pointer to the structure created by \fBslurm_load_node\fR. 
-.TP 
+Specifies the pointer to the structure created by \fBslurm_load_node\fR.
+.TP
 \fInode_info_msg_pptr\fP
-Specifies the double pointer to the structure to be created and filled with 
-the time of the last node update, a record count, and detailed information 
-about each node. Detailed node information is written to fixed sized records 
-and includes: name, state, processor count, memory size, etc. See slurm.h for 
-full details on the data structure's contents. 
-.TP 
+Specifies the double pointer to the structure to be created and filled with
+the time of the last node update, a record count, and detailed information
+about each node. Detailed node information is written to fixed sized records
+and includes: name, state, processor count, memory size, etc. See slurm.h for
+full details on the data structure's contents.
+.TP
 \fInode_info_msg_ptr\fP
-Specifies the pointer to the structure created by \fBslurm_load_node\fR. 
+Specifies the pointer to the structure created by \fBslurm_load_node\fR.
 .TP
 \fInode_ptr\fP
-Specifies a pointer to a single node records from the \fInode_info_msg_ptr\fP 
+Specifies a pointer to a single node record from the \fInode_info_msg_ptr\fP
 data structure.
-.TP 
+.TP
 \fInode_scaling\fP
 number of nodes each node represents default is 1.
-.TP 
+.TP
 \fIone_liner\fP
 Print one record per line if non\-zero.
-.TP 
+.TP
 \fIout_file\fP
 Specifies the file to print data to.
-.TP 
+.TP
 \fIshow_flags\fP
 Job filtering flags, may be ORed.
-Information about nodes in partitions that are configured as 
-hidden and partitions that the user's group is unable to utilize 
+Information about nodes in partitions that are configured as
+hidden and partitions that the user's group is unable to utilize
 are not reported by default.
 The \fBSHOW_ALL\fP flag will cause information about nodes in all
 partitions to be displayed.
 
-.TP 
+.TP
 \fIupdate_time\fP
-For all of the following informational calls, if update_time is equal to 
-or greater than the last time changes where made to that information, new 
-information is not returned.  Otherwise all the configuration. job, node, 
+For all of the following informational calls, if update_time is equal to
+or greater than the last time changes were made to that information, new
+information is not returned.  Otherwise all the configuration, job, node,
 or partition records are returned.
 .SH "DESCRIPTION"
-.LP 
+.LP
 \fBslurm_free_node_info\fR Release the storage generated by the
 \fBslurm_load_node\fR function.
-.LP 
-\fBslurm_load_node\fR Returns a \fInode_info_msg_t\fP that contains an update 
+.LP
+\fBslurm_load_node\fR Returns a \fInode_info_msg_t\fP that contains an update
 time, record count, and array of node_table records for all nodes.
-.LP 
-\fBslurm_print_node_info_msg\fR Prints the contents of the data structure 
-describing all node records from the data loaded by the \fBslurm_load_node\fR 
+.LP
+\fBslurm_print_node_info_msg\fR Prints the contents of the data structure
+describing all node records from the data loaded by the \fBslurm_load_node\fR
 function.
-.LP 
-\fBslurm_print_node_table\fR Prints the contents of the data structure 
+.LP
+\fBslurm_print_node_table\fR Prints the contents of the data structure
 describing a single node record loaded by the \fBslurm_load_node\fR function.
 .SH "RETURN VALUE"
 .LP
-On success, zero is returned. On error, \-1 is returned, and Slurm error code 
+On success, zero is returned. On error, \-1 is returned, and Slurm error code
 is set appropriately.
 .SH "ERRORS"
 .LP
 \fBSLURM_NO_CHANGE_IN_DATA\fR Data has not changed since \fBupdate_time\fR.
 .LP
-\fBSLURM_PROTOCOL_VERSION_ERROR\fR Protocol version has changed, re\-link 
+\fBSLURM_PROTOCOL_VERSION_ERROR\fR Protocol version has changed, re\-link
 your code.
 .LP
-\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with 
+\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
 SLURM controller.
 .SH "EXAMPLE"
-.LP 
+.LP
 #include <stdio.h>
 .br
 #include <slurm/slurm.h>
 .br
 #include <slurm/slurm_errno.h>
-.LP 
+.LP
 int main (int argc, char *argv[])
-.br 
+.br
 {
-.br 
+.br
 	int i, j, k;
 .br
 	partition_info_msg_t *part_info_ptr = NULL;
@@ -150,7 +150,7 @@ int main (int argc, char *argv[])
 .LP
 	/* get and dump some node information */
 .br
-	if ( slurm_load_node ((time_t) NULL, 
+	if ( slurm_load_node ((time_t) NULL,
 .br
 	                      &node_buffer_ptr, SHOW_ALL) ) {
 .br
@@ -178,13 +178,13 @@ int main (int argc, char *argv[])
 .br
 	for (i = 0; i < node_buffer_ptr\->node_count; i++) {
 .br
-		printf ("NodeName=%s CPUs=%u\\n", 
+		printf ("NodeName=%s CPUs=%u\\n",
 .br
-			node_buffer_ptr\->node_array[i].name, 
+			node_buffer_ptr\->node_array[i].name,
 .br
 			node_buffer_ptr\->node_array[i].cpus);
 .br
-	}			
+	}
 .LP
 	/* get and dump some partition information */
 .br
@@ -194,7 +194,7 @@ int main (int argc, char *argv[])
 .br
 	/* not changed since */
 .br
-	if ( slurm_load_partitions ((time_t) NULL, 
+	if ( slurm_load_partitions ((time_t) NULL,
 .br
 	                            &part_buffer_ptr) ) {
 .br
@@ -208,7 +208,7 @@ int main (int argc, char *argv[])
 .br
 		part_ptr = &part_info_ptr\->partition_array[i];
 .br
-		printf ("PartitionName=%s Nodes=", 
+		printf ("PartitionName=%s Nodes=",
 .br
 			part_ptr\->name);
 .br
@@ -218,9 +218,9 @@ int main (int argc, char *argv[])
 .br
 				break;
 .br
-			for (k = part_ptr\->node_inx[j]; 
+			for (k = part_ptr\->node_inx[j];
 .br
-			     k <= part_ptr\->node_inx[j+1]; 
+			     k <= part_ptr\->node_inx[j+1];
 .br
 			     k++) {
 .br
@@ -241,15 +241,15 @@ int main (int argc, char *argv[])
 	slurm_free_partition_info (part_buffer_ptr);
 .br
 	exit (0);
-.br 
+.br
 }
 
 .SH "NOTES"
-These functions are included in the libslurm library, 
+These functions are included in the libslurm library,
 which must be linked to your process for use
 (e.g. "cc \-lslurm myprog.c").
 .LP
-Some data structures contain index values to cross\-reference each other. 
+Some data structures contain index values to cross\-reference each other.
 If the \fIshow_flags\fP argument is not set to SHOW_ALL when getting this
 data, these index values will be invalid.
 
@@ -271,8 +271,8 @@ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 .SH "SEE ALSO"
-.LP 
-\fBscontrol\fR(1), \fBsqueue\fR(1), \fBslurm_allocation_lookup\fR(3), 
+.LP
+\fBscontrol\fR(1), \fBsqueue\fR(1), \fBslurm_allocation_lookup\fR(3),
 \fBslurm_get_errno\fR(3), \fBslurm_load_partitions\fR(3),
 \fBslurm_perror\fR(3), \fBslurm_strerror\fR(3)
 
diff --git a/doc/man/man3/slurm_free_partition_info.3 b/doc/man/man3/slurm_free_partition_info.3
index 617f7fa311234c2400a41781cabe1f50ba4d41cd..bdb53e401ec5b6c104b0861a467209df22f444df 100644
--- a/doc/man/man3/slurm_free_partition_info.3
+++ b/doc/man/man3/slurm_free_partition_info.3
@@ -1,6 +1,6 @@
 .TH "Slurm API" "3" "September 2006" "Morris Jette" "Slurm partition information reporting functions"
 .SH "NAME"
-slurm_free_partition_info_msg, slurm_load_partitions, 
+slurm_free_partition_info_msg, slurm_load_partitions,
 slurm_print_partition_info, slurm_print_partition_info_msg
 \- Slurm partition information reporting functions
 .SH "SYNTAX"
@@ -10,21 +10,21 @@ slurm_print_partition_info, slurm_print_partition_info_msg
 #include <slurm/slurm.h>
 .LP
 void \fBslurm_free_partition_info_msg\fR (
-.br 
+.br
 	partition_info_msg_t *\fIpartition_info_msg_ptr\fP
-.br 
+.br
 );
-.LP 
+.LP
 int \fBslurm_load_partitions\fR (
-.br 
-	time_t \fIupdate_time\fR, 
-.br 
+.br
+	time_t \fIupdate_time\fR,
+.br
 	partition_info_msg_t **\fIpartition_info_msg_pptr\fP,
 .br
 	uint16_t \fIshow_flags\fP
-.br 
+.br
  );
-.LP 
+.LP
 void \fBslurm_print_partition_info\fR (
 .br
 	FILE *\fIout_file\fP,
@@ -32,9 +32,9 @@ void \fBslurm_print_partition_info\fR (
 	partition_info_t *\fIpartition_ptr\fP,
 .br
 	int \fIone_liner\fP
-.br 
+.br
 );
-.LP 
+.LP
 void \fBslurm_print_partition_info_msg\fR (
 .br
 	FILE *\fIout_file\fP,
@@ -42,55 +42,55 @@ void \fBslurm_print_partition_info_msg\fR (
 	partition_info_msg_t *\fIpartition_info_msg_ptr\fP,
 .br
 	int \fIone_liner\fP
-.br 
+.br
 );
 .SH "ARGUMENTS"
-.LP 
-.TP 
+.LP
+.TP
 \fIone_liner\fP
 Print one record per line if non\-zero.
-.TP 
+.TP
 \fIout_file\fP
 Specifies the file to print data to.
 .TP
 \fIpartition_info_msg_pptr\fP
-Specifies the double pointer to the structure to be created and filled with the time 
-of the last partition update, a record count, and detailed information about each 
-partition. Detailed partition information is written to fixed sized records and includes: 
-name, state, job time limit, job size limit, node names, indexes into the node table, 
-etc. In the case of indexes into the node table, this is an array of integers with 
-pairs of start and end index number into the node information records and the 
-data is terminated with a value of \-1. See slurm.h for full details on the data 
-structure's contents. 
-.TP 
+Specifies the double pointer to the structure to be created and filled with the time
+of the last partition update, a record count, and detailed information about each
+partition. Detailed partition information is written to fixed sized records and includes:
+name, state, job time limit, job size limit, node names, indexes into the node table,
+etc. In the case of indexes into the node table, this is an array of integers with
+pairs of start and end index number into the node information records and the
+data is terminated with a value of \-1. See slurm.h for full details on the data
+structure's contents.
+.TP
 \fIpartition_info_msg_ptr\fP
-Specifies the pointer to the structure created by \fBslurm_load_partitions\fP. 
-.TP 
+Specifies the pointer to the structure created by \fBslurm_load_partitions\fP.
+.TP
 \fIshow_flags\fP
 Job filtering flags, may be ORed.
-Information about partitions that are configured as 
-hidden and partitions that the user's group is unable to utilize 
+Information about partitions that are configured as
+hidden and partitions that the user's group is unable to utilize
 are not reported by default.
-The \fBSHOW_ALL\fP flag will cause information about partitions 
+The \fBSHOW_ALL\fP flag will cause information about partitions
 to be displayed.
 .TP
 \fIupdate_time\fP
-For all of the following informational calls, if update_time is equal to or greater 
-than the last time changes where made to that information, new information is 
-not returned.  Otherwise all the configuration. job, node, or partition records 
+For all of the following informational calls, if update_time is equal to or greater
+than the last time changes were made to that information, new information is
+not returned.  Otherwise all the configuration, job, node, or partition records
 are returned.
 .SH "DESCRIPTION"
-.LP 
-\fBslurm_free_partition_info_msg\fR Release the storage generated by the 
+.LP
+\fBslurm_free_partition_info_msg\fR Release the storage generated by the
 \fBslurm_load_partitions\fR function.
-.LP 
-\fBslurm_load_partitions\fR Returns a partition_info_msg_t that contains an 
+.LP
+\fBslurm_load_partitions\fR Returns a partition_info_msg_t that contains an
 update time, record count, and array of partition_table records for all partitions.
-.LP 
-\fBslurm_print_partition_info\fR Prints the contents of the data structure describing a 
+.LP
+\fBslurm_print_partition_info\fR Prints the contents of the data structure describing a
 single partition records from the data loaded by the \fBslurm_load_partitions\fR function.
-.LP 
-\fBslurm_print_partition_info_msg\fR Prints the contents of the data structure describing 
+.LP
+\fBslurm_print_partition_info_msg\fR Prints the contents of the data structure describing
 all partition records loaded by the \fBslurm_load_partitions\fR function.
 .SH "RETURN VALUE"
 .LP
@@ -99,13 +99,13 @@ On success, zero is returned. On error, \-1 is returned, and Slurm error code is
 .LP
 \fBSLURM_NO_CHANGE_IN_DATA\fR Data has not changed since \fBupdate_time\fR.
 .LP
-\fBSLURM_PROTOCOL_VERSION_ERROR\fR Protocol version has changed, re\-link 
+\fBSLURM_PROTOCOL_VERSION_ERROR\fR Protocol version has changed, re\-link
 your code.
 .LP
-\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with 
+\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
 SLURM controller.
 .SH "EXAMPLE"
-.LP 
+.LP
 #include <stdio.h>
 .br
 #include <stdlib.h>
@@ -113,9 +113,9 @@ SLURM controller.
 #include <slurm/slurm.h>
 .br
 #include <slurm/slurm_errno.h>
-.LP 
+.LP
 int main (int argc, char *argv[])
-.br 
+.br
 {
 .br
 	int i;
@@ -138,7 +138,7 @@ int main (int argc, char *argv[])
 .LP
 	/* The easy way to print... */
 .br
-	slurm_print_partition_info_msg (stdout, 
+	slurm_print_partition_info_msg (stdout,
 .br
 	                                part_info_ptr, 0);
 .LP
@@ -156,15 +156,15 @@ int main (int argc, char *argv[])
 .br
 	printf("Partitions updated at %lx, records=%d\\n",
 .br
-	       part_info_ptr\->last_update, 
+	       part_info_ptr\->last_update,
 .br
 	       part_info_ptr\->record_count);
 .br
 	for (i = 0; i < part_info_ptr\->record_count; i++) {
 .br
-		printf ("PartitionName=%s Nodes=%s\\n", 
+		printf ("PartitionName=%s Nodes=%s\\n",
 .br
-			part_info_ptr\->partition_array[i].name, 
+			part_info_ptr\->partition_array[i].name,
 .br
 			part_info_ptr\->partition_array[i].nodes );
 .br
@@ -173,15 +173,15 @@ int main (int argc, char *argv[])
 	slurm_free_partition_info_msg (part_info_ptr);
 .br
 	exit (0);
-.br 
+.br
 }
 
 .SH "NOTES"
-These functions are included in the libslurm library, 
+These functions are included in the libslurm library,
 which must be linked to your process for use
 (e.g. "cc \-lslurm myprog.c").
 .LP
-Some data structures contain index values to cross\-reference each other. 
+Some data structures contain index values to cross\-reference each other.
 If the \fIshow_flags\fP argument is not set to SHOW_ALL when getting this
 data, these index values will be invalid.
 .LP
@@ -207,10 +207,10 @@ FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 .SH "SEE ALSO"
-.LP 
-\fBscontrol\fR(1), \fBsinfo\fR(1), \fBsqueue\fR(1), 
-\fBslurm_hostlist_create\fR(3), \fBslurm_hostlist_shift\fR(3), 
+.LP
+\fBscontrol\fR(1), \fBsinfo\fR(1), \fBsqueue\fR(1),
+\fBslurm_hostlist_create\fR(3), \fBslurm_hostlist_shift\fR(3),
 \fBslurm_hostlist_destroy\fR(3),
-\fBslurm_get_errno\fR(3), \fBslurm_load_node\fR(3), 
+\fBslurm_get_errno\fR(3), \fBslurm_load_node\fR(3),
 \fBslurm_perror\fR(3), \fBslurm_strerror\fR(3)
 
diff --git a/doc/man/man3/slurm_get_errno.3 b/doc/man/man3/slurm_get_errno.3
index 6b03c37abd56dc4d226408b2d074a0af34d0b60e..ade9ebb15352003cebd7836a04b0686e5fbd4edd 100644
--- a/doc/man/man3/slurm_get_errno.3
+++ b/doc/man/man3/slurm_get_errno.3
@@ -4,59 +4,59 @@ slurm_get_errno, slurm_perror, slurm_strerror \- Slurm error handling functions
 .SH "SYNTAX"
 .LP
 #include <slurm/slurm_errno.h>
-.LP 
+.LP
 int \fBslurm_get_errno\fR ( );
 .LP
-void \fBslurm_perror\fR ( 
-.br 
-	char *\fIheader\fP 
-.br 
+void \fBslurm_perror\fR (
+.br
+	char *\fIheader\fP
+.br
 );
 .LP
 char * \fBslurm_strerror\fR (
-.br 
+.br
 	int \fIerrnum\fP
-.br 
+.br
 );
 .SH "ARGUMENTS"
-.LP 
-.TP 
+.LP
+.TP
 \fIerrnum\fP
 A Slurm error code.
-.TP 
+.TP
 \fIheader\fP
-A pointer to a string used as a message header for printing along with an error description. 
+A pointer to a string used as a message header for printing along with an error description.
 .SH "DESCRIPTION"
-.LP 
+.LP
 \fBslurm_get_errno\fR Return the error code as set by the Slurm API function executed.
-.LP 
+.LP
 \fBslurm_perror\fR Print to standard error the supplied header followed by a colon followed by a text description of the last Slurm error code generated.
-.LP 
+.LP
 \fBslurm_strerror\fR Given a Slurm error code, return a pointer to a text description of the error's meaning.
 .SH "RETURN VALUE"
 .LP
 \fBslurm_get_errno\fR returns an error code or zero if no error was generated by the last Slurm function call executed. \fBslurm_strerror\fR returns a pointer to a text string, which is empty if no error was generated by the last Slurm function call executed.
 .SH "EXAMPLE"
-.LP 
+.LP
 #include <stdio.h>
 .br
 #include <slurm/slurm.h>
 .br
 #include <slurm/slurm_errno.h>
-.LP 
+.LP
 int main (int argc, char *argv[])
-.br 
+.br
 {
 .br
 	/* assume Slurm API function failed here */
 .br
-	fprintf (stderr, "Slurm function errno = %d\\n", 
+	fprintf (stderr, "Slurm function errno = %d\\n",
 .br
 	         slurm_get_errno ());
 .br
 	fprintf (stderr, "Slurm function errno = %d %s\\n",
 .br
-	         slurm_get_errno (), 
+	         slurm_get_errno (),
 .br
 	         slurm_strerror (slurm_get_errno ()));
 .br
@@ -67,7 +67,7 @@ int main (int argc, char *argv[])
 }
 
 .SH "NOTE"
-These functions are included in the libslurm library, 
+These functions are included in the libslurm library,
 which must be linked to your process for use
 (e.g. "cc \-lslurm myprog.c").
 
@@ -89,23 +89,23 @@ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 .SH "SEE ALSO"
-.LP 
+.LP
 \fBslurm_allocate_resources\fR(3),
-\fBslurm_complete_job\fR(3), \fBslurm_complete_job_step\fR(3), 
-\fBslurm_allocation_lookup\fR(3), 
-\fBslurm_free_ctl_conf\fR(3), \fBslurm_free_job_info_msg\fR(3), 
-\fBslurm_free_job_step_create_response_msg\fR(3), 
-\fBslurm_free_node_info\fR(3), \fBslurm_free_partition_info\fR(3), 
-\fBslurm_free_resource_allocation_response_msg\fR(3), 
-\fBslurm_free_submit_response_response_msg\fR(3), 
+\fBslurm_complete_job\fR(3), \fBslurm_complete_job_step\fR(3),
+\fBslurm_allocation_lookup\fR(3),
+\fBslurm_free_ctl_conf\fR(3), \fBslurm_free_job_info_msg\fR(3),
+\fBslurm_free_job_step_create_response_msg\fR(3),
+\fBslurm_free_node_info\fR(3), \fBslurm_free_partition_info\fR(3),
+\fBslurm_free_resource_allocation_response_msg\fR(3),
+\fBslurm_free_submit_response_response_msg\fR(3),
 \fBslurm_get_job_steps\fR(3),
-\fBslurm_init_job_desc_msg\fR(3), \fBslurm_init_part_desc_msg\fR(3), 
-\fBslurm_job_step_create\fR(3), \fBslurm_job_will_run\fR(3), 
-\fBslurm_kill_job\fR(3), \fBslurm_kill_job_step\fR(3), 
-\fBslurm_load_ctl_conf\fR(3), \fBslurm_load_jobs\fR(3), 
-\fBslurm_load_node\fR(3), \fBslurm_load_partitions\fR(3), 
+\fBslurm_init_job_desc_msg\fR(3), \fBslurm_init_part_desc_msg\fR(3),
+\fBslurm_job_step_create\fR(3), \fBslurm_job_will_run\fR(3),
+\fBslurm_kill_job\fR(3), \fBslurm_kill_job_step\fR(3),
+\fBslurm_load_ctl_conf\fR(3), \fBslurm_load_jobs\fR(3),
+\fBslurm_load_node\fR(3), \fBslurm_load_partitions\fR(3),
 \fBslurm_pid2jobid\fR(3),
-\fBslurm_reconfigure\fR(3), \fBslurm_shutdown\fR(3), \fBslurm_submit_batch_job\fR(3), 
+\fBslurm_reconfigure\fR(3), \fBslurm_shutdown\fR(3), \fBslurm_submit_batch_job\fR(3),
 \fBslurm_update_job\fR(3), \fBslurm_update_node\fR(3), \fBslurm_update_partition\fR(3)
 
- 
+
diff --git a/doc/man/man3/slurm_hostlist_create.3 b/doc/man/man3/slurm_hostlist_create.3
index 5cbb3a64f6aa4a5744dcb33c2e485fadc14a2556..9d75f2ceffd2a503e11b33b17448358ed3c9babb 100644
--- a/doc/man/man3/slurm_hostlist_create.3
+++ b/doc/man/man3/slurm_hostlist_create.3
@@ -5,9 +5,9 @@ slurm_hostlist_create, slurm_hostlist_shift, slurm_hostlist_destroy
 \- Slurm host list support functions
 
 .SH "SYNTAX"
-.LP 
+.LP
 #include <slurm/slurm.h>
-.LP 
+.LP
 .LP
 hostlist_t \fBslurm_hostlist_create\fR (
 .br
@@ -28,48 +28,48 @@ void \fBslurm_hostlist_destroy\fR (
 );
 
 .SH "ARGUMENTS"
-.LP 
+.LP
 .TP
 \fInode_list\fP
-A list of nodes as returned by the 
-\fBslurm_job_step_create\fR functions. The returned value may include a simple 
-range format to describe numeric ranges of values and/or multiple numeric 
+A list of nodes as returned by the
+\fBslurm_job_step_create\fR functions. The returned value may include a simple
+range format to describe numeric ranges of values and/or multiple numeric
 values (e.g. "linux[1\-3,6]" represents "linux1", "linux2", "linux3", and "linux6").
 .TP
 \fIhost_list\fP
-A hostlist created by the \fBslurm_hostlist_create\fR function. 
+A hostlist created by the \fBslurm_hostlist_create\fR function.
 
 .SH "DESCRIPTION"
 .LP
-\fBslurm_hostlist_create\fR creates a database of node names from a range format 
-describing node names. Use \fBslurm_hostlist_destroy\fR to release storage associated 
+\fBslurm_hostlist_create\fR creates a database of node names from a range format
+describing node names. Use \fBslurm_hostlist_destroy\fR to release storage associated
 with the database when no longer required.
 .LP
-\fBslurm_hostlist_shift\fR extracts the first entry from the host list database created 
+\fBslurm_hostlist_shift\fR extracts the first entry from the host list database created
 by the \fBslurm_hostlist_create\fR function.
 .LP
-\fBslurm_hostlist_destroy\fR releases storage associated with a database created by 
+\fBslurm_hostlist_destroy\fR releases storage associated with a database created by
 \fBslurm_hostlist_create\fR when no longer required.
 
 .SH "RETURN VALUE"
 .LP
-\fBslurm_hostlist_create\fR returns the host list database or NULL if memory can not be 
+\fBslurm_hostlist_create\fR returns the host list database or NULL if memory can not be
 allocated for the database.
 
 .LP
-\fBslurm_hostlist_shift\fR returns a character string or NULL if no entries remain in 
+\fBslurm_hostlist_shift\fR returns a character string or NULL if no entries remain in
 the database.
 
 .SH "EXAMPLE"
-.LP 
+.LP
 #include <stdio.h>
 .br
 #include <hostlist.h>
 .br
 #include <slurm.h>
-.LP 
+.LP
 int main (int argc, char *argv[])
-.br 
+.br
 {
 .br
 	hostlist_t my_hostlist;
@@ -92,16 +92,16 @@ int main (int argc, char *argv[])
 .LP
 	while ( (host = slurm_hostlist_shift(my_hostlist)) )
 .br
-		printf ("host = %s\\n", host); 
+		printf ("host = %s\\n", host);
 .LP
 	slurm_hostlist_destroy (my_hostlist) ;
 .br
 	exit (0);
-.br 
+.br
 }
 
 .SH "NOTE"
-These functions are included in the libslurm library, 
+These functions are included in the libslurm library,
 which must be linked to your process for use
 (e.g. "cc \-lslurm myprog.c").
 
@@ -124,5 +124,5 @@ FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 .SH "SEE ALSO"
-.LP 
+.LP
 \fBslurm_get_job_steps\fR(3), \fBslurm_load_jobs\fR(3), \fBslurm_load_partitions\fB(3)
diff --git a/doc/man/man3/slurm_job_step_create.3 b/doc/man/man3/slurm_job_step_create.3
index af9356b5129ffae7a2979dfa872c262440f40572..90a2b4ce53937489811cfa12b16f22d29a5e7da9 100644
--- a/doc/man/man3/slurm_job_step_create.3
+++ b/doc/man/man3/slurm_job_step_create.3
@@ -5,9 +5,9 @@ slurm_free_job_step_create_response_msg, slurm_job_step_create
 \- Slurm job step initiation functions
 
 .SH "SYNTAX"
-.LP 
+.LP
 #include <slurm/slurm.h>
-.LP 
+.LP
 .LP
 void \fBslurm_free_job_step_create_response_msg\fR (
 .br
@@ -17,32 +17,32 @@ void \fBslurm_free_job_step_create_response_msg\fR (
 .LP
 int \fBslurm_job_step_create\fR (
 .br
-	job_step_create_request_msg_t *\fIslurm_step_alloc_req_msg_ptr\fP, 
+	job_step_create_request_msg_t *\fIslurm_step_alloc_req_msg_ptr\fP,
 .br
 	job_step_create_response_msg_t **\fIslurm_step_alloc_resp_msg_pptr\fP
 .br
 );
 .SH "ARGUMENTS"
-.LP 
+.LP
 .TP
 \fIslurm_step_alloc_req_msg_ptr\fP
-Specifies the pointer to the structure with job step request specification. See 
+Specifies the pointer to the structure with job step request specification. See
 slurm.h for full details on the data structure's contents.
 .TP
 \fIslurm_step_alloc_resp_msg_pptr\fP
-Specifies the double pointer to the structure to be created and filled with a description of the 
-created job step: node allocation, credentials, etc. See slurm.h for full details on the data 
-structure's contents. 
+Specifies the double pointer to the structure to be created and filled with a description of the
+created job step: node allocation, credentials, etc. See slurm.h for full details on the data
+structure's contents.
 .SH "DESCRIPTION"
 .LP
-\fBslurm_free_job_step_create_response_msg\fR Release the storage generated in response 
+\fBslurm_free_job_step_create_response_msg\fR Release the storage generated in response
 to a call of the function \fBslurm_job_step_create\fR.
 .LP
-\fBslurm_job_step_create\fR Initialize a job step including the allocation of nodes to 
-it from those already allocate to that job. Always release the response message when no 
-longer required using the function \fBslurm_free_job_step_create_response_msg\fR. 
-The list of host names returned may be matched to their data in the proper order by 
-using the functions \fBhostlist_create\fR, \fBhostlist_shift\fR, and 
+\fBslurm_job_step_create\fR Initialize a job step including the allocation of nodes to
+it from those already allocated to that job. Always release the response message when no
+longer required using the function \fBslurm_free_job_step_create_response_msg\fR.
+The list of host names returned may be matched to their data in the proper order by
+using the functions \fBhostlist_create\fR, \fBhostlist_shift\fR, and
 \fBhostlist_destroy\fR.
 .SH "RETURN VALUE"
 .LP
@@ -51,24 +51,24 @@ On success, zero is returned. On error, \-1 is returned, and Slurm error code is
 .LP
 \fBSLURM_PROTOCOL_VERSION_ERROR\fR Protocol version has changed, re\-link your code.
 .LP
-\fBESLURM_INVALID_JOB_ID\fR the requested job id does not exist. 
+\fBESLURM_INVALID_JOB_ID\fR the requested job id does not exist.
 .LP
-\fBESLURM_ALREADY_DONE\fR the specified job has already completed and can not be modified. 
+\fBESLURM_ALREADY_DONE\fR the specified job has already completed and can not be modified.
 .LP
-\fBESLURM_ACCESS_DENIED\fR the requesting user lacks authorization for the requested action (e.g. trying to delete or modify another user's job). 
+\fBESLURM_ACCESS_DENIED\fR the requesting user lacks authorization for the requested action (e.g. trying to delete or modify another user's job).
 .LP
 \fBESLURM_DISABLED\fR the ability to create a job step is currently disabled.
 This is indicative of the job being suspended. Retry the call as desired.
 .LP
-\fBESLURM_INTERCONNECT_FAILURE\fR failed to configure the node interconnect. 
+\fBESLURM_INTERCONNECT_FAILURE\fR failed to configure the node interconnect.
 .LP
-\fBESLURM_BAD_DIST\fR task distribution specification is invalid. 
+\fBESLURM_BAD_DIST\fR task distribution specification is invalid.
 .LP
-\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with 
+\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
 SLURM controller.
 
 .SH "NOTE"
-These functions are included in the libslurm library, 
+These functions are included in the libslurm library,
 which must be linked to your process for use
 (e.g. "cc \-lslurm myprog.c").
 
@@ -90,7 +90,7 @@ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 .SH "SEE ALSO"
-.LP 
-\fBhostlist_create\fR(3), \fBhostlist_shift\fR(3), \fBhostlist_destroy\fR(3), 
-\fBsrun\fR(1), 
+.LP
+\fBhostlist_create\fR(3), \fBhostlist_shift\fR(3), \fBhostlist_destroy\fR(3),
+\fBsrun\fR(1),
 \fBslurm_get_errno\fR(3), \fBslurm_perror\fR(3), \fBslurm_strerror\fR(3)
diff --git a/doc/man/man3/slurm_kill_job.3 b/doc/man/man3/slurm_kill_job.3
index c1d3de090120a69e2702f00500b51968e7161fcc..e599318f445fbd8696fb45f0f1ed460068e18196 100644
--- a/doc/man/man3/slurm_kill_job.3
+++ b/doc/man/man3/slurm_kill_job.3
@@ -4,65 +4,65 @@ slurm_kill_job, slurm_kill_job_step,
 slurm_signal_job, slurm_signal_job_step,
 slurm_terminate_job, slurm_terminate_job_step \- Slurm job signal calls
 .SH "SYNTAX"
-.LP 
+.LP
 #include <slurm/slurm.h>
 .LP
 int \fBslurm_kill_job\fR (
-.br 
+.br
 	uint32_t \fIjob_id\fP,
-.br 
+.br
 	uint16_t \fIsignal\fP,
 .br
 	uint16_t \fIbatch_flag\fP
-.br 
+.br
 );
 .LP
 int \fBslurm_kill_job_step\fR (
-.br 
+.br
 	uint32_t \fIjob_id\fP,
-.br 
+.br
 	uint32_t \fIjob_step_id\fP,
-.br 
+.br
 	uint16_t \fIsignal\fP
-.br 
+.br
 );
 .LP
 int \fBslurm_signal_job\fR (
-.br 
+.br
 	uint32_t \fIjob_id\fP,
-.br 
+.br
 	uint16_t \fIsignal\fP
-.br 
+.br
 );
 .LP
 int \fBslurm_signal_job_step\fR (
-.br 
+.br
 	uint32_t \fIjob_id\fP,
-.br 
+.br
 	uint32_t \fIjob_step_id\fP,
-.br 
+.br
 	uint16_t \fIsignal\fP
-.br 
+.br
 );
 .LP
 int \fBslurm_terminate_job\fR (
-.br 
+.br
 	uint32_t \fIjob_id\fP,
-.br 
+.br
 );
 .LP
 int \fBslurm_terminate_job_step\fR (
-.br 
+.br
 	uint32_t \fIjob_id\fP,
-.br 
+.br
 	uint32_t \fIjob_step_id\fP,
-.br 
+.br
 );
 .SH "ARGUMENTS"
-.LP 
+.LP
 \fIbatch_flag\fP
 If non\-zero then signal only the batch job shell.
-.TP 
+.TP
 \fIjob_id\fP
 Slurm job id number.
 .TP
@@ -72,29 +72,29 @@ Slurm job step id number.
 \fIsignal\fp
 Signal to be sent to the job or job step.
 .SH "DESCRIPTION"
-.LP 
-\fBslurm_kill_job\fR Request that a signal be sent to either the batch job shell 
-(if \fIbatch_flag\fP is non\-zero) or all steps of the specified job. 
+.LP
+\fBslurm_kill_job\fR Request that a signal be sent to either the batch job shell
+(if \fIbatch_flag\fP is non\-zero) or all steps of the specified job.
 If the job is pending and the signal is SIGKILL, the job will be terminated immediately.
 This function may only be successfully executed by the job's owner or user root.
-.LP 
-\fBslurm_kill_job_step\fR Request that a signal be sent to a specific job step. 
+.LP
+\fBslurm_kill_job_step\fR Request that a signal be sent to a specific job step.
 This function may only be successfully executed by the job's owner or user root.
 .LP
 \fBslurm_signal_job\fR Request that the specified signal be sent to all
-steps of an existing job. 
+steps of an existing job.
 .LP
 \fBslurm_signal_job_step\fR Request that the specified signal be sent to
-an existing job step. 
+an existing job step.
 .LP
 \fBslurm_terminate_job\fR Request termination of all steps of an
 existing job by sending a REQUEST_TERMINATE_JOB rpc to all slurmd in
-the the job allocation, and then calls slurm_complete_job(). 
+the job allocation, and then calls slurm_complete_job().
 \fBslurm_signal_job_step\fR Request that terminates a job step by
 sending a REQUEST_TERMINATE_TASKS rpc to all slurmd of a job step, and
 then calls slurm_complete_job_step() after verifying that all nodes in
 the job step no longer have running tasks from the job step.  (May
-take over 35 seconds to return.) 
+take over 35 seconds to return.)
 .SH "RETURN VALUE"
 .LP
 On success, zero is returned. On error, \-1 is returned, and Slurm error code is set appropriately.
@@ -104,21 +104,21 @@ On success, zero is returned. On error, \-1 is returned, and Slurm error code is
 .LP
 \fBESLURM_DEFAULT_PARTITION_NOT_SET\fR the system lacks a valid default partition.
 .LP
-\fBESLURM_INVALID_JOB_ID\fR the requested job id does not exist. 
+\fBESLURM_INVALID_JOB_ID\fR the requested job id does not exist.
 .LP
 \fBESLURM_JOB_SCRIPT_MISSING\fR the \fIbatch_flag\fP was set for a non\-batch job.
 .LP
-\fBESLURM_ALREADY_DONE\fR the specified job has already completed and can not be modified. 
+\fBESLURM_ALREADY_DONE\fR the specified job has already completed and can not be modified.
 .LP
-\fBESLURM_ACCESS_DENIED\fR the requesting user lacks authorization for the requested action (e.g. trying to delete or modify another user's job). 
+\fBESLURM_ACCESS_DENIED\fR the requesting user lacks authorization for the requested action (e.g. trying to delete or modify another user's job).
 .LP
-\fBESLURM_INTERCONNECT_FAILURE\fR failed to configure the node interconnect. 
+\fBESLURM_INTERCONNECT_FAILURE\fR failed to configure the node interconnect.
 .LP
-\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with 
+\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
 SLURM controller.
 
 .SH "NOTE"
-These functions are included in the libslurm library, 
+These functions are included in the libslurm library,
 which must be linked to your process for use
 (e.g. "cc \-lslurm myprog.c").
 
@@ -140,6 +140,6 @@ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 .SH "SEE ALSO"
-.LP 
-\fBscancel\fR(1), \fBslurm_get_errno\fR(3), 
+.LP
+\fBscancel\fR(1), \fBslurm_get_errno\fR(3),
 \fBslurm_perror\fR(3), \fBslurm_strerror\fR(3)
diff --git a/doc/man/man3/slurm_load_reservations.3 b/doc/man/man3/slurm_load_reservations.3
index 8aebc9e443d9051da8e2a50d8793e4a7bdc60d63..0c103c0495a31a38d50bc762978314a9617a6c1f 100644
--- a/doc/man/man3/slurm_load_reservations.3
+++ b/doc/man/man3/slurm_load_reservations.3
@@ -1,6 +1,6 @@
 .TH "Slurm API" "3" "January 2009" "David Bremer" "Slurm reservation information reporting functions"
 .SH "NAME"
-slurm_load_reservations, slurm_free_reservation_info_msg, 
+slurm_load_reservations, slurm_free_reservation_info_msg,
 slurm_print_reservation_info, slurm_sprint_reservation_info,
 slurm_print_reservation_info_msg
 \- Slurm reservation information reporting functions
@@ -9,39 +9,39 @@ slurm_print_reservation_info_msg
 #include <stdio.h>
 .br
 #include <slurm/slurm.h>
-.LP 
+.LP
 int \fBslurm_load_reservations\fR (
-.br 
-	time_t \fIupdate_time\fR, 
-.br 
+.br
+	time_t \fIupdate_time\fR,
+.br
 	reserve_info_msg_t **\fIreservation_info_msg_pptr\fP
-.br 
+.br
  );
 .LP
 void \fBslurm_free_reservation_info_msg\fR (
-.br 
+.br
 	reserve_info_msg_t *\fIreservation_info_msg_ptr\fP
-.br 
+.br
 );
-.LP 
+.LP
 void \fBslurm_print_reservation_info\fR (
 .br
-	FILE *\fIout_file\fP, 
+	FILE *\fIout_file\fP,
 .br
 	reserve_info_t *\fIreservation_ptr\fP,
 .br
 	int \fIone_liner\fP
-.br 
+.br
 );
-.LP 
+.LP
 char * \fBslurm_sprint_reservation_info\fR (
 .br
 	reserve_info_t *\fIreservation_ptr\fP,
 .br
 	int \fIone_liner\fP
-.br 
+.br
 );
-.LP 
+.LP
 void \fBslurm_print_reservation_info_msg\fR (
 .br
 	FILE *\fIout_file\fP,
@@ -49,67 +49,67 @@ void \fBslurm_print_reservation_info_msg\fR (
 	reserve_info_msg_t *\fIreservation_info_msg_ptr\fP,
 .br
 	int \fIone_liner\fP
-.br 
+.br
 );
 .SH "ARGUMENTS"
-.LP 
-.TP 
+.LP
+.TP
 \fIone_liner\fP
 Print one record per line if non\-zero.
-.TP 
+.TP
 \fIout_file\fP
 Specifies the file to print data to.
 .TP
 \fIreservation_info_msg_pptr\fP
-Specifies the double pointer to the structure to be created and filled 
-with the time of the last reservation update, a record count, and detailed 
-information about each reservation.  Detailed reservation information is 
-written to fixed sized records and includes:  reservation name, time limits, 
-access restrictions, etc.  See slurm.h for full details on the data 
-structure's contents. 
-.TP 
+Specifies the double pointer to the structure to be created and filled
+with the time of the last reservation update, a record count, and detailed
+information about each reservation.  Detailed reservation information is
+written to fixed sized records and includes:  reservation name, time limits,
+access restrictions, etc.  See slurm.h for full details on the data
+structure's contents.
+.TP
 \fIreservation_info_msg_ptr\fP
-Specifies the pointer to the structure created by \fBslurm_load_reservations\fP. 
+Specifies the pointer to the structure created by \fBslurm_load_reservations\fP.
 .TP
 \fIupdate_time\fP
-For all of the following informational calls, if update_time is equal to or greater 
-than the last time changes where made to that information, new information is 
-not returned.  Otherwise all the configuration. job, node, or reservation records 
+For all of the following informational calls, if update_time is equal to or greater
+than the last time changes were made to that information, new information is
+not returned.  Otherwise all the configuration, job, node, or reservation records
 are returned.
 .SH "DESCRIPTION"
-.LP 
-\fBslurm_load_reservations\fR Returns a reserve_info_msg_t that contains an 
+.LP
+\fBslurm_load_reservations\fR Returns a reserve_info_msg_t that contains an
 update time, record count, and array of reservation_table records for all reservations.
-.LP 
-\fBslurm_free_reservation_info_msg\fR Release the storage generated by the 
+.LP
+\fBslurm_free_reservation_info_msg\fR Release the storage generated by the
 \fBslurm_load_reservations\fR function.
-.LP 
-\fBslurm_print_reservation_info\fR  Prints the contents of the data structure 
-describing one of the reservation records from the data loaded by the 
+.LP
+\fBslurm_print_reservation_info\fR  Prints the contents of the data structure
+describing one of the reservation records from the data loaded by the
 \fBslurm_load_reservations\fR function.
-.LP 
-\fBslurm_sprint_reservation_info\fR  Prints the sames info as 
-\fBslurm_print_reservation_info\fR, but prints to a string that must be freed 
+.LP
+\fBslurm_sprint_reservation_info\fR  Prints the same info as
+\fBslurm_print_reservation_info\fR, but prints to a string that must be freed
 by the caller, rather than printing to a file.
-.LP 
-\fBslurm_print_reservation_info_msg\fR Prints the contents of the data 
-structure describing all reservation records loaded by the 
+.LP
+\fBslurm_print_reservation_info_msg\fR Prints the contents of the data
+structure describing all reservation records loaded by the
 \fBslurm_load_reservations\fR function.
 .SH "RETURN VALUE"
 .LP
-On success, zero is returned. On error, \-1 is returned, and Slurm error code 
+On success, zero is returned. On error, \-1 is returned, and Slurm error code
 is set appropriately.
 .SH "ERRORS"
 .LP
 \fBSLURM_NO_CHANGE_IN_DATA\fR Data has not changed since \fBupdate_time\fR.
 .LP
-\fBSLURM_PROTOCOL_VERSION_ERROR\fR Protocol version has changed, re\-link 
+\fBSLURM_PROTOCOL_VERSION_ERROR\fR Protocol version has changed, re\-link
 your code.
 .LP
-\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with 
+\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
 SLURM controller.
 .SH "EXAMPLE"
-.LP 
+.LP
 #include <stdio.h>
 .br
 #include <stdlib.h>
@@ -117,9 +117,9 @@ SLURM controller.
 #include <slurm/slurm.h>
 .br
 #include <slurm/slurm_errno.h>
-.LP 
+.LP
 int main (int argc, char *argv[])
-.br 
+.br
 {
 .br
 	int i;
@@ -142,7 +142,7 @@ int main (int argc, char *argv[])
 .LP
 	/* The easy way to print... */
 .br
-	slurm_print_reservation_info_msg(stdout, 
+	slurm_print_reservation_info_msg(stdout,
 .br
 	                                 res_info_ptr, 0);
 .LP
@@ -160,15 +160,15 @@ int main (int argc, char *argv[])
 .br
 	printf("reservations updated at %lx, records=%d\\n",
 .br
-	       res_info_ptr\->last_update, 
+	       res_info_ptr\->last_update,
 .br
 	       res_info_ptr\->record_count);
 .br
 	for (i = 0; i < res_info_ptr\->record_count; i++) {
 .br
-		printf ("reservationName=%s Nodes=%s\\n", 
+		printf ("reservationName=%s Nodes=%s\\n",
 .br
-			res_info_ptr\->reservation_array[i].name, 
+			res_info_ptr\->reservation_array[i].name,
 .br
 			res_info_ptr\->reservation_array[i].node_list );
 .br
@@ -177,11 +177,11 @@ int main (int argc, char *argv[])
 	slurm_free_reservation_info_msg (res_info_ptr);
 .br
 	return 0;
-.br 
+.br
 }
 
 .SH "NOTES"
-These functions are included in the libslurm library, 
+These functions are included in the libslurm library,
 which must be linked to your process for use
 (e.g. "cc \-lslurm myprog.c").
 .LP
@@ -207,10 +207,10 @@ FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 .SH "SEE ALSO"
-.LP 
-\fBscontrol\fR(1), \fBsinfo\fR(1), \fBsqueue\fR(1), 
-\fBslurm_hostlist_create\fR(3), \fBslurm_hostlist_shift\fR(3), 
+.LP
+\fBscontrol\fR(1), \fBsinfo\fR(1), \fBsqueue\fR(1),
+\fBslurm_hostlist_create\fR(3), \fBslurm_hostlist_shift\fR(3),
 \fBslurm_hostlist_destroy\fR(3),
-\fBslurm_get_errno\fR(3), \fBslurm_load_node\fR(3), 
+\fBslurm_get_errno\fR(3), \fBslurm_load_node\fR(3),
 \fBslurm_perror\fR(3), \fBslurm_strerror\fR(3)
 
diff --git a/doc/man/man3/slurm_reconfigure.3 b/doc/man/man3/slurm_reconfigure.3
index a04c25569db67b26a34c678379a91189adc18989..dabc039a4b8f1aa12e17b1df018df110ceab3c94 100644
--- a/doc/man/man3/slurm_reconfigure.3
+++ b/doc/man/man3/slurm_reconfigure.3
@@ -1,286 +1,286 @@
 .TH "Slurm API" "3" "May 2009" "Morris Jette" "Slurm administrative calls"
 .SH "NAME"
-slurm_create_partition, slurm_create_reservation, 
-slurm_delete_partition, slurm_delete_reservation, 
+slurm_create_partition, slurm_create_reservation,
+slurm_delete_partition, slurm_delete_reservation,
 slurm_init_part_desc_msg, slurm_init_resv_desc_msg,
-slurm_reconfigure, slurm_shutdown, slurm_takeover, slurm_update_job, 
+slurm_reconfigure, slurm_shutdown, slurm_takeover, slurm_update_job,
 ,slurm_init_update_node_msg slurm_update_node, slurm_update_partition,
 slurm_update_reservation
-\- Slurm administrative functions 
+\- Slurm administrative functions
 .SH "SYNTAX"
-.LP 
+.LP
 #include <slurm/slurm.h>
 .LP
-int \fBslurm_create_partition\fR ( 
-.br 
-	update_part_msg_t *\fIupdate_part_msg_ptr\fP 
-.br 
+int \fBslurm_create_partition\fR (
+.br
+	update_part_msg_t *\fIupdate_part_msg_ptr\fP
+.br
 );
 .LP
-int \fBslurm_create_reservation\fR ( 
-.br 
-	reserve_request_msg_t *\fIupdate_resv_msg_ptr\fP 
-.br 
+int \fBslurm_create_reservation\fR (
+.br
+	reserve_request_msg_t *\fIupdate_resv_msg_ptr\fP
+.br
 );
 .LP
-int \fBslurm_delete_partition\fR ( 
-.br 
-	delete_part_msg_t *\fIdelete_part_msg_ptr\fP 
-.br 
+int \fBslurm_delete_partition\fR (
+.br
+	delete_part_msg_t *\fIdelete_part_msg_ptr\fP
+.br
 );
 .LP
-int \fBslurm_delete_reservation\fR ( 
-.br 
-	reservation_name_msg_t *\fIdelete_resv_msg_ptr\fP 
-.br 
+int \fBslurm_delete_reservation\fR (
+.br
+	reservation_name_msg_t *\fIdelete_resv_msg_ptr\fP
+.br
 );
 .LP
 void \fBslurm_init_part_desc_msg\fR (
 .br
-	update_part_msg_t *\fIupdate_part_msg_ptr\fP 
-.br 
+	update_part_msg_t *\fIupdate_part_msg_ptr\fP
+.br
 );
 .LP
 void \fBslurm_init_resv_desc_msg\fR (
 .br
-	reserve_request_msg_t *\fIupdate_resv_msg_ptr\fP 
-.br 
+	reserve_request_msg_t *\fIupdate_resv_msg_ptr\fP
+.br
 );
-.LP 
+.LP
 int \fBslurm_reconfigure\fR ( );
-.LP 
-int \fBslurm_shutdown\fR ( 
+.LP
+int \fBslurm_shutdown\fR (
 .br
 	uint16_t \fIshutdown_options\fP
 .br
 );
-.LP 
+.LP
 int \fBslurm_takeover\fR ( );
 .LP
 int \fBslurm_update_job\fR (
-.br 
+.br
 	job_desc_msg_t *\fIupdate_job_msg_ptr\fP
-.br 
+.br
 );
 .LP
 void \fBslurm_init_update_node_msg\fR(
 .br
-	update_node_msg_t *\fIupdate_node_msg_ptr\fP 
-.br 
+	update_node_msg_t *\fIupdate_node_msg_ptr\fP
+.br
 );
 .LP
-int \fBslurm_update_node\fR ( 
-.br 
-	update_node_msg_t *\fIupdate_node_msg_ptr\fP 
-.br 
+int \fBslurm_update_node\fR (
+.br
+	update_node_msg_t *\fIupdate_node_msg_ptr\fP
+.br
 );
 .LP
-int \fBslurm_update_partition\fR ( 
-.br 
-	update_part_msg_t *\fIupdate_part_msg_ptr\fP 
-.br 
+int \fBslurm_update_partition\fR (
+.br
+	update_part_msg_t *\fIupdate_part_msg_ptr\fP
+.br
 );
 .LP
-int \fBslurm_update_reservation\fR ( 
-.br 
-	reserve_request_msg_t *\fIupdate_resv_msg_ptr\fP 
-.br 
+int \fBslurm_update_reservation\fR (
+.br
+	reserve_request_msg_t *\fIupdate_resv_msg_ptr\fP
+.br
 );
 .SH "ARGUMENTS"
-.LP 
-.TP 
+.LP
+.TP
 \fIshutdown_options\fP
 0: all slurm daemons are shutdown
 .br
 1: slurmctld generates a core file
 .br
 2: only the slurmctld is shutdown (no core file)
-.TP 
+.TP
 \fIdelete_part_msg_ptr\fP
-Specifies the pointer to a partition delete request specification. 
-See slurm.h for full details on the data structure's contents. 
-.TP 
+Specifies the pointer to a partition delete request specification.
+See slurm.h for full details on the data structure's contents.
+.TP
 \fIdelete_resv_msg_ptr\fP
-Specifies the pointer to a reservation delete request specification. 
-See slurm.h for full details on the data structure's contents. 
+Specifies the pointer to a reservation delete request specification.
+See slurm.h for full details on the data structure's contents.
 .TP
 \fIupdate_job_msg_ptr\fP
-Specifies the pointer to a job update request specification. See slurm.h 
-for full details on the data structure's contents. 
-.TP 
+Specifies the pointer to a job update request specification. See slurm.h
+for full details on the data structure's contents.
+.TP
 \fIupdate_node_msg_ptr\fP
-Specifies the pointer to a node update request specification. See slurm.h 
-for full details on the data structure's contents. 
-.TP 
+Specifies the pointer to a node update request specification. See slurm.h
+for full details on the data structure's contents.
+.TP
 \fIupdate_part_msg_ptr\fP
-Specifies the pointer to a partition create or update request specification. 
-See slurm.h for full details on the data structure's contents. 
-.TP 
+Specifies the pointer to a partition create or update request specification.
+See slurm.h for full details on the data structure's contents.
+.TP
 \fIupdate_resv_msg_ptr\fP
-Specifies the pointer to a reservation create or update request specification. 
-See slurm.h for full details on the data structure's contents. 
+Specifies the pointer to a reservation create or update request specification.
+See slurm.h for full details on the data structure's contents.
 .SH "DESCRIPTION"
-.LP 
-\fBslurm_create_partition\fR Request that a new partition be created. 
-Initialize the data structure using the \fBslurm_init_part_desc_msg\fR 
+.LP
+\fBslurm_create_partition\fR Request that a new partition be created.
+Initialize the data structure using the \fBslurm_init_part_desc_msg\fR
 function prior to setting values of the parameters to be changed.
-Note: \fBslurm_init_part_desc_msg\fR is not equivalent to setting the data 
+Note: \fBslurm_init_part_desc_msg\fR is not equivalent to setting the data
 structure values to zero.  A partition name must be set for the call to
 succeed.
 This function may only be successfully executed by user root.
-.LP 
-\fBslurm_create_reservation\fR Request that a new reservation be created. 
-Initialize the data structure using the \fBslurm_init_resv_desc_msg\fR 
+.LP
+\fBslurm_create_reservation\fR Request that a new reservation be created.
+Initialize the data structure using the \fBslurm_init_resv_desc_msg\fR
 function prior to setting values of the parameters to be changed.
-Note: \fBslurm_init_resv_desc_msg\fR is not equivalent to setting the data 
-structure values to zero.  The reservation's time limits, user or 
+Note: \fBslurm_init_resv_desc_msg\fR is not equivalent to setting the data
+structure values to zero.  The reservation's time limits, user or
 account restrictions, and node names or a node count must be specified for
 the call to succeed.
 This function may only be successfully executed by user root.
-.LP 
-\fBslurm_delete_partition\fR Request that the specified partition be deleted. 
-All jobs associated with the identified partition will be terminated and 
+.LP
+\fBslurm_delete_partition\fR Request that the specified partition be deleted.
+All jobs associated with the identified partition will be terminated and
 purged.  This function may only be successfully executed by user root.
-.LP 
-\fBslurm_delete_reservation\fR Request that the specified reservation be 
+.LP
+\fBslurm_delete_reservation\fR Request that the specified reservation be
 deleted. This function may only be successfully executed by user root.
-.LP 
-\fBslurm_init_part_desc_msg\fR Initialize the contents of a partition 
-descriptor with default values. Note: \fBslurm_init_part_desc_msg\fR is 
-not equivalent to setting the data structure values to zero. Execute 
-this function before executing \fBslurm_create_partition\fR or 
+.LP
+\fBslurm_init_part_desc_msg\fR Initialize the contents of a partition
+descriptor with default values. Note: \fBslurm_init_part_desc_msg\fR is
+not equivalent to setting the data structure values to zero. Execute
+this function before executing \fBslurm_create_partition\fR or
 \fBslurm_update_partition\fR.
-.LP 
-\fBslurm_init_resv_desc_msg\fR Initialize the contents of a reservation 
-descriptor with default values. Note: \fBslurm_init_resv_desc_msg\fR is 
-not equivalent to setting the data structure values to zero. Execute this 
-function before executing \fBslurm_create_reservation\fR or 
+.LP
+\fBslurm_init_resv_desc_msg\fR Initialize the contents of a reservation
+descriptor with default values. Note: \fBslurm_init_resv_desc_msg\fR is
+not equivalent to setting the data structure values to zero. Execute this
+function before executing \fBslurm_create_reservation\fR or
 \fBslurm_update_reservation\fR.
-.LP 
-\fBslurm_reconfigure\fR Request that the Slurm controller re\-read its 
-configuration file. The new configuration parameters take effect 
+.LP
+\fBslurm_reconfigure\fR Request that the Slurm controller re\-read its
+configuration file. The new configuration parameters take effect
 immediately. This function may only be successfully executed by user root.
-.LP 
-\fBslurm_shutdown\fR Request that the Slurm controller terminate. This 
+.LP
+\fBslurm_shutdown\fR Request that the Slurm controller terminate. This
 function may only be successfully executed by user root.
-.LP 
-\fBslurm_takeover\fR Request that the Slurm primary controller shutdown 
+.LP
+\fBslurm_takeover\fR Request that the Slurm primary controller shutdown
 immediately and the backup controller take over.
 This function may only be successfully executed by user root.
-.LP 
-\fBslurm_update_job\fR Request that the configuration of a job be updated. Note 
-that most, but not all parameters of a job may be changed by this function. 
-Initialize the data structure using the \fBslurm_init_job_desc_msg\fR function 
-prior to setting values of the parameters to be changed. Note: 
-\fBslurm_init_job_desc_msg\fR is not equivalent to setting the data structure 
-values to zero. This function may only be successfully executed by user root. 
-Note the job priority of zero represents a job that will not be scheduled. 
-Slurm uses the priority one to represent jobs that can not be scheduled until 
-additional nodes are returned to service (i.e. not DOWN, DRAINED, or FAILED). 
+.LP
+\fBslurm_update_job\fR Request that the configuration of a job be updated. Note
+that most, but not all parameters of a job may be changed by this function.
+Initialize the data structure using the \fBslurm_init_job_desc_msg\fR function
+prior to setting values of the parameters to be changed. Note:
+\fBslurm_init_job_desc_msg\fR is not equivalent to setting the data structure
+values to zero. This function may only be successfully executed by user root.
+Note the job priority of zero represents a job that will not be scheduled.
+Slurm uses the priority one to represent jobs that can not be scheduled until
+additional nodes are returned to service (i.e. not DOWN, DRAINED, or FAILED).
 This permits lower priority jobs to utilize those resources which are available.
-.LP 
-\fBslurm_init_update_node_msg\fR Initialize the contents of an update mpde 
-descriptor with default values. Note: \fBslurm_init_update_node_msg\fR is 
-not equivalent to setting the data structure values to zero. Execute 
+.LP
+\fBslurm_init_update_node_msg\fR Initialize the contents of an update node
+descriptor with default values. Note: \fBslurm_init_update_node_msg\fR is
+not equivalent to setting the data structure values to zero. Execute
 this function before executing \fBslurm_update_node\fR.
-.LP 
-\fBslurm_update_node\fR Request that the state of one or more nodes be updated. 
-Note that the state of a node (e.g. DRAINING, IDLE, etc.) may be changed, but 
-its hardware configuration may not be changed by this function. If the hardware 
+.LP
+\fBslurm_update_node\fR Request that the state of one or more nodes be updated.
+Note that the state of a node (e.g. DRAINING, IDLE, etc.) may be changed, but
+its hardware configuration may not be changed by this function. If the hardware
 configuration of a node changes, update the Slurm configuration file and execute
-the \fBslurm_reconfigure\fR function. This function may only be successfully 
-executed by user root. If used by some autonomous program, the state value 
-most likely to be used is \fBNODE_STATE_DRAIN\fR or \fBNODE_STATE_FAILING\fR. 
-The node state flag \fBNODE_STATE_NO_RESPOND\fR may be specified without 
-changing the underlying node state. Note that the node's 
-\fBNODE_STATE_NO_RESPOND\fR flag will be cleared as soon as the slurmd 
+the \fBslurm_reconfigure\fR function. This function may only be successfully
+executed by user root. If used by some autonomous program, the state value
+most likely to be used is \fBNODE_STATE_DRAIN\fR or \fBNODE_STATE_FAILING\fR.
+The node state flag \fBNODE_STATE_NO_RESPOND\fR may be specified without
+changing the underlying node state. Note that the node's
+\fBNODE_STATE_NO_RESPOND\fR flag will be cleared as soon as the slurmd
 daemon on that node communicates with the slurmctld daemon.
-Likewise the state \fBNODE_STATE_DOWN\fR indicates that the slurmd daemon 
-is not responding (and has not responded for an interval at least as long 
-as the \fBSlurmdTimeout\fR configuration parameter). The node will leave the 
+Likewise the state \fBNODE_STATE_DOWN\fR indicates that the slurmd daemon
+is not responding (and has not responded for an interval at least as long
+as the \fBSlurmdTimeout\fR configuration parameter). The node will leave the
 \fBNODE_STATE_DOWN\fR state as soon as  the slurmd daemon communicates.
-.LP 
-\fBslurm_update_partition\fR Request that the configuration of a 
-partition be updated.  Note that most, but not all parameters of a 
-partition may be changed by this function. Initialize the data 
-structure using the \fBslurm_init_part_desc_msg\fR function prior 
-to setting values of the parameters to be changed. Note: 
-\fBslurm_init_part_desc_msg\fR is not equivalent to setting the 
-data structure values to zero. This function may only be 
+.LP
+\fBslurm_update_partition\fR Request that the configuration of a
+partition be updated.  Note that most, but not all parameters of a
+partition may be changed by this function. Initialize the data
+structure using the \fBslurm_init_part_desc_msg\fR function prior
+to setting values of the parameters to be changed. Note:
+\fBslurm_init_part_desc_msg\fR is not equivalent to setting the
+data structure values to zero. This function may only be
 successfully executed by user root.
-.LP 
-\fBslurm_update_reservation\fR Request that the configuration of a 
-reservation be updated.  Initialize the data structure using the 
-\fBslurm_init_resv_desc_msg\fR function prior to setting values of 
-the parameters to be changed. Note:  \fBslurm_init_resv_desc_msg\fR 
-is not equivalent to setting the data structure values to zero. This 
+.LP
+\fBslurm_update_reservation\fR Request that the configuration of a
+reservation be updated.  Initialize the data structure using the
+\fBslurm_init_resv_desc_msg\fR function prior to setting values of
+the parameters to be changed. Note:  \fBslurm_init_resv_desc_msg\fR
+is not equivalent to setting the data structure values to zero. This
 function may only be successfully executed by user root.
 .SH "RETURN VALUE"
 .LP
-On success, zero is returned. On error, \-1 is returned, and the Slurm error 
+On success, zero is returned. On error, \-1 is returned, and the Slurm error
 code is set appropriately.
 .LP
-Exception:  A successful slurm_create_reservation call returns a string 
+Exception:  A successful slurm_create_reservation call returns a string
 containing the name of the reservation, in memory to be freed by the caller.
 A failed call returns NULL and sets the Slurm error code.
 .SH "ERRORS"
 .LP
-\fBSLURM_PROTOCOL_VERSION_ERROR\fR Protocol version has changed, re\-link your 
+\fBSLURM_PROTOCOL_VERSION_ERROR\fR Protocol version has changed, re\-link your
 code.
 .LP
-\fBESLURM_INVALID_NODE_NAME\fR The requested node name(s) is/are not valid. 
+\fBESLURM_INVALID_NODE_NAME\fR The requested node name(s) is/are not valid.
 .LP
-\fBESLURM_INVALID_NODE_STATE\fR The specified state node state or requested 
-node state transition is not valid. 
+\fBESLURM_INVALID_NODE_STATE\fR The specified state node state or requested
+node state transition is not valid.
 .LP
-\fBESLURM_INVALID_PARTITION_NAME\fR The requested partition name is not valid. 
+\fBESLURM_INVALID_PARTITION_NAME\fR The requested partition name is not valid.
 .LP
 \fBESLURM_INVALID_AUTHTYPE_CHANGE\fR The \fBAuthType\fR parameter can
 not be changed using the \fBslurm_reconfigure\fR function, but all SLURM
-daemons and commands must be restarted. See \fBslurm.conf\fR(5) for more 
+daemons and commands must be restarted. See \fBslurm.conf\fR(5) for more
 information.
 .LP
-\fBESLURM_INVALID_SCHEDTYPE_CHANGE\fR The \fBSchedulerType\fR parameter can 
-not be changed using the \fBslurm_reconfigure\fR function, but the 
-\fBslurmctld\fR daemon must be restarted. Manual changes to existing job 
+\fBESLURM_INVALID_SCHEDTYPE_CHANGE\fR The \fBSchedulerType\fR parameter can
+not be changed using the \fBslurm_reconfigure\fR function, but the
+\fBslurmctld\fR daemon must be restarted. Manual changes to existing job
 parameters may also be required. See \fBslurm.conf\fR(5) for more information.
 .LP
 \fBESLURM_INVALID_SWITCHTYPE_CHANGE\fR The \fBSwitchType\fR parameter can
 not be changed using the \fBslurm_reconfigure\fR function, but all
-SLURM daemons and commands must be restarted. All previously running 
+SLURM daemons and commands must be restarted. All previously running
 jobs will be lost. See \fBslurm.conf\fR(5) for more information.
 .LP
-\fBESLURM_ACCESS_DENIED\fR The requesting user lacks authorization for 
-the requested action (e.g. trying to delete or modify another user's job). 
+\fBESLURM_ACCESS_DENIED\fR The requesting user lacks authorization for
+the requested action (e.g. trying to delete or modify another user's job).
 .LP
-\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with 
+\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
 SLURM controller.
 .LP
-\fBESLURM_RESERVATION_ACCESS\fR  Requestor is not authorized to access the 
+\fBESLURM_RESERVATION_ACCESS\fR  Requestor is not authorized to access the
 reservation.
 .LP
-\fBESLURM_RESERVATION_INVALID\fR  Invalid reservation parameter given, 
+\fBESLURM_RESERVATION_INVALID\fR  Invalid reservation parameter given,
 e.g. wrong name given.
 .LP
 \fBESLURM_INVALID_TIME_VALUE\fR  Invalid time value.
 .LP
-\fBESLURM_RESERVATION_BUSY\fR  Reservation is busy, e.g. trying to delete a 
+\fBESLURM_RESERVATION_BUSY\fR  Reservation is busy, e.g. trying to delete a
 reservation while in use.
 .LP
-\fBESLURM_RESERVATION_NOT_USABLE\fR  Reservation not usable, e.g. trying to 
+\fBESLURM_RESERVATION_NOT_USABLE\fR  Reservation not usable, e.g. trying to
 use an expired reservation.
 .SH "EXAMPLE"
-.LP 
+.LP
 #include <stdio.h>
 .br
 #include <slurm/slurm.h>
 .br
 #include <slurm/slurm_errno.h>
-.LP 
+.LP
 int main (int argc, char *argv[])
-.br 
+.br
 {
-.br 
+.br
 	job_desc_msg_t          update_job_msg;
 .br
 	update_node_msg_t       update_node_msg;
@@ -292,105 +292,105 @@ int main (int argc, char *argv[])
 	reserve_request_msg_t   resv_msg;
 .br
 	char                   *resv_name = NULL;
-.LP 
+.LP
 	if (slurm_reconfigure ( )) {
 .br
 		slurm_perror ("slurm_reconfigure error");
-.br 
+.br
 		exit (1);
 .br
 	}
-.LP 
+.LP
 	slurm_init_job_desc_msg( &update_job_msg );
-.br 
+.br
 	update_job_msg.job_id = 1234;
-.br 
+.br
 	update_job_msg time_limit = 200;
-.br 
-	if (slurm_update_job (&update_job_msg)) { 
+.br
+	if (slurm_update_job (&update_job_msg)) {
 .br
 		slurm_perror ("slurm_update_job error");
-.br 
+.br
 		exit (1);
 .br
 	}
-.LP 
+.LP
 	slurm_init_part_desc_msg ( &update_part_msg );
 .br
 	update_part_msg.name = "test.partition";
 .br
 	update_part_msg.state_up = 0;  /* partition down */
-.br 
-	if (slurm_create_partition (&update_part_msg)) { 
+.br
+	if (slurm_create_partition (&update_part_msg)) {
 .br
 		slurm_perror ("slurm_create_partition error");
-.br 
+.br
 		exit (1);
 .br
 	}
-.LP 
+.LP
 	update_part_msg.state_up = 1;  /* partition up */
-.br 
-	if (slurm_update_partition (&update_part_msg)) { 
+.br
+	if (slurm_update_partition (&update_part_msg)) {
 .br
 		slurm_perror ("slurm_update_partition error");
-.br 
+.br
 		exit (1);
 .br
 	}
-.LP 
+.LP
 	delete_part_msg.name = "test.partition";
-.br 
-	if (slurm_delete_partition (&delete_part_msg)) { 
+.br
+	if (slurm_delete_partition (&delete_part_msg)) {
 .br
 		slurm_perror ("slurm_delete_partition error");
-.br 
+.br
 		exit (1);
 .br
 	}
-.LP 
+.LP
 	slurm_init_update_node_msg (&update_node_msg);
 .br
 	update_node_msg.node_names = "lx[10\-12]";
 .br
 	update_node_msg.node_state = NODE_STATE_DRAINING ;
-.br 
-	if (slurm_update_node (&update_node_msg)) { 
+.br
+	if (slurm_update_node (&update_node_msg)) {
 .br
 		slurm_perror ("slurm_update_node error");
-.br 
+.br
 		exit (1);
-.br 
+.br
 	}
-.LP 
+.LP
 	slurm_init_resv_desc_msg ( &resv_msg );
 .br
-	resv_msg.start_time = time(NULL) + 60*60;  /* One hour from now */ 
+	resv_msg.start_time = time(NULL) + 60*60;  /* One hour from now */
 .br
 	resv_msg.duration = 720;  /* 12 hours/720 minutes */
 .br
-	resv_msg.node_cnt = 10; 
+	resv_msg.node_cnt = 10;
 .br
 	resv_msg.accounts = "admin";
-.br 
-	resv_name = slurm_create_reservation (&resv_msg); 
+.br
+	resv_name = slurm_create_reservation (&resv_msg);
 .br
 	if (!resv_name) {
 .br
 		slurm_perror ("slurm_create_reservation error");
-.br 
+.br
 		exit (1);
 .br
 	}
 .br
 	free(resv_name);
-.br 
+.br
 	exit (0);
-.br 
+.br
 }
 
 .SH "NOTE"
-These functions are included in the libslurm library, 
+These functions are included in the libslurm library,
 which must be linked to your process for use
 (e.g. "cc \-lslurm myprog.c").
 
@@ -412,6 +412,6 @@ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 .SH "SEE ALSO"
-.LP 
+.LP
 \fBscontrol\fR(1), \fBslurm_get_errno\fR(3), \fBslurm_init_job_desc_msg\fR(3),
 \fBslurm_perror\fR(3), \fBslurm_strerror\fR(3), \fBslurm.conf\fR(5)
diff --git a/doc/man/man3/slurm_resume.3 b/doc/man/man3/slurm_resume.3
index 70942cb8cca84da2ccd1aaf80b9c1591f2d8b253..676eddc7f2a894dc0f77d9316697f38d06e63ad8 100644
--- a/doc/man/man3/slurm_resume.3
+++ b/doc/man/man3/slurm_resume.3
@@ -4,7 +4,7 @@
 slurm_suspend, slurm_resume, slurm_requeue \- Slurm suspend, resume and requeue functions
 
 .SH "SYNTAX"
-.LP 
+.LP
 #include <slurm/slurm.h>
 .LP
 .LP
@@ -27,7 +27,7 @@ int \fBslurm_requeue\fR (
 );
 
 .SH "ARGUMENTS"
-.LP 
+.LP
 .TP
 \fIjob_id\fP
 SLURM job ID to perform the operation upon.
@@ -42,12 +42,12 @@ Resume execution of a previously suspended job.
 .LP
 \fBslurm_requeue\fR
 Requeue a running or pending SLURM batch job.
-The job script will be restarted from its beginning, 
+The job script will be restarted from its beginning,
 ignoring any previous checkpoint.
 
 .SH "RETURN VALUE"
 .LP
-Zero is returned upon success. 
+Zero is returned upon success.
 On error, \-1 is returned, and the Slurm error code is set appropriately.
 .SH "ERRORS"
 .LP
@@ -56,10 +56,10 @@ On error, \-1 is returned, and the Slurm error code is set appropriately.
 resume a job that is not currently suspended, or
 requeue a job on which the operation has been disabled).
 .LP
-\fBESLURM_INVALID_JOB_ID\fR the requested job id does not exist. 
+\fBESLURM_INVALID_JOB_ID\fR the requested job id does not exist.
 .LP
-\fBESLURM_ACCESS_DENIED\fR the requesting user lacks authorization for the 
-requested action (e.g. not user root or SlurmUser). 
+\fBESLURM_ACCESS_DENIED\fR the requesting user lacks authorization for the
+requested action (e.g. not user root or SlurmUser).
 .LP
 \fBESLURM_JOB_PENDING\fR the requested job is still pending.
 .LP
@@ -68,7 +68,7 @@ requested action (e.g. not user root or SlurmUser).
 \fBESLURM_NOT_SUPPORTED\fR the requested operation is not supported on this system.
 
 .SH "NOTE"
-These functions are included in the libslurm library, 
+These functions are included in the libslurm library,
 which must be linked to your process for use
 (e.g. "cc \-lslurm myprog.c").
 
@@ -91,5 +91,5 @@ FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 .SH "SEE ALSO"
-.LP 
+.LP
 \fBscontrol\fR(1)
diff --git a/doc/man/man3/slurm_slurmd_status.3 b/doc/man/man3/slurm_slurmd_status.3
index 1f4ff24faf35bfd52dcfb071cbb5d4af91b8a215..0091610997f04ff1d71e4809e4749440e9800f0e 100644
--- a/doc/man/man3/slurm_slurmd_status.3
+++ b/doc/man/man3/slurm_slurmd_status.3
@@ -6,19 +6,19 @@ slurm_free_slurmd_status, slurm_load_slurmd_status, slurm_print_slurmd_status
 \- Slurmd status functions
 
 .SH "SYNTAX"
-.LP 
+.LP
 #include <slurm/slurm.h>
 .LP
 .LP
 void \fBslurm_free_slurmd_status\fR (
 .br
-	slurmd_status_t* \fIslurmd_status_ptr\fP 
+	slurmd_status_t* \fIslurmd_status_ptr\fP
 .br
 );
 .LP
 int \fBslurm_load_slurmd_status\fR (
 .br
-	slurmd_status_t** \fIslurmd_status_ptr\fP 
+	slurmd_status_t** \fIslurmd_status_ptr\fP
 .br
 );
 .LP
@@ -26,14 +26,14 @@ void \fBslurm_print_slurmd_status\fR (
 .br
 	FILE *\fIout\fP,
 .br
-	slurmd_status_t* \fIslurmd_status_pptr\fP 
+	slurmd_status_t* \fIslurmd_status_pptr\fP
 .br
 );
 
 .SH "ARGUMENTS"
-.LP 
+.LP
 .TP
-\fIslurmd_status_ptr\fP 
+\fIslurmd_status_ptr\fP
 Slurmd status pointer.  Created by \fBslurm_load_slurmd_status\fR,
 used in subsequent function calls, and destroyed by
 \fBslurm_free_slurmd_status\fR.
@@ -41,7 +41,7 @@ used in subsequent function calls, and destroyed by
 .SH "DESCRIPTION"
 .LP
 \fBslurm_free_slurmd_status\fR free slurmd state information.
-.LP 
+.LP
 \fBslurm_load_slurmd_status\fR issue RPC to get the status of slurmd
 daemon on this machine.
 .LP
diff --git a/doc/man/man3/slurm_step_ctx_create.3 b/doc/man/man3/slurm_step_ctx_create.3
index 13b35e7f6931f5a76a920eb069630fac6e386a49..dd50639632cf2b9809238d4486d2ba0f07ae9f1b 100644
--- a/doc/man/man3/slurm_step_ctx_create.3
+++ b/doc/man/man3/slurm_step_ctx_create.3
@@ -7,19 +7,19 @@ slurm_step_ctx_params_t_init, slurm_jobinfo_ctx_get,
 slurm_spawn_kill, slurm_step_ctx_destroy \- Slurm task spawn functions
 
 .SH "SYNTAX"
-.LP 
+.LP
 #include <slurm/slurm.h>
-.LP 
+.LP
 .LP
 slurm_step_ctx \fBslurm_step_ctx_create\fR (
 .br
-	slurm_step_ctx_params_t *\fIstep_req\fP 
+	slurm_step_ctx_params_t *\fIstep_req\fP
 .br
 );
 .LP
 slurm_step_ctx \fBslurm_step_ctx_create_no_alloc\fR (
 .br
-	slurm_step_ctx_params_t *\fIstep_req\fP 
+	slurm_step_ctx_params_t *\fIstep_req\fP
 .br
 );
 .LP
@@ -41,9 +41,9 @@ int \fBslurm_step_ctx_get\fR (
 .LP
 int \fBslurm_jobinfo_ctx_get\fR (
 .br
-	switch_jobinfo_t \fIjobinfo\fP, 
+	switch_jobinfo_t \fIjobinfo\fP,
 .br
-	int \fIdata_type\fP, 
+	int \fIdata_type\fP,
 .br
 	void *\fIdata\fP
 .br
@@ -51,7 +51,7 @@ int \fBslurm_jobinfo_ctx_get\fR (
 .LP
 void \fBslurm_step_ctx_params_t_init\fR (
 .br
-	slurm_step_ctx_params_t *\fIstep_req\fP 
+	slurm_step_ctx_params_t *\fIstep_req\fP
 .br
 );
 .LP
@@ -77,10 +77,10 @@ int \fBslurm_step_ctx_destroy\fR {
 .br
 );
 .SH "ARGUMENTS"
-.LP 
+.LP
 .TP
 \fIstep_req\fP
-Specifies the pointer to the structure with job step request specification. See 
+Specifies the pointer to the structure with job step request specification. See
 slurm.h for full details on the data structure's contents.
 .TP
 \fIctx\fP
@@ -100,25 +100,25 @@ upon the switch plugin in use.
 .TP
 \fIfd_array\fP
 Array of socket file descriptors to be connected to the initiated tasks.
-Tasks will be connected to these file descriptors in order of their 
+Tasks will be connected to these file descriptors in order of their
 task id.
 This socket will carry standard input, output and error for the task.
 \fIjobinfo\fP
 Switch\-specific job information as returned by \fBslurm_step_ctx_get\fR.
 .TP
 \fIsignal\fP
-Signal to be sent to the spawned tasks. 
+Signal to be sent to the spawned tasks.
 .SH "DESCRIPTION"
 .LP
 \fBslurm_jobinfo_ctx_get\fR Get values from a \fIjobinfo\fR field as
-returned by \fBslurm_step_ctx_get\fR. The operation of this function 
+returned by \fBslurm_step_ctx_get\fR. The operation of this function
 is highly dependent upon the switch plugin in use.
 .LP
-\fBslurm_step_ctx_create\fR Create a job step context. To avoid memory 
+\fBslurm_step_ctx_create\fR Create a job step context. To avoid memory
 leaks call \fBslurm_step_ctx_destroy\fR when the use of this context is
-finished. NOTE: this function creates a slurm job step. Call \fBslurm_spawn\fR 
+finished. NOTE: this function creates a slurm job step. Call \fBslurm_spawn\fR
 in a timely fashion to avoid having job step credentials time out. If
-\fBslurm_spawn\fR is not used, explicitly cancel the job step. 
+\fBslurm_spawn\fR is not used, explicitly cancel the job step.
 .LP
 \fBslurm_step_ctx_create_no_alloc\fR Same as above, only no
 allocation is made. To avoid memory leaks call
@@ -126,7 +126,7 @@ allocation is made. To avoid memory leaks call
 .LP
 \fBslurm_step_ctx_daemon_per_node_hack\fR Hack the step context to run
 a single process per node, regardless of the settings selected at
-slurm_step_ctx_create time. 
+slurm_step_ctx_create time.
 .LP
 \fBslurm_step_ctx_get\fR Get values from a job step context.
 \fIctx_key\fP identifies the fields to be gathered from the job step context.
@@ -137,17 +137,17 @@ of \fIctx_key\fP. See the \fBCONTEXT KEYS\fR section for details.
 structure that you will pass to slurm_step_ctx_create().
 .LP
 \fBslurm_spawn\fR Spawn tasks based upon a job step context
-and establish communications with the tasks using the socket 
+and establish communications with the tasks using the socket
 file descriptors specified.
-Note that this function can only be called once for each job 
-step context. 
+Note that this function can only be called once for each job
+step context.
 Establish a new job step context for each set of tasks to be spawned.
 .LP
-\fBslurm_spawn_kill\fR Signal the tasks spawned for this context 
+\fBslurm_spawn_kill\fR Signal the tasks spawned for this context
 by \fBslurm_spawn\fR.
 .LP
 \fBslurm_step_ctx_destroy\fR Destroy a job step context created by
-\fBslurm_step_ctx_create\fR. 
+\fBslurm_step_ctx_create\fR.
 .SH "CONEXT KEYS"
 .TP
 \fBSLURM_STEP_CTX_ARGS\fR
@@ -156,17 +156,17 @@ Accepts two additional arguments, the first of type int and
 the second of type char **.
 .TP
 \fBSLURM_STEP_CTX_CHDIR\fR
-Have the remote process change directory to the specified location 
+Have the remote process change directory to the specified location
 before beginning execution. Accepts one argument of type
 char * identifying the directory's pathname. By default
 the remote process will execute in the same directory pathname
-from which it is spawned. NOTE: This assumes that same directory 
-pathname exists on the other nodes. 
+from which it is spawned. NOTE: This assumes that same directory
+pathname exists on the other nodes.
 .TP
 \fBSLURM_STEP_CTX_ENV\fR
 Sets the environment variable count and values for the executable.
 Accepts two additional arguments, the first of type int and
-the second of type char **. By default the current environment 
+the second of type char **. By default the current environment
 variables are copied to started task's environment.
 .TP
 \fBSLURM_STEP_CTX_RESP\fR
@@ -179,28 +179,28 @@ Accepts one additional argument of type uint32_t *.
 .TP
 \fBSLURM_STEP_CTX_TASKS\fR
 Get the number of tasks per node for a given job.
-Accepts one additional argument of type uint32_t **. 
-This argument will be set to point to an array with the 
+Accepts one additional argument of type uint32_t **.
+This argument will be set to point to an array with the
 task counts of each node in an element of the array.
-See \fBSLURM_STEP_CTX_TID\fR below to determine the 
+See \fBSLURM_STEP_CTX_TID\fR below to determine the
 task ID numbers associated with each of those tasks.
 .TP
 \fBSLURM_STEP_CTX_TID\fR
-Get the task ID numbers associated with the tasks allocated to 
+Get the task ID numbers associated with the tasks allocated to
 a specific node.
 Accepts two additional arguments, the first of type int and
-the second of type uint32_t **. The first argument identifies 
-the node number of interest (zero origin). The second argument 
-will be set to point to an array with the task ID numbers of 
-each task allocated to the node (also zero origin). 
-See \fBSLURM_STEP_CTX_TASKS\fR above to determine how many 
+the second of type uint32_t **. The first argument identifies
+the node number of interest (zero origin). The second argument
+will be set to point to an array with the task ID numbers of
+each task allocated to the node (also zero origin).
+See \fBSLURM_STEP_CTX_TASKS\fR above to determine how many
 tasks are associated with each node.
 .SH "RETURN VALUE"
 .LP
 For \fB slurm_step_ctx_create\fR a context is return upon success. On error
 NULL is returned and the Slurm error code is set appropriately.
 .LP
-For all other functions zero is returned upon success. 
+For all other functions zero is returned upon success.
 On error, \-1 is returned, and the Slurm error code is set appropriately.
 .SH "ERRORS"
 .LP
@@ -208,20 +208,20 @@ On error, \-1 is returned, and the Slurm error code is set appropriately.
 .LP
 \fBSLURM_PROTOCOL_VERSION_ERROR\fR Protocol version has changed, re\-link your code.
 .LP
-\fBESLURM_INVALID_JOB_ID\fR the requested job id does not exist. 
+\fBESLURM_INVALID_JOB_ID\fR the requested job id does not exist.
 .LP
-\fBESLURM_ALREADY_DONE\fR the specified job has already completed and can not be modified. 
+\fBESLURM_ALREADY_DONE\fR the specified job has already completed and can not be modified.
 .LP
-\fBESLURM_ACCESS_DENIED\fR the requesting user lacks authorization for the requested action (e.g. trying to delete or modify another user's job). 
+\fBESLURM_ACCESS_DENIED\fR the requesting user lacks authorization for the requested action (e.g. trying to delete or modify another user's job).
 .LP
 \fBESLURM_DISABLED\fR the ability to create a job step is currently disabled.
 This is indicative of the job being suspended. Retry the call as desired.
 .LP
-\fBESLURM_INTERCONNECT_FAILURE\fR failed to configure the node interconnect. 
+\fBESLURM_INTERCONNECT_FAILURE\fR failed to configure the node interconnect.
 .LP
-\fBESLURM_BAD_DIST\fR task distribution specification is invalid. 
+\fBESLURM_BAD_DIST\fR task distribution specification is invalid.
 .LP
-\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with 
+\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
 SLURM controller.
 .SH "EXAMPLE
 .LP
@@ -229,7 +229,7 @@ SEE \fBslurm_step_launch\fR(3) man page for an example of slurm_step_ctx_create
 and slurm_step_launch in use together.
 
 .SH "NOTE"
-These functions are included in the libslurm library, 
+These functions are included in the libslurm library,
 which must be linked to your process for use
 (e.g. "cc \-lslurm myprog.c").
 
@@ -251,8 +251,8 @@ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 .SH "SEE ALSO"
-.LP 
-\fBslurm_allocate_resources\fR(3), \fBslurm_job_step_create\fR(3), 
-\fBslurm_kill_job\fR(3), 
+.LP
+\fBslurm_allocate_resources\fR(3), \fBslurm_job_step_create\fR(3),
+\fBslurm_kill_job\fR(3),
 \fBslurm_get_errno\fR(3), \fBslurm_perror\fR(3), \fBslurm_strerror\fR(3),
-\fBsrun\fR(1) 
+\fBsrun\fR(1)
diff --git a/doc/man/man3/slurm_step_launch.3 b/doc/man/man3/slurm_step_launch.3
index fb2c4a7393b3396df31187333e39021ecc20465b..1e7bbff705024991b83ab7ded8bb9f3969a85cdf 100644
--- a/doc/man/man3/slurm_step_launch.3
+++ b/doc/man/man3/slurm_step_launch.3
@@ -7,13 +7,13 @@ slurm_step_launch_fwd_signal, slurm_step_launch_wait_start,
 slurm_step_launch_wait_finish, slurm_step_launch_abort \- Slurm job step launch functions
 
 .SH "SYNTAX"
-.LP 
+.LP
 #include <slurm/slurm.h>
-.LP 
+.LP
 .LP
 void \fBslurm_step_launch_params_t_init\fR (
 .br
-	slurm_step_launch_params_t *\fIlaunch_req\fP 
+	slurm_step_launch_params_t *\fIlaunch_req\fP
 .br
 );
 .LP
@@ -54,7 +54,7 @@ void \fBslurm_step_launch_abort\fR {
 );
 
 .SH "ARGUMENTS"
-.LP 
+.LP
 .TP
 \fIcallbacks\fP
 Identify functions to be called when various events occur.
@@ -64,7 +64,7 @@ Job step context. Created by \fBslurm_step_ctx_create\fR, used in subsequent
 function calls, and destroyed by \fBslurm_step_ctx_destroy\fR.
 .TP
 \fIlaunch_req\fP
-Pointer to a structure allocated by the user containing specifications of 
+Pointer to a structure allocated by the user containing specifications of
 the job step to be launched.
 
 .SH "DESCRIPTION"
@@ -80,24 +80,24 @@ with running tasks.
 .LP
 \fBslurm_step_launch_wait_start\fR Block until all tasks have started.
 .LP
-\fBslurm_step_launch_wait_finish\fR Block until all tasks have finished 
+\fBslurm_step_launch_wait_finish\fR Block until all tasks have finished
 (or failed to start altogether).
 .LP
-\fBslurm_step_launch_abort\fR Abort an in-progress launch, or terminate 
+\fBslurm_step_launch_abort\fR Abort an in-progress launch, or terminate
 the fully launched job step. Can be called from a signal handler.
 
 .SH "IO Redirection"
 .LP
-Use the \fIlocal_fds\fR entry in  \fIslurm_step_launch_params_t\fR 
-to specify file descriptors to be used for standard input, output 
-and error. Any \fIlocal_fds\fR not specified will result in the launched 
+Use the \fIlocal_fds\fR entry in  \fIslurm_step_launch_params_t\fR
+to specify file descriptors to be used for standard input, output
+and error. Any \fIlocal_fds\fR not specified will result in the launched
 tasks using the calling process's standard input, output and error.
 Threads created by \fBslurm_step_launch\fR will completely handle
 copying data between the remote processes and the specified local file
-descriptors. 
+descriptors.
 .LP
-Use the substructure in \fIslurm_step_io_fds_t\fR to restrict the 
-redirection of I/O to a specific node or task ID. For example, to 
+Use the substructure in \fIslurm_step_io_fds_t\fR to restrict the
+redirection of I/O to a specific node or task ID. For example, to
 redirect standard output only from task 0, set
 .LP
 .nf
@@ -105,14 +105,14 @@ params.local_fs.out.taskid=0;
 .fi
 .LP
 Use the \fIremote_*_filename\fR fields in \fIslurm_step_launch_params_t\fR
-to have launched tasks read and/or write directly to local files 
+to have launched tasks read and/or write directly to local files
 rather than transferring data over the network to the calling process.
 These strings support many of the same format options as the \fBsrun\fR
-command. Any \fIremote_*_filename\fR fields set will supersede the 
-corresponding \fIlocal_fds\fR entries. For example, the following 
+command. Any \fIremote_*_filename\fR fields set will supersede the
+corresponding \fIlocal_fds\fR entries. For example, the following
 code will direct each task to write standard output and standard
-error to local files with names containing the task ID (e.g. 
-"/home/bob/test_output/run1.out.0" and 
+error to local files with names containing the task ID (e.g.
+"/home/bob/test_output/run1.out.0" and
 "/home/bob/test_output/run.1.err.0" for task 0).
 .LP
 .nf
@@ -132,17 +132,17 @@ or SLURM_ERROR if the job step is aborted during launch.
 .LP
 \fBSLURM_PROTOCOL_VERSION_ERROR\fR Protocol version has changed, re\-link your code.
 .LP
-\fBESLURM_INVALID_JOB_ID\fR the requested job id does not exist. 
+\fBESLURM_INVALID_JOB_ID\fR the requested job id does not exist.
 .LP
-\fBESLURM_ALREADY_DONE\fR the specified job has already completed and can not be modified. 
+\fBESLURM_ALREADY_DONE\fR the specified job has already completed and can not be modified.
 .LP
-\fBESLURM_ACCESS_DENIED\fR the requesting user lacks authorization for the requested action (e.g. trying to delete or modify another user's job). 
+\fBESLURM_ACCESS_DENIED\fR the requesting user lacks authorization for the requested action (e.g. trying to delete or modify another user's job).
 .LP
-\fBESLURM_INTERCONNECT_FAILURE\fR failed to configure the node interconnect. 
+\fBESLURM_INTERCONNECT_FAILURE\fR failed to configure the node interconnect.
 .LP
-\fBESLURM_BAD_DIST\fR task distribution specification is invalid. 
+\fBESLURM_BAD_DIST\fR task distribution specification is invalid.
 .LP
-\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with 
+\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
 SLURM controller.
 
 .SH "EXAMPLE
@@ -222,7 +222,7 @@ int main (int argc, char *argv[])
 .fi
 
 .SH "NOTE"
-These functions are included in the libslurm library, 
+These functions are included in the libslurm library,
 which must be linked to your process for use
 (e.g. "cc \-lslurm myprog.c").
 
@@ -245,7 +245,7 @@ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 .SH "SEE ALSO"
-.LP 
-\fBslurm_step_ctx_create\fR(3), \fBslurm_step_ctx_destroy\fR(3), 
+.LP
+\fBslurm_step_ctx_create\fR(3), \fBslurm_step_ctx_destroy\fR(3),
 \fBslurm_get_errno\fR(3), \fBslurm_perror\fR(3), \fBslurm_strerror\fR(3),
-\fBsalloc\fR(1), \fBsrun\fR(1) 
+\fBsalloc\fR(1), \fBsrun\fR(1)
diff --git a/doc/man/man5/bluegene.conf.5 b/doc/man/man5/bluegene.conf.5
index 41973e3ffb74e9f18ec07d8bd78a8780212316bb..2bf1634c1e0a6321069569d53fe0a4107e6a3c33 100644
--- a/doc/man/man5/bluegene.conf.5
+++ b/doc/man/man5/bluegene.conf.5
@@ -1,47 +1,47 @@
 .TH "bluegene.conf" "5" "April 2008" "bluegene.conf 2.0" "Slurm configuration file"
 .SH "NAME"
-bluegene.conf \- Slurm configuration file for BlueGene systems 
+bluegene.conf \- Slurm configuration file for BlueGene systems
 .SH "DESCRIPTION"
-\fB/etc/bluegene.conf\fP is an ASCII file which describes BlueGene specific 
-SLURM configuration information. This includes specifications for bgblock 
+\fB/etc/bluegene.conf\fP is an ASCII file which describes BlueGene specific
+SLURM configuration information. This includes specifications for bgblock
 layout, configuration, logging, etc.
 The file location can be modified at system build time using the
-DEFAULT_SLURM_CONF parameter. The file will always be located in the 
-same directory as the \fBslurm.conf\fP file. 
+DEFAULT_SLURM_CONF parameter. The file will always be located in the
+same directory as the \fBslurm.conf\fP file.
 .LP
 Paramter names are case insensitive.
-Any text following a "#" in the configuration file is treated 
-as a comment through the end of that line. 
+Any text following a "#" in the configuration file is treated
+as a comment through the end of that line.
 The size of each line in the file is limited to 1024 characters.
-Changes to the configuration file take effect upon restart of 
-SLURM daemons, daemon receipt of the SIGHUP signal, or execution 
+Changes to the configuration file take effect upon restart of
+SLURM daemons, daemon receipt of the SIGHUP signal, or execution
 of the command "scontrol reconfigure" unless otherwise noted.
 .LP
 
-There are some differences between Bluegene/L and Bluegene/P in respects to the contents of the bluegene.conf file.  
+There are some differences between Bluegene/L and Bluegene/P with respect to the contents of the bluegene.conf file.
 
 .SH "The Bluegene/L specific options are:"
 .TP
 \fBAltBlrtsImage\fR
-Alternative BlrtsImage.  This is an optional field only used for 
+Alternative BlrtsImage.  This is an optional field only used for
 mulitple images on a system and should be followed by a Groups= with
-the user groups allowed to use this image (i.e. Groups=da,jette) if 
+the user groups allowed to use this image (e.g. Groups=da,jette); if
 Groups= is not stated then this image will be able to be used by all
 groups. You can put as many alternative images as you want in the conf file.
 
 .TP
 \fBAltLinuxImage\fR
-Alternative LinuxImage.  This is an optional field only used for 
+Alternative LinuxImage.  This is an optional field only used for
 mulitple images on a system and should be followed by a Groups= with
-the user groups allowed to use this image (i.e. Groups=da,jette) if 
+the user groups allowed to use this image (e.g. Groups=da,jette); if
 Groups= is not stated then this image will be able to be used by all
 groups. You can put as many alternative images as you want in the conf file.
 
 .TP
 \fBAltRamDiskImage\fR
-Alternative RamDiskImage.  This is an optional field only used for 
+Alternative RamDiskImage.  This is an optional field only used for
 mulitple images on a system and should be followed by a Groups= with
-the user groups allowed to use this image (i.e. Groups=da,jette) if 
+the user groups allowed to use this image (e.g. Groups=da,jette); if
 Groups= is not stated then this image will be able to be used by all
 groups. You can put as many alternative images as you want in the conf file.
 
@@ -63,17 +63,17 @@ There is no default value and this must be specified.
 .SH "The Bluegene/P specific options are:"
 .TP
 \fBAltCnloadImage\fR
-Alternative CnloadImage.  This is an optional field only used for 
+Alternative CnloadImage.  This is an optional field only used for
 mulitple images on a system and should be followed by a Groups= with
-the user groups allowed to use this image (i.e. Groups=da,jette) if 
+the user groups allowed to use this image (e.g. Groups=da,jette); if
 Groups= is not stated then this image will be able to be used by all
 groups. You can put as many alternative images as you want in the conf file.
 
 .TP
 \fBAltIoloadImage\fR
-Alternative IoloadImage.  This is an optional field only used for 
+Alternative IoloadImage.  This is an optional field only used for
 mulitple images on a system and should be followed by a Groups= with
-the user groups allowed to use this image (i.e. Groups=da,jette) if 
+the user groups allowed to use this image (e.g. Groups=da,jette); if
 Groups= is not stated then this image will be able to be used by all
 groups. You can put as many alternative images as you want in the conf file.
 
@@ -90,9 +90,9 @@ There is no default value and this must be specified.
 .SH "All options below are common on all Bluegene systems:"
 .TP
 \fBAltMloaderImage\fR
-Alternative MloaderImage.  This is an optional field only used for 
+Alternative MloaderImage.  This is an optional field only used for
 mulitple images on a system and should be followed by a Groups= with
-the user groups allowed to use this image (i.e. Groups=da,jette) if 
+the user groups allowed to use this image (e.g. Groups=da,jette); if
 Groups= is not stated then this image will be able to be used by all
 groups. You can put as many alternative images as you want in the conf file.
 
@@ -104,7 +104,7 @@ is usually 512)
 
 .TP
 \fBBridgeAPILogFile\fR
-Fully qualified pathname of a into which the Bridge API logs are 
+Fully qualified pathname of a file into which the Bridge API logs are
 to be written.
 There is no default value.
 
@@ -138,13 +138,13 @@ Describes how SLURM should create bgblocks.
 \fBSTATIC\fR: Create and use the defined non\-overlapping bgblocks.
 .TP
 \fBOVERLAP\fR: Create and use the defined bgblocks, which may overlap.
-It is highly recommended that none of the bgblocks have any passthroughs 
-in the X\-dimension. 
+It is highly recommended that none of the bgblocks have any passthroughs
+in the X\-dimension.
 \fBUse this mode with extreme caution.\fR
 .TP
 \fBDYNAMIC\fR: Create and use bglblocks as needed for each job.
 Bgblocks will not be defined in the bluegene.conf file.
-Dynamic partitioning may introduce fragmentation of resources 
+Dynamic partitioning may introduce fragmentation of resources
 and starvation of larger jobs.
 \fBUse this mode with caution.\fR
 .RE
@@ -157,7 +157,7 @@ There is no default value and this must be specified.
 .TP
 \fBNodeCardNodeCount\fR
 Number of c\-nodes per node card.
-There is no default value and this must be specified. (For bgl systems this 
+There is no default value and this must be specified. (For bgl systems this
 is usually 32)
 
 .TP
@@ -166,10 +166,10 @@ The Numpsets used for creation of all bgblocks.  This value really means the
 number of IOnodes on a base partition.  This number must be the smallest
 number if you have a heterogeneous system.
 There is no default value and this must be specified.  The typical settings
-for bgl systems goes as follows... For IO rich systems 64 is the value that 
-should be used to create small blocks.  For systems that are not IO rich, or 
+for bgl systems goes as follows... For IO rich systems 64 is the value that
+should be used to create small blocks.  For systems that are not IO rich, or
 you do not wish to create small blocks, 8 is usually the number to use.
-For bgp IO rich systems 32 is the value that should be used to create small 
+For bgp IO rich systems 32 is the value that should be used to create small
 blocks since you can only have 2 ionodes per nodecard instead of 4 like on bgl.
 
 .LP
@@ -177,8 +177,8 @@ Each bgblock is defined by the base partitions used to construct it.
 Ordering is very important for laying out switch wires.  Please create
 blocks with smap, and once done don't change the order of blocks created.
 A bgblock is implicitly created containing all resources on the system.
-Bgblocks must not overlap in static mode (except for implicitly 
-created bgblock). This will be the case when smap is used to create 
+Bgblocks must not overlap in static mode (except for implicitly
+created bgblock). This will be the case when smap is used to create
 a configuration file
 All Nodes defined here must also be defined in the slurm.conf file.
 Define only the numeric coordinates of the bgblocks here. The prefix
@@ -197,25 +197,25 @@ The default value is TORUS.
 \fBMESH\fR: Communication occur over a mesh.
 .TP
 \fBSMALL\fR: The base partition is divided into more than one bgblock.
-The administrator should define the number of single node cards and 
-quarter base partition blocks using the options \fB32CNBlocks\fR and 
-\fB128CNBlocks\fR respectively for a Bluegene L system.  \fB16CNBlocks\fR, 
-\fB64CNBlocks\fR, and \fB256CNBlocks\fR are also available for 
-Bluegene P systems.  Keep in mind you 
+The administrator should define the number of single node cards and
+quarter base partition blocks using the options \fB32CNBlocks\fR and
+\fB128CNBlocks\fR respectively for a Bluegene L system.  \fB16CNBlocks\fR,
+\fB64CNBlocks\fR, and \fB256CNBlocks\fR are also available for
+Bluegene P systems.  Keep in mind you
 must have enough ionodes to make all these configurations possible.
 
 The total number of c\-nodes in defined blocks must not exceed
 \fBBasePartitionNodeCnt\fR.
-If not specified, the base partition will be divided into four 
+If not specified, the base partition will be divided into four
 blocks.
 See example below.
 .TP
-\fBTORUS\fR: Communications occur over a torus (end\-points of network 
+\fBTORUS\fR: Communications occur over a torus (end\-points of network
 directly connect.
 .RE
 
 .SH "EXAMPLE"
-.LP 
+.LP
 .br
 ##################################################################
 .br
diff --git a/doc/man/man5/slurm.conf.5 b/doc/man/man5/slurm.conf.5
index c9789c5709061c30aa3d70c9436b443816aff099..ea77b11d39d4a0cee3de41ce3df098487f8c2870 100644
--- a/doc/man/man5/slurm.conf.5
+++ b/doc/man/man5/slurm.conf.5
@@ -1,7 +1,7 @@
 .TH "slurm.conf" "5" "August 2009" "slurm.conf 2.1" "Slurm configuration file"
 
 .SH "NAME"
-slurm.conf \- Slurm configuration file 
+slurm.conf \- Slurm configuration file
 .SH "DESCRIPTION"
 \fB/etc/slurm.conf\fP is an ASCII file which describes general SLURM
 configuration information, the nodes to be managed, information about
@@ -16,18 +16,18 @@ location of this file. The SLURM daemons also allow you to override
 both the built\-in and environment\-provided location using the "\-f"
 option on the command line.
 .LP
-Note the while SLURM daemons create log files and other files as needed, 
-it treats the lack of parent directories as a fatal error. 
+Note that while SLURM daemons create log files and other files as needed,
+it treats the lack of parent directories as a fatal error.
 This prevents the daemons from running if critical file systems are
-not mounted and will minimize the risk of cold\-starting (starting 
+not mounted and will minimize the risk of cold\-starting (starting
 without preserving jobs).
 .LP
-The contents of the file are case insensitive except for the names of nodes 
-and partitions. Any text following a "#" in the configuration file is treated 
-as a comment through the end of that line. 
+The contents of the file are case insensitive except for the names of nodes
+and partitions. Any text following a "#" in the configuration file is treated
+as a comment through the end of that line.
 The size of each line in the file is limited to 1024 characters.
-Changes to the configuration file take effect upon restart of 
-SLURM daemons, daemon receipt of the SIGHUP signal, or execution 
+Changes to the configuration file take effect upon restart of
+SLURM daemons, daemon receipt of the SIGHUP signal, or execution
 of the command "scontrol reconfigure" unless otherwise noted.
 .LP
 If a line begins with the word "Include" followed by whitespace
@@ -39,7 +39,7 @@ The overall configuration parameters available include:
 .TP
 \fBAccountingStorageBackupHost\fR
 The name of the backup machine hosting the accounting storage database.
-If used with the accounting_storage/slurmdbd plugin, this is where the backup 
+If used with the accounting_storage/slurmdbd plugin, this is where the backup
 slurmdbd would be running.
 Only used for database type storage plugins, ignored otherwise.
 
@@ -47,14 +47,14 @@ Only used for database type storage plugins, ignored otherwise.
 \fBAccountingStorageEnforce\fR
 This controls what level of enforcement you want on associations when new
 jobs are submitted.  Valid options are any combination of associations, limits,
-and wckeys, or all for all things.  If limits is set associations is implied.  
-If wckeys is set both limits and associations are implied along with 
-TrackWckey being set.  By enforcing Associations no new job is allowed to run 
-unless a corresponding association exists in the system.  If limits are 
-enforced users can be limited by association to how many nodes or how long 
-jobs can run or other limits.  With wckeys enforced jobs will not be scheduled 
-unless a valid workload characterization key is specified.  This value may not 
-be reset via "scontrol reconfig". It only takes effect upon restart 
+and wckeys, or all for all things.  If limits is set associations is implied.
+If wckeys is set both limits and associations are implied along with
+TrackWckey being set.  By enforcing Associations no new job is allowed to run
+unless a corresponding association exists in the system.  If limits are
+enforced users can be limited by association to how many nodes or how long
+jobs can run or other limits.  With wckeys enforced jobs will not be scheduled
+unless a valid workload characterization key is specified.  This value may not
+be reset via "scontrol reconfig". It only takes effect upon restart
 of the slurmctld daemon.
 
 .TP
@@ -122,44 +122,44 @@ Also see \fBDefaultStorageUser\fR.
 
 .TP
 \fBAuthType\fR
-The authentication method for communications between SLURM 
-components. 
-Acceptable values at present include "auth/none", "auth/authd", 
+The authentication method for communications between SLURM
+components.
+Acceptable values at present include "auth/none", "auth/authd",
 and "auth/munge".
 The default value is "auth/munge".
-"auth/none" includes the UID in each communication, but it is not verified. 
-This may be fine for testing purposes, but 
+"auth/none" includes the UID in each communication, but it is not verified.
+This may be fine for testing purposes, but
 \fBdo not use "auth/none" if you desire any security\fR.
 "auth/authd" indicates that Brett Chun's authd is to be used (see
 "http://www.theether.org/authd/" for more information. Note that
 authd is no longer actively supported).
 "auth/munge" indicates that LLNL's MUNGE is to be used
-(this is the best supported authentication mechanism for SLURM, 
+(this is the best supported authentication mechanism for SLURM,
 see "http://home.gna.org/munge/" for more information).
-All SLURM daemons and commands must be terminated prior to changing 
-the value of \fBAuthType\fR and later restarted (SLURM jobs can be 
+All SLURM daemons and commands must be terminated prior to changing
+the value of \fBAuthType\fR and later restarted (SLURM jobs can be
 preserved).
 
 .TP
 \fBBackupAddr\fR
 The name that \fBBackupController\fR should be referred to in
-establishing a communications path. This name will 
-be used as an argument to the gethostbyname() function for 
-identification. For example, "elx0000" might be used to designate 
-the Ethernet address for node "lx0000". 
-By default the \fBBackupAddr\fR will be identical in value to 
+establishing a communications path. This name will
+be used as an argument to the gethostbyname() function for
+identification. For example, "elx0000" might be used to designate
+the Ethernet address for node "lx0000".
+By default the \fBBackupAddr\fR will be identical in value to
 \fBBackupController\fR.
 
 .TP
 \fBBackupController\fR
-The name of the machine where SLURM control functions are to be 
+The name of the machine where SLURM control functions are to be
 executed in the event that \fBControlMachine\fR fails. This node
-may also be used as a compute server if so desired. It will come into service 
-as a controller only upon the failure of ControlMachine and will revert 
-to a "standby" mode when the ControlMachine becomes available once again. 
+may also be used as a compute server if so desired. It will come into service
+as a controller only upon the failure of ControlMachine and will revert
+to a "standby" mode when the ControlMachine becomes available once again.
 This should be a node name without the full domain name.   I.e., the hostname
-returned by the \fIgethostname()\fR function cut at the first dot (e.g. use 
-"tux001" rather than "tux001.my.com"). 
+returned by the \fIgethostname()\fR function cut at the first dot (e.g. use
+"tux001" rather than "tux001.my.com").
 While not essential, it is recommended that you specify a backup controller.
 See  the \fBRELOCATING CONTROLLERS\fR section if you change this.
 
@@ -181,9 +181,9 @@ The default value is 0 to disable caching group data.
 
 .TP
 \fBCheckpointType\fR
-The system\-initiated checkpoint method to be used for user jobs. 
-The slurmctld daemon must be restarted for a change in \fBCheckpointType\fR 
-to take effect. 
+The system\-initiated checkpoint method to be used for user jobs.
+The slurmctld daemon must be restarted for a change in \fBCheckpointType\fR
+to take effect.
 Supported values presently include:
 .RS
 .TP 18
@@ -212,35 +212,35 @@ when multiple clusters report to the same database.
 .TP
 \fBCompleteWait\fR
 The time, in seconds, given for a job to remain in COMPLETING state
-before any additional jobs are scheduled. 
+before any additional jobs are scheduled.
 If set to zero, pending jobs will be started as soon as possible.
-Since a COMPLETING job's resources are released for use by other 
-jobs as soon as the \fBEpilog\fR completes on each individual node, 
-this can result in very fragmented resource allocations. 
-To provide jobs with the minimum response time, a value of zero is 
+Since a COMPLETING job's resources are released for use by other
+jobs as soon as the \fBEpilog\fR completes on each individual node,
+this can result in very fragmented resource allocations.
+To provide jobs with the minimum response time, a value of zero is
 recommended (no waiting).
-To minimize fragmentation of resources, a value equal to \fBKillWait\fR 
-plus two is recommended. 
+To minimize fragmentation of resources, a value equal to \fBKillWait\fR
+plus two is recommended.
 In that case, setting \fBKillWait\fR to a small value may be beneficial.
 The default value of \fBCompleteWait\fR is zero seconds.
 The value may not exceed 65533.
 
 .TP
 \fBControlAddr\fR
-Name that \fBControlMachine\fR should be referred to in 
-establishing a communications path. This name will 
-be used as an argument to the gethostbyname() function for 
-identification. For example, "elx0000" might be used to designate 
-the Ethernet address for node "lx0000". 
-By default the \fBControlAddr\fR will be identical in value to 
+Name that \fBControlMachine\fR should be referred to in
+establishing a communications path. This name will
+be used as an argument to the gethostbyname() function for
+identification. For example, "elx0000" might be used to designate
+the Ethernet address for node "lx0000".
+By default the \fBControlAddr\fR will be identical in value to
 \fBControlMachine\fR.
 
 .TP
 \fBControlMachine\fR
 The short hostname of the machine where SLURM control functions are
-executed (i.e. the name returned by the command "hostname -s", use 
-"tux001" rather than "tux001.my.com").  
-This value must be specified. 
+executed (i.e. the name returned by the command "hostname -s", use
+"tux001" rather than "tux001.my.com").
+This value must be specified.
 In order to support some high availability architectures, multiple
 hostnames may be listed with comma separators and one \fBControlAddr\fR
 must be specified. The high availability system must insure that the
@@ -249,7 +249,7 @@ See the \fBRELOCATING CONTROLLERS\fR section if you change this.
 
 .TP
 \fBCryptoType\fR
-The cryptographic signature tool to be used in the creation of 
+The cryptographic signature tool to be used in the creation of
 job step credentials.
 The slurmctld daemon must be restarted for a change in \fBCryptoType\fR
 to take effect.
@@ -259,7 +259,7 @@ The default value is "crypto/munge".
 .TP
 \fBDebugFlags\fR
 Defines specific subsystems which should provide more detailed event logging.
-Multiple subsystems can be specified with comma separators. 
+Multiple subsystems can be specified with comma separators.
 Valid subsystems available today (with more to come) include:
 .RS
 .TP 15
@@ -278,14 +278,14 @@ Sched/wiki and wiki2 communications
 
 .TP
 \fBDefMemPerCPU\fR
-Default real memory size available per allocated CPU in MegaBytes. 
+Default real memory size available per allocated CPU in MegaBytes.
 Used to avoid over\-subscribing memory and causing paging.
 \fBDefMemPerCPU\fR would generally be used if individual processors
-are allocated to jobs (\fBSelectType=select/cons_res\fR). 
+are allocated to jobs (\fBSelectType=select/cons_res\fR).
 The default value is 0 (unlimited).
 Also see \fBDefMemPerNode\fR and \fBMaxMemPerCPU\fR.
 \fBDefMemPerCPU\fR and \fBDefMemPerNode\fR are mutually exclusive.
-NOTE: Enforcement of memory limits currently requires enabling of 
+NOTE: Enforcement of memory limits currently requires enabling of
 accounting, which samples memory use on a periodic basis (data need
 not be stored, just collected).
 
@@ -294,12 +294,12 @@ not be stored, just collected).
 Default real memory size available per allocated node in MegaBytes.
 Used to avoid over\-subscribing memory and causing paging.
 \fBDefMemPerNode\fR would generally be used if whole nodes
-are allocated to jobs (\fBSelectType=select/linear\fR) and 
+are allocated to jobs (\fBSelectType=select/linear\fR) and
 resources are shared (\fBShared=yes\fR or \fBShared=force\fR).
 The default value is 0 (unlimited).
 Also see \fBDefMemPerCPU\fR and \fBMaxMemPerNode\fR.
 \fBDefMemPerCPU\fR and \fBDefMemPerNode\fR are mutually exclusive.
-NOTE: Enforcement of memory limits currently requires enabling of 
+NOTE: Enforcement of memory limits currently requires enabling of
 accounting, which samples memory use on a periodic basis (data need
 not be stored, just collected).
 
@@ -364,15 +364,15 @@ The default value is "NO", meaning user root will be able to execute jobs.
 .TP
 \fBEnforcePartLimits\fR
 If set to "YES" then jobs which exceed a partition's size and/or time limits
-will be rejected at submission time. If set to "NO" then the job will be 
+will be rejected at submission time. If set to "NO" then the job will be
 accepted and remain queued until the partition limits are altered.
 The default value is "NO".
 
 .TP
 \fBEpilog\fR
-Fully qualified pathname of a script to execute as user root on every 
-node when a user's job completes (e.g. "/usr/local/slurm/epilog"). This may 
-be used to purge files, disable user login, etc. 
+Fully qualified pathname of a script to execute as user root on every
+node when a user's job completes (e.g. "/usr/local/slurm/epilog"). This may
+be used to purge files, disable user login, etc.
 By default there is no epilog.
 See \fBProlog and Epilog Scripts\fR for more information.
 
@@ -380,8 +380,8 @@ See \fBProlog and Epilog Scripts\fR for more information.
 \fBEpilogMsgTime\fR
 The number of microseconds the the slurmctld daemon requires to process
 an epilog completion message from the slurmd dameons. This parameter can
-be used to prevent a burst of epilog completion messages from being sent 
-at the same time which should help prevent lost messages and improve 
+be used to prevent a burst of epilog completion messages from being sent
+at the same time which should help prevent lost messages and improve
 throughput for large jobs.
 The default value is 2000 microseconds.
 For a 1000 node job, this spreads the epilog completion messages out over
@@ -389,13 +389,13 @@ two seconds.
 
 .TP
 \fBEpilogSlurmctld\fR
-Fully qualified pathname of a program for the slurmctld to execute 
+Fully qualified pathname of a program for the slurmctld to execute
 upon termination of a job allocation (e.g.
-"/usr/local/slurm/epilog_controller"). 
-The program executes as SlurmUser, which gives it permission to drain 
+"/usr/local/slurm/epilog_controller").
+The program executes as SlurmUser, which gives it permission to drain
 nodes and requeue the job if a failure occurs or cancel the job if appropriate.
-The program can be used to reboot nodes or perform other work to prepare 
-resources for use. 
+The program can be used to reboot nodes or perform other work to prepare
+resources for use.
 See \fBProlog and Epilog Scripts\fR for more information.
 
 .TP
@@ -417,9 +417,9 @@ slurm.conf configuration file and any node with less than the
 configured resources will be set DOWN.
 .TP
 \fB0\fR
-Base scheduling decisions upon the actual configuration of each individual 
+Base scheduling decisions upon the actual configuration of each individual
 node except that the node's processor count in SLURM's configuration must
-match the actual hardware configuration if \fBSchedulerType=sched/gang\fR 
+match the actual hardware configuration if \fBSchedulerType=sched/gang\fR
 or \fBSelectType=select/cons_res\fR are configured (both of those plugins
 maintain resource allocation information using bitmaps for the cores in the
 system and must remain static, while the node's memory and disk space can
@@ -428,24 +428,24 @@ be established later).
 \fB2\fR
 Consider the configuration of each node to be that specified in the
 slurm.conf configuration file and any node with less than the
-configured resources will \fBnot\fR be set DOWN. 
+configured resources will \fBnot\fR be set DOWN.
 This can be useful for testing purposes.
 .RE
 
 .TP
 \fBFirstJobId\fR
-The job id to be used for the first submitted to SLURM without a 
-specific requested value. Job id values generated will incremented by 1 
-for each subsequent job. This may be used to provide a meta\-scheduler 
-with a job id space which is disjoint from the interactive jobs. 
+The job id to be used for the first job submitted to SLURM without a
+specific requested value. Job id values generated will be incremented by 1
+for each subsequent job. This may be used to provide a meta\-scheduler
+with a job id space which is disjoint from the interactive jobs.
 The default value is 1.
 
 .TP
 \fBGetEnvTimeout\fR
 Used for Moab scheduled jobs only. Controls how long job should wait
-in seconds for loading the user's environment before attempting to 
-load it from a cache file. Applies when the srun or sbatch 
-\fI--get-user-env\fR option is used. If set to 0 then always load 
+in seconds for loading the user's environment before attempting to
+load it from a cache file. Applies when the srun or sbatch
+\fI--get-user-env\fR option is used. If set to 0 then always load
 the user's environment from the cache file.
 The default value is 2 seconds.
 
@@ -456,37 +456,37 @@ The default value is zero, which disables execution.
 
 .TP
 \fBHealthCheckProgram\fR
-Fully qualified pathname of a script to execute as user root periodically 
-on all compute nodes that are not in the DOWN state. This may be used to 
+Fully qualified pathname of a script to execute as user root periodically
+on all compute nodes that are not in the DOWN state. This may be used to
 verify the node is fully operational and DRAIN the node or send email
-if a problem is detected. 
+if a problem is detected.
 Any action to be taken must be explicitly performed by the program
 (e.g. execute "scontrol update NodeName=foo State=drain Reason=tmp_file_system_full"
 to drain a node).
 The interval is controlled using the \fBHealthCheckInterval\fR parameter.
-Note that the \fBHealthCheckProgram\fR will be executed at the same time 
+Note that the \fBHealthCheckProgram\fR will be executed at the same time
 on all nodes to minimize its impact upon parallel programs.
 This program is will be killed if it does not terminate normally within
-60 seconds. 
+60 seconds.
 By default, no program will be executed.
 
 .TP
 \fBInactiveLimit\fR
 The interval, in seconds, a job or job step is permitted to be inactive
-before it is terminated. A job or job step is considered inactive if 
-the associated srun command is not responding to slurm daemons. This 
-could be due to the termination of the srun command or the program 
-being is a stopped state. A batch job is considered inactive if it 
+before it is terminated. A job or job step is considered inactive if
+the associated srun command is not responding to slurm daemons. This
+could be due to the termination of the srun command or the program
+being in a stopped state. A batch job is considered inactive if it
 has no active job steps (e.g. periods of pre\- and post\-processing).
-This limit permits defunct jobs to be purged in a timely fashion 
+This limit permits defunct jobs to be purged in a timely fashion
 without waiting for their time limit to be reached.
 This value should reflect the possibility that the srun command may
-stopped by a debugger or considerable time could be required for batch 
-job pre\- and post\-processing. 
-This limit is ignored for jobs running in partitions with the 
-\fBRootOnly\fR flag set (the scheduler running as root will be 
+be stopped by a debugger or considerable time could be required for batch
+job pre\- and post\-processing.
+This limit is ignored for jobs running in partitions with the
+\fBRootOnly\fR flag set (the scheduler running as root will be
 responsible for the job).
-The default value is unlimited (zero). 
+The default value is unlimited (zero).
 May not exceed 65533.
 
 .TP
@@ -496,17 +496,17 @@ Acceptable values at present include "jobacct_gather/aix" (for AIX operating
 system), "jobacct_gather/linux" (for Linux operating system) and "jobacct_gather/none"
 (no accounting data collected).
 The default value is "jobacct_gather/none".
-In order to use the \fBsacct\fR tool, "jobacct_gather/aix" or "jobacct_gather/linux" 
+In order to use the \fBsacct\fR tool, "jobacct_gather/aix" or "jobacct_gather/linux"
 must be configured.
 
 .TP
 \fBJobAcctGatherFrequency\fR
 The job accounting sampling interval.
 For jobacct_gather/none this parameter is ignored.
-For  jobacct_gather/aix and jobacct_gather/linux the parameter is a number is 
+For jobacct_gather/aix and jobacct_gather/linux the parameter is a number of
 seconds between sampling job state.
-The default value is 30 seconds. 
-A value of zero disables real the periodic job sampling and provides accounting 
+The default value is 30 seconds.
+A value of zero disables the periodic job sampling and provides accounting
 information only on job termination (reducing SLURM interference with the job).
 
 .TP
@@ -544,20 +544,20 @@ Also see \fBDefaultStoragePort\fR.
 .TP
 \fBJobCompType\fR
 The job completion logging mechanism type.
-Acceptable values at present include "jobcomp/none", "jobcomp/filetxt", 
+Acceptable values at present include "jobcomp/none", "jobcomp/filetxt",
 "jobcomp/mysql", "jobcomp/pgsql", and "jobcomp/script"".
-The default value is "jobcomp/none", which means that upon job completion 
-the record of the job is purged from the system.  If using the accounting 
-infrastructure this plugin may not be of interest since the information 
+The default value is "jobcomp/none", which means that upon job completion
+the record of the job is purged from the system.  If using the accounting
+infrastructure this plugin may not be of interest since the information
 here is redundant.
-The value "jobcomp/filetxt" indicates that a record of the job should be 
+The value "jobcomp/filetxt" indicates that a record of the job should be
 written to a text file specified by the \fBJobCompLoc\fR parameter.
-The value "jobcomp/mysql" indicates that a record of the job should be 
+The value "jobcomp/mysql" indicates that a record of the job should be
 written to a mysql database specified by the \fBJobCompLoc\fR parameter.
-The value "jobcomp/pgsql" indicates that a record of the job should be 
+The value "jobcomp/pgsql" indicates that a record of the job should be
 written to a PostgreSQL database specified by the \fBJobCompLoc\fR parameter.
-The value "jobcomp/script" indicates that a script specified by the 
-\fBJobCompLoc\fR parameter is to be executed with environment variables 
+The value "jobcomp/script" indicates that a script specified by the
+\fBJobCompLoc\fR parameter is to be executed with environment variables
 indicating the job information.
 
 .TP
@@ -568,60 +568,60 @@ Also see \fBDefaultStorageUser\fR.
 
 .TP
 \fBJobCredentialPrivateKey\fR
-Fully qualified pathname of a file containing a private key used for 
+Fully qualified pathname of a file containing a private key used for
 authentication by SLURM daemons.
 This parameter is ignored if \fBCryptoType=crypto/munge\fR.
 
 .TP
 \fBJobCredentialPublicCertificate\fR
-Fully qualified pathname of a file containing a public key used for 
+Fully qualified pathname of a file containing a public key used for
 authentication by SLURM daemons.
 This parameter is ignored if \fBCryptoType=crypto/munge\fR.
 
 .TP
 \fBJobFileAppend\fR
-This option controls what to do if a job's output or error file 
-exist when the job is started. 
-If \fBJobFileAppend\fR is set to a value of 1, then append to 
+This option controls what to do if a job's output or error file
+exist when the job is started.
+If \fBJobFileAppend\fR is set to a value of 1, then append to
 the existing file.
 By default, any existing file is truncated.
 
 .TP
 \fBJobRequeue\fR
-This option controls what to do by default after a node failure. 
-If \fBJobRequeue\fR is set to a value of 1, then any job running 
+This option controls what to do by default after a node failure.
+If \fBJobRequeue\fR is set to a value of 1, then any job running
 on the failed node will be requeued for execution on different nodes.
-If \fBJobRequeue\fR is set to a value of 0, then any job running 
+If \fBJobRequeue\fR is set to a value of 0, then any job running
 on the failed node will be terminated.
-Use the \fBsbatch\fR \fI\-\-no\-requeue\fR or \fI\-\-requeue\fR 
+Use the \fBsbatch\fR \fI\-\-no\-requeue\fR or \fI\-\-requeue\fR
 option to change the default behavior for individual jobs.
 The default value is 1.
 
 .TP
 \fBKillOnBadExit\fR
-If set to 1, the job will be terminated immediately when one of the 
-processes is crashed or aborted. With default value of 0, if one of 
-the processes is crashed or aborted the other processes will continue 
+If set to 1, the job will be terminated immediately when one of the
+processes crashes or aborts. With default value of 0, if one of
+the processes crashes or aborts the other processes will continue
 to run.
 
 .TP
 \fBKillWait\fR
-The interval, in seconds, given to a job's processes between the 
-SIGTERM and SIGKILL signals upon reaching its time limit. 
-If the job fails to terminate gracefully in the interval specified, 
-it will be forcibly terminated. 
+The interval, in seconds, given to a job's processes between the
+SIGTERM and SIGKILL signals upon reaching its time limit.
+If the job fails to terminate gracefully in the interval specified,
+it will be forcibly terminated.
 The default value is 30 seconds.
 The value may not exceed 65533.
 
 .TP
 \fBLicenses\fR
-Specification of licenses (or other resources available on all 
+Specification of licenses (or other resources available on all
 nodes of the cluster) which can be allocated to jobs.
-License names can optionally be followed by an asterisk 
+License names can optionally be followed by an asterisk
 and count with a default count of one.
 Multiple license names should be comma separated (e.g.
 "Licenses=foo*4,bar").
-Note that SLURM prevents jobs from being scheduled if their 
+Note that SLURM prevents jobs from being scheduled if their
 required license specification is not available.
 SLURM does not prevent jobs from using licenses that are
 not explicitly listed in the job submission specification.
@@ -633,25 +633,25 @@ The default value is "/bin/mail".
 
 .TP
 \fBMaxJobCount\fR
-The maximum number of jobs SLURM can have in its active database 
-at one time. Set the values of \fBMaxJobCount\fR and \fBMinJobAge\fR 
-to insure the slurmctld daemon does not exhaust its memory or other 
-resources. Once this limit is reached, requests to submit additional 
-jobs will fail. The default value is 5000 jobs. This value may not 
-be reset via "scontrol reconfig". It only takes effect upon restart 
+The maximum number of jobs SLURM can have in its active database
+at one time. Set the values of \fBMaxJobCount\fR and \fBMinJobAge\fR
+to insure the slurmctld daemon does not exhaust its memory or other
+resources. Once this limit is reached, requests to submit additional
+jobs will fail. The default value is 5000 jobs. This value may not
+be reset via "scontrol reconfig". It only takes effect upon restart
 of the slurmctld daemon.
 May not exceed 65533.
 
 .TP
 \fBMaxMemPerCPU\fR
-Maximum real memory size available per allocated CPU in MegaBytes. 
+Maximum real memory size available per allocated CPU in MegaBytes.
 Used to avoid over\-subscribing memory and causing paging.
 \fBMaxMemPerCPU\fR would generally be used if individual processors
 are allocated to jobs (\fBSelectType=select/cons_res\fR).
 The default value is 0 (unlimited).
 Also see \fBDefMemPerCPU\fR and \fBMaxMemPerNode\fR.
 \fBMaxMemPerCPU\fR and \fBMaxMemPerNode\fR are mutually exclusive.
-NOTE: Enforcement of memory limits currently requires enabling of 
+NOTE: Enforcement of memory limits currently requires enabling of
 accounting, which samples memory use on a periodic basis (data need
 not be stored, just collected).
 
@@ -665,7 +665,7 @@ resources are shared (\fBShared=yes\fR or \fBShared=force\fR).
 The default value is 0 (unlimited).
 Also see \fBDefMemPerNode\fR and \fBMaxMemPerCPU\fR.
 \fBMaxMemPerCPU\fR and \fBMaxMemPerNode\fR are mutually exclusive.
-NOTE: Enforcement of memory limits currently requires enabling of 
+NOTE: Enforcement of memory limits currently requires enabling of
 accounting, which samples memory use on a periodic basis (data need
 not be stored, just collected).
 
@@ -677,39 +677,39 @@ on a single node. The default \fBMaxTasksPerNode\fR is 128.
 .TP
 \fBMessageTimeout\fR
 Time permitted for a round\-trip communication to complete
-in seconds. Default value is 10 seconds. For systems with 
-shared nodes, the slurmd daemon could be paged out and 
+in seconds. Default value is 10 seconds. For systems with
+shared nodes, the slurmd daemon could be paged out and
 necessitate higher values.
 
 .TP
 \fBMinJobAge\fR
-The minimum age of a completed job before its record is purged from 
-SLURM's active database. Set the values of \fBMaxJobCount\fR and 
-\fBMinJobAge\fR to insure the slurmctld daemon does not exhaust 
-its memory or other resources. The default value is 300 seconds. 
+The minimum age of a completed job before its record is purged from
+SLURM's active database. Set the values of \fBMaxJobCount\fR and
+\fBMinJobAge\fR to insure the slurmctld daemon does not exhaust
+its memory or other resources. The default value is 300 seconds.
 A value of zero prevents any job record purging.
 May not exceed 65533.
 
 .TP
 \fBMpiDefault\fR
-Identifies the default type of MPI to be used. 
+Identifies the default type of MPI to be used.
 Srun may override this configuration parameter in any case.
-Currently supported versions include: 
-\fBmpichgm\fR, 
+Currently supported versions include:
+\fBmpichgm\fR,
 \fBmvapich\fR,
-\fBnone\fR (default, which works for many other versions of MPI including 
+\fBnone\fR (default, which works for many other versions of MPI including
 LAM MPI and Open MPI).
 
 .TP
 \fBMpiParams\fR
-MPI parameters. 
+MPI parameters.
 Used to identify ports used by OpenMPI only and the input format is
 "ports=12000\-12999" to identify a range of communication ports to be used.
 
 .TP
 \fBOverTimeLimit\fR
-Number of minutes by which a job can exceed its time limit before 
-being canceled. 
+Number of minutes by which a job can exceed its time limit before
+being canceled.
 The configured job time limit is treated as a \fIsoft\fR limit.
 Adding \fBOverTimeLimit\fR to the \fIsoft\fR limit provides a \fIhard\fR
 limit, at which point the job is canceled.
@@ -721,9 +721,9 @@ A value of "UNLIMITED" is also supported.
 
 .TP
 \fBPluginDir\fR
-Identifies the places in which to look for SLURM plugins. 
-This is a colon\-separated list of directories, like the PATH 
-environment variable. 
+Identifies the places in which to look for SLURM plugins.
+This is a colon\-separated list of directories, like the PATH
+environment variable.
 The default value is "/usr/local/lib/slurm".
 
 .TP
@@ -744,14 +744,14 @@ which jobs are capable of preempting other jobs while \fBPreemptMode\fR
 controls the mechanism used to preempt the lower priority jobs.
 Jobs which are being gang scheduled (time sliced) rather than being preempted
 will always make use of a suspend/resume mechanism.
-The \fBGANG\fR option can be specified in addition to a preemption 
+The \fBGANG\fR option can be specified in addition to a preemption
 method specification with the two options comma separated.
 .RS
 .TP 12
 \fBOFF\fR
 is the default value and disables job preemption and gang scheduling.
-This is the only option compatible with \fBSchedulerType=sched/wiki\fR 
-or \fBSchedulerType=sched/wiki2\fR (used by Maui and Moab respectively, 
+This is the only option compatible with \fBSchedulerType=sched/wiki\fR
+or \fBSchedulerType=sched/wiki2\fR (used by Maui and Moab respectively,
 which provide their own job preemption functionality).
 .TP
 \fBCANCEL\fR
@@ -768,16 +768,16 @@ preempts jobs by requeuing them (if possible) or canceling them.
 .TP
 \fBSUSPEND\fR
 preempts jobs by suspending them.
-A suspended job will resume execution once the high priority job 
+A suspended job will resume execution once the high priority job
 preempting it completes.
 The \fBSUSPEND\fR may only be used with the \fBGANG\fR option
-(the gang scheduler module performs the job resume operation). 
+(the gang scheduler module performs the job resume operation).
 .RE
 
 .TP
 \fBPreemptType\fR
-This specifies the plugin used to identify which jobs can be 
-preempted in order to start a pending job. 
+This specifies the plugin used to identify which jobs can be
+preempted in order to start a pending job.
 .RS
 .TP
 \fBpreempt/none\fR
@@ -786,7 +786,7 @@ This is the default.
 .TP
 \fBpreempt/partition_prio\fR
 Job preemption is based upon partition priority.
-Jobs in higher priority partitions (queues) may preempt jobs from lower 
+Jobs in higher priority partitions (queues) may preempt jobs from lower
 priority partitions.
 .TP
 \fBpreempt/qos\fR
@@ -799,12 +799,12 @@ This is not compatible with \fBPreemptMode=OFF\fR or \fBPreemptMode=SUSPEND\fR
 .TP
 \fBPriorityDecayHalfLife\fR
 This controls how long prior resource use is considered in determining
-how over\- or under\-serviced an association is (user, bank account and 
-cluster) in determining job priority.  If set to 0 no decay will be applied.  
-This is helpful if you want to enforce hard time limits per association.  If 
+how over\- or under\-serviced an association is (user, bank account and
+cluster) in determining job priority.  If set to 0 no decay will be applied.
+This is helpful if you want to enforce hard time limits per association.  If
 set to 0 \fBPriorityUsageResetPeriod\fR must be set to some interval.
 Applicable only if \fBPriorityType=priority/multifactor\fR.
-The unit is a time string (i.e. min, hr:min:00, days\-hr:min:00, 
+The unit is a time string (i.e. min, hr:min:00, days\-hr:min:00,
 or days\-hr).  The default value is 7\-0 (7 days).
 
 .TP
@@ -820,17 +820,17 @@ Specifies the job age which will be given the maximum age factor in computing
 priority. For example, a value of 30 minutes would result in all jobs over
 30 minutes old would get the same age\-based priority.
 Applicable only if PriorityType=priority/multifactor.
-The unit is a time string (i.e. min, hr:min:00, days\-hr:min:00, 
+The unit is a time string (i.e. min, hr:min:00, days\-hr:min:00,
 or days\-hr).  The default value is 7\-0 (7 days).
 
 .TP
 \fBPriorityUsageResetPeriod\fR
-At this interval the usage of associations will be reset to 0.  This is used 
-if you want to enforce hard limits of time usage per association.  If 
-PriorityDecayHalfLife is set to be 0 no decay will happen and this is the 
-only way to reset the usage accumulated by running jobs.  By default this is 
+At this interval the usage of associations will be reset to 0.  This is used
+if you want to enforce hard limits of time usage per association.  If
+PriorityDecayHalfLife is set to be 0 no decay will happen and this is the
+only way to reset the usage accumulated by running jobs.  By default this is
 turned off and it is advised to use the PriorityDecayHalfLife option to avoid
-not having anything running on your cluster, but if your schema is set up to 
+not having anything running on your cluster, but if your schema is set up to
 only allow certain amounts of time on your system this is the way to do it.
 Applicable only if PriorityType=priority/multifactor.
 .RS
@@ -839,7 +839,7 @@ Applicable only if PriorityType=priority/multifactor.
 Never clear historic usage. The default value.
 .TP
 \fBNOW\fR
-Clear the historic usage now. 
+Clear the historic usage now.
 Executed at startup and reconfiguration time.
 .TP
 \fBDAILY\fR
@@ -863,8 +863,8 @@ Cleared on the first day of each year at time 00:00.
 This specifies the plugin to be used in establishing a job's scheduling
 priority. Supported values are "priority/basic" (jobs are prioritized
 by order of arrival, also suitable for sched/wiki and sched/wiki2) and
-"priority/multifactor" (jobs are prioritized based upon size, age, 
-fair\-share of allocation, etc). 
+"priority/multifactor" (jobs are prioritized based upon size, age,
+fair\-share of allocation, etc).
 The default value is "priority/basic".
 
 .TP
@@ -911,91 +911,91 @@ Multiple values may be specified with a comma separator.
 Acceptable values include:
 .RS
 .TP
-\fBaccounts\fR 
-(NON-SLURMDBD ACCOUNTING ONLY) prevents users from viewing any account 
+\fBaccounts\fR
+(NON-SLURMDBD ACCOUNTING ONLY) prevents users from viewing any account
 definitions unless they are coordinators of them.
 .TP
 \fBjobs\fR
 prevents users from viewing jobs or job steps belonging
-to other users. (NON-SLURMDBD ACCOUNTING ONLY) prevents users from viewing 
-job records belonging to other users unless they are coordinators of 
+to other users. (NON-SLURMDBD ACCOUNTING ONLY) prevents users from viewing
+job records belonging to other users unless they are coordinators of
 the association running the job when using sacct.
 .TP
 \fBnodes\fR
 prevents users from viewing node state information.
 .TP
-\fBpartitions\fR 
+\fBpartitions\fR
 prevents users from viewing partition state information.
 .TP
-\fBreservations\fR 
+\fBreservations\fR
 prevents regular users from viewing reservations.
 .TP
-\fBusage\fR 
-(NON-SLURMDBD ACCOUNTING ONLY) prevents users from viewing 
+\fBusage\fR
+(NON-SLURMDBD ACCOUNTING ONLY) prevents users from viewing
 usage of any other user.  This applies to sreport.
 .TP
-\fBusers\fR 
-(NON-SLURMDBD ACCOUNTING ONLY) prevents users from viewing 
-information of any user other than themselves, this also makes it so users can 
-only see associations they deal with.  
-Coordinators can see associations of all users they are coordinator of, 
+\fBusers\fR
+(NON-SLURMDBD ACCOUNTING ONLY) prevents users from viewing
+information of any user other than themselves; this also makes it so users can
+only see associations they deal with.
+Coordinators can see associations of all users they are coordinator of,
 but can only see themselves when listing users.
 .RE
 
 .TP
 \fBProctrackType\fR
-Identifies the plugin to be used for process tracking. 
-The slurmd daemon uses this mechanism to identify all processes 
-which are children of processes it spawns for a user job. 
+Identifies the plugin to be used for process tracking.
+The slurmd daemon uses this mechanism to identify all processes
+which are children of processes it spawns for a user job.
 The slurmd daemon must be restarted for a change in ProctrackType
 to take effect.
-NOTE: "proctrack/linuxproc" and "proctrack/pgid" can fail to 
-identify all processes associated with a job since processes 
-can become a child of the init process (when the parent process 
-terminates) or change their process group. 
-To reliably track all processes, one of the other mechanisms 
-utilizing kernel modifications is preferable. 
+NOTE: "proctrack/linuxproc" and "proctrack/pgid" can fail to
+identify all processes associated with a job since processes
+can become a child of the init process (when the parent process
+terminates) or change their process group.
+To reliably track all processes, one of the other mechanisms
+utilizing kernel modifications is preferable.
 NOTE: "proctrack/linuxproc" is not compatible with "switch/elan."
 Acceptable values at present include:
 .RS
-.TP 
-\fBproctrack/aix\fR which uses an AIX kernel extension and is 
+.TP
+\fBproctrack/aix\fR which uses an AIX kernel extension and is
 the default for AIX systems
 .TP
-\fBproctrack/linuxproc\fR which uses linux process tree using 
+\fBproctrack/linuxproc\fR which uses linux process tree using
 parent process IDs
 .TP
-\fBproctrack/rms\fR which uses Quadrics kernel patch and is the 
-default if "SwitchType=switch/elan" 
+\fBproctrack/rms\fR which uses Quadrics kernel patch and is the
+default if "SwitchType=switch/elan"
 .TP
 \fBproctrack/sgi_job\fR which uses SGI's Process Aggregates (PAGG)
-kernel module, see \fIhttp://oss.sgi.com/projects/pagg/\fR 
-for more information 
+kernel module, see \fIhttp://oss.sgi.com/projects/pagg/\fR
+for more information
 .TP
-\fBproctrack/pgid\fR which uses process group IDs and is the 
+\fBproctrack/pgid\fR which uses process group IDs and is the
 default for all other systems
 .RE
 
 .TP
 \fBProlog\fR
-Fully qualified pathname of a program for the slurmd to execute 
+Fully qualified pathname of a program for the slurmd to execute
 whenever it is asked to run a job step from a new job allocation (e.g.
 "/usr/local/slurm/prolog").  The slurmd executes the script before starting
 the first job step.  This may be used to purge files, enable user login, etc.
-By default there is no prolog. Any configured script is expected to 
+By default there is no prolog. Any configured script is expected to
 complete execution quickly (in less time than \fBMessageTimeout\fR).
 See \fBProlog and Epilog Scripts\fR for more information.
 
 .TP
 \fBPrologSlurmctld\fR
-Fully qualified pathname of a program for the slurmctld to execute 
+Fully qualified pathname of a program for the slurmctld to execute
 before granting a new job allocation (e.g.
-"/usr/local/slurm/prolog_controller"). 
-The program executes as SlurmUser, which gives it permission to drain 
+"/usr/local/slurm/prolog_controller").
+The program executes as SlurmUser, which gives it permission to drain
 nodes and requeue the job if a failure occurs or cancel the job if appropriate.
-The program can be used to reboot nodes or perform other work to prepare 
-resources for use. 
-While this program is running, the nodes associated with the job will be 
+The program can be used to reboot nodes or perform other work to prepare
+resources for use.
+While this program is running, the nodes associated with the job will
 have a POWER_UP/CONFIGURING flag set in their state, which can be readily
 viewed.
 A non\-zero exit code will result in the job being requeued (where possible)
@@ -1016,7 +1016,7 @@ A list of comma separated resource limit names.
 The slurmd daemon uses these names to obtain the associated (soft) limit
 values from the users process environment on the submit node.
 These limits are then propagated and applied to the jobs that
-will run on the compute nodes. 
+will run on the compute nodes.
 This parameter can be useful when system limits vary among nodes.
 Any resource limits that do not appear in the list are not propagated.
 However, the user can override this by specifying which resource limits
@@ -1026,7 +1026,7 @@ the default action is to propagate all limits.
 Only one of the parameters, either
 \fBPropagateResourceLimits\fR or \fBPropagateResourceLimitsExcept\fR,
 may be specified.
-The following limit names are supported by SLURM (although some 
+The following limit names are supported by SLURM (although some
 options may not be supported on some systems):
 .RS
 .TP 10
@@ -1078,11 +1078,11 @@ See \fBPropagateResourceLimits\fR above for a list of valid limit names.
 
 .TP
 \fBResumeProgram\fR
-SLURM supports a mechanism to reduce power consumption on nodes that 
-remain idle for an extended period of time. 
+SLURM supports a mechanism to reduce power consumption on nodes that
+remain idle for an extended period of time.
 This is typically accomplished by reducing voltage and frequency or powering
-the node down. 
-\fBResumeProgram\fR is the program that will be executed when a node 
+the node down.
+\fBResumeProgram\fR is the program that will be executed when a node
 in power save mode is assigned work to perform.
 For reasons of reliability, \fBResumeProgram\fR may execute more than once
 for a node when the \fBslurmctld\fR daemon crashes and is restarted.
@@ -1093,43 +1093,43 @@ The argument to the program will be the names of nodes to
 be removed from power savings mode (using SLURM's hostlist
 expression format).
 By default no program is run.
-Related configuration options include \fBResumeTimeout\fR, \fBResumeRate\fR, 
-\fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendTimeout\fR, \fBSuspendProgram\fR, 
+Related configuration options include \fBResumeTimeout\fR, \fBResumeRate\fR,
+\fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendTimeout\fR, \fBSuspendProgram\fR,
 \fBSuspendExcNodes\fR, and \fBSuspendExcParts\fR.
 More information is available at the SLURM web site
 (https://computing.llnl.gov/linux/slurm/power_save.html).
 
 .TP
 \fBResumeRate\fR
-The rate at which nodes in power save mode are returned to normal 
-operation by \fBResumeProgram\fR. 
-The value is number of nodes per minute and it can be used to prevent 
-power surges if a large number of nodes in power save mode are 
+The rate at which nodes in power save mode are returned to normal
+operation by \fBResumeProgram\fR.
+The value is number of nodes per minute and it can be used to prevent
+power surges if a large number of nodes in power save mode are
 assigned work at the same time (e.g. a large job starts).
-A value of zero results in no limits being imposed. 
+A value of zero results in no limits being imposed.
 The default value is 300 nodes per minute.
-Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR, 
-\fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendTimeout\fR, \fBSuspendProgram\fR, 
+Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR,
+\fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendTimeout\fR, \fBSuspendProgram\fR,
 \fBSuspendExcNodes\fR, and \fBSuspendExcParts\fR.
 
 .TP
 \fBResumeTimeout\fR
-Maximum time permitted (in second) between when a node is resume request 
-is issued and when the node is actually available for use. 
+Maximum time permitted (in seconds) between when a node resume request
+is issued and when the node is actually available for use.
 Nodes which fail to respond in this time frame may be marked DOWN and
 the jobs scheduled on the node requeued.
 The default value is 60 seconds.
-Related configuration options include \fBResumeProgram\fR, \fBResumeRate\fR, 
-\fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendTimeout\fR, \fBSuspendProgram\fR, 
+Related configuration options include \fBResumeProgram\fR, \fBResumeRate\fR,
+\fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendTimeout\fR, \fBSuspendProgram\fR,
 \fBSuspendExcNodes\fR and \fBSuspendExcParts\fR.
 More information is available at the SLURM web site
 (https://computing.llnl.gov/linux/slurm/power_save.html).
 
 .TP
 \fBResvOverRun\fR
-Describes how long a job already running in a reservation should be 
-permitted to execute after the end time of the reservation has been 
-reached. 
+Describes how long a job already running in a reservation should be
+permitted to execute after the end time of the reservation has been
+reached.
 The time period is specified in minutes and the default value is 0
 (kill the job immediately).
 The value may not exceed 65533 minutes, although a value of "UNLIMITED"
@@ -1138,7 +1138,7 @@ is terminated.
 
 .TP
 \fBReturnToService\fR
-Controls when a DOWN node will be returned to service. 
+Controls when a DOWN node will be returned to service.
 The default value is 0.
 Supported values include
 .RS
@@ -1150,13 +1150,13 @@ and resumes communications).
 .TP
 \fB1\fR
 A DOWN node will become available for use upon registration with a
-valid configuration only if it was set DOWN due to being non\-responsive. 
-If the node was set DOWN for any other reason (low memory, prolog failure, 
-epilog failure, etc.), its state will not automatically be changed.  
+valid configuration only if it was set DOWN due to being non\-responsive.
+If the node was set DOWN for any other reason (low memory, prolog failure,
+epilog failure, etc.), its state will not automatically be changed.
 .TP
 \fB2\fR
 A DOWN node will become available for use upon registration with a
-valid configuration.  The node could have been set DOWN for any reason. 
+valid configuration.  The node could have been set DOWN for any reason.
 .RE
 
 .TP
@@ -1189,18 +1189,18 @@ The following options apply only to \fBSchedulerType=sched/backfill\fR.
 .RS
 .TP
 \fBinterval=#\fR
-The number of seconds between iterations. 
+The number of seconds between iterations.
 Higher values result in less overhead and responsiveness.
 The default value is 5 seconds on BlueGene systems and 10 seconds otherwise.
 .TP
 \fBmax_job_bf=#\fR
-The maximum number of jobs to attempt backfill scheduling for 
+The maximum number of jobs to attempt backfill scheduling for
 (i.e. the queue depth).
 Higher values result in more overhead and less responsiveness.
 Until an attempt is made to backfill schedule a job, its expected
 initiation time value will not be set.
 The default value is 50.
-In the case of large clusters (more than 1000 nodes) configured with 
+In the case of large clusters (more than 1000 nodes) configured with
 \fBSelectType=select/cons_res\fR, setting a smaller value may be
 desirable.
 .RE
@@ -1227,8 +1227,8 @@ The default value is 30 seconds.
 
 .TP
 \fBSchedulerType\fR
-Identifies the type of scheduler to be used. 
-Note the \fBslurmctld\fR daemon must be restarted for a change in 
+Identifies the type of scheduler to be used.
+Note the \fBslurmctld\fR daemon must be restarted for a change in
 scheduler type to become effective (reconfiguring a running daemon has
 no effect for this parameter).
 The \fBscontrol\fR command can be used to manually change job priorities
@@ -1237,14 +1237,14 @@ Acceptable values include:
 .RS
 .TP
 \fBsched/builtin\fR
-for the built\-in FIFO (First In First Out) scheduler. 
+for the built\-in FIFO (First In First Out) scheduler.
 This is the default.
 .TP
 \fBsched/backfill\fR
 for a backfill scheduling module to augment the default FIFO scheduling.
-Backfill scheduling will initiate lower\-priority jobs if doing 
+Backfill scheduling will initiate lower\-priority jobs if doing
 so does not delay the expected initiation time of any higher
-priority job. 
+priority job.
 Effectiveness of backfill scheduling is dependent upon users specifying
 job time limits, otherwise all jobs will have the same time limit and
 backfilling is impossible.
@@ -1254,7 +1254,7 @@ Note documentation for the \fBSchedulerParameters\fR option above.
 Defunct option. See \fBPreemptType\fR and \fBPreemptMode\fR options.
 .TP
 \fBsched/hold\fR
-to hold all newly arriving jobs if a file "/etc/slurm.hold" 
+to hold all newly arriving jobs if a file "/etc/slurm.hold"
 exists otherwise use the built\-in FIFO scheduler
 .TP
 \fBsched/wiki\fR
@@ -1266,44 +1266,44 @@ for the Wiki interface to the Moab Cluster Suite
 
 .TP
 \fBSelectType\fR
-Identifies the type of resource selection algorithm to be used. 
-Acceptable values include 
+Identifies the type of resource selection algorithm to be used.
+Acceptable values include
 .RS
 .TP
 \fBselect/linear\fR
 for allocation of entire nodes assuming a
-one\-dimensional array of nodes in which sequentially ordered 
-nodes are preferable. 
-This is the default value for non\-BlueGene systems. 
+one\-dimensional array of nodes in which sequentially ordered
+nodes are preferable.
+This is the default value for non\-BlueGene systems.
 .TP
 \fBselect/cons_res\fR
 The resources within a node are individually allocated as
-consumable resources. 
-Note that whole nodes can be allocated to jobs for selected 
+consumable resources.
+Note that whole nodes can be allocated to jobs for selected
 partitions by using the \fIShared=Exclusive\fR option.
 See the partition \fBShared\fR parameter for more information.
 .TP
 \fBselect/bluegene\fR
-for a three\-dimensional BlueGene system. 
+for a three\-dimensional BlueGene system.
 The default value is "select/bluegene" for BlueGene systems.
 .RE
 
 .TP
 \fBSelectTypeParameters\fR
-The permitted values of \fBSelectTypeParameters\fR depend upon the 
+The permitted values of \fBSelectTypeParameters\fR depend upon the
 configured value of \fBSelectType\fR.
 \fBSelectType=select/bluegene\fR supports no \fBSelectTypeParameters\fR.
-The only supported option for \fBSelectType=select/linear\fR is 
-\fBCR_Memory\fR, which treats memory as a consumable resource and 
+The only supported option for \fBSelectType=select/linear\fR is
+\fBCR_Memory\fR, which treats memory as a consumable resource and
 prevents memory over subscription with job preemption or gang scheduling.
 The following values are supported for \fBSelectType=select/cons_res\fR:
 .RS
 .TP
 \fBCR_CPU\fR
 CPUs are consumable resources.
-There is no notion of sockets, cores or threads; 
-do not define those values in the node specification.  If these 
-are defined, unexpected results will happen when hyper\-threading 
+There is no notion of sockets, cores or threads;
+do not define those values in the node specification.  If these
+are defined, unexpected results will happen when hyper\-threading
 is enabled Procs= should be used instead.
 On a multi\-core system, each core will be considered a CPU.
 On a multi\-core and hyper\-threaded system, each thread will be
@@ -1312,39 +1312,39 @@ On single\-core systems, each CPUs will be considered a CPU.
 .TP
 \fBCR_CPU_Memory\fR
 CPUs and memory are consumable resources.
-There is no notion of sockets, cores or threads; 
-do not define those values in the node specification.  If these 
-are defined, unexpected results will happen when hyper\-threading 
+There is no notion of sockets, cores or threads;
+do not define those values in the node specification.  If these
+are defined, unexpected results will happen when hyper\-threading
 is enabled Procs= should be used instead.
 Setting a value for \fBDefMemPerCPU\fR is strongly recommended.
 .TP
 \fBCR_Core\fR
 Cores are consumable resources.
-On nodes with hyper\-threads, each thread is counted as a CPU to 
-satisfy a job's resource requirement, but multiple jobs are not 
-allocated threads on the same core.  
+On nodes with hyper\-threads, each thread is counted as a CPU to
+satisfy a job's resource requirement, but multiple jobs are not
+allocated threads on the same core.
 .TP
 \fBCR_Core_Memory\fR
 Cores and memory are consumable resources.
-On nodes with hyper\-threads, each thread is counted as a CPU to 
-satisfy a job's resource requirement, but multiple jobs are not 
-allocated threads on the same core.  
+On nodes with hyper\-threads, each thread is counted as a CPU to
+satisfy a job's resource requirement, but multiple jobs are not
+allocated threads on the same core.
 Setting a value for \fBDefMemPerCPU\fR is strongly recommended.
 .TP
 \fBCR_Socket\fR
-Sockets are consumable resources. 
-On nodes with multiple cores, each core or thread is counted as a CPU 
-to satisfy a job's resource requirement, but multiple jobs are not 
+Sockets are consumable resources.
+On nodes with multiple cores, each core or thread is counted as a CPU
+to satisfy a job's resource requirement, but multiple jobs are not
 allocated resources on the same socket.
-Note that jobs requesting one CPU will only be given access to 
+Note that jobs requesting one CPU will only be given access to
 that one CPU, but no other job will share the socket.
 .TP
 \fBCR_Socket_Memory\fR
-Memory and sockets are consumable resources. 
-On nodes with multiple cores, each core or thread is counted as a CPU 
+Memory and sockets are consumable resources.
+On nodes with multiple cores, each core or thread is counted as a CPU
 to satisfy a job's resource requirement, but multiple jobs are not
 allocated resources on the same socket.
-Note that jobs requesting one CPU will only be given access to 
+Note that jobs requesting one CPU will only be given access to
 that one CPU, but no other job will share the socket.
 Setting a value for \fBDefMemPerCPU\fR is strongly recommended.
 .TP
@@ -1356,80 +1356,80 @@ Setting a value for \fBDefMemPerCPU\fR is strongly recommended.
 
 .TP
 \fBSlurmUser\fR
-The name of the user that the \fBslurmctld\fR daemon executes as. 
+The name of the user that the \fBslurmctld\fR daemon executes as.
 For security purposes, a user other than "root" is recommended.
-This user must exist on all nodes of the cluster for authentication 
+This user must exist on all nodes of the cluster for authentication
 of communications between SLURM components.
-The default value is "root". 
+The default value is "root".
 
 .TP
 \fBSlurmdUser\fR
-The name of the user that the \fBslurmd\fR daemon executes as. 
-This user must exist on all nodes of the cluster for authentication 
+The name of the user that the \fBslurmd\fR daemon executes as.
+This user must exist on all nodes of the cluster for authentication
 of communications between SLURM components.
-The default value is "root". 
+The default value is "root".
 
 .TP
 \fBSlurmctldDebug\fR
-The level of detail to provide \fBslurmctld\fR daemon's logs. 
-Values from 0 to 9 are legal, with `0' being "quiet" operation and `9' 
+The level of detail to provide \fBslurmctld\fR daemon's logs.
+Values from 0 to 9 are legal, with `0' being "quiet" operation and `9'
 being insanely verbose.
 The default value is 3.
 
 .TP
 \fBSlurmctldLogFile\fR
-Fully qualified pathname of a file into which the \fBslurmctld\fR daemon's 
+Fully qualified pathname of a file into which the \fBslurmctld\fR daemon's
 logs are written.
 The default value is none (performs logging via syslog).
 
 .TP
 \fBSlurmctldPidFile\fR
-Fully qualified pathname of a file into which the  \fBslurmctld\fR daemon 
+Fully qualified pathname of a file into which the \fBslurmctld\fR daemon
 may write its process id. This may be used for automated signal processing.
 The default value is "/var/run/slurmctld.pid".
 
 .TP
 \fBSlurmctldPort\fR
-The port number that the SLURM controller, \fBslurmctld\fR, listens 
-to for work. The default value is SLURMCTLD_PORT as established at system 
-build time. If none is explicitly specified, it will be set to 6817.  
-NOTE: Either \fBslurmctld\fR and \fBslurmd\fR daemons must not 
-execute on the same nodes or the values of \fBSlurmctldPort\fR and 
+The port number that the SLURM controller, \fBslurmctld\fR, listens
+to for work. The default value is SLURMCTLD_PORT as established at system
+build time. If none is explicitly specified, it will be set to 6817.
+NOTE: Either \fBslurmctld\fR and \fBslurmd\fR daemons must not
+execute on the same nodes or the values of \fBSlurmctldPort\fR and
 \fBSlurmdPort\fR must be different.
 
 .TP
 \fBSlurmctldTimeout\fR
-The interval, in seconds, that the backup controller waits for the 
-primary controller to respond before assuming control. 
+The interval, in seconds, that the backup controller waits for the
+primary controller to respond before assuming control.
 The default value is 120 seconds.
 May not exceed 65533.
 
 .TP
 \fBSlurmdDebug\fR
-The level of detail to provide \fBslurmd\fR daemon's logs. 
-Values from 0 to 9 are legal, with `0' being "quiet" operation and `9' being 
+The level of detail to provide \fBslurmd\fR daemon's logs.
+Values from 0 to 9 are legal, with `0' being "quiet" operation and `9' being
 insanely verbose.
 The default value is 3.
 
 .TP
 \fBSlurmdLogFile\fR
-Fully qualified pathname of a file into which the  \fBslurmd\fR daemon's 
+Fully qualified pathname of a file into which the \fBslurmd\fR daemon's
 logs are written.
 The default value is none (performs logging via syslog).
-Any "%h" within the name is replaced with the hostname on which the 
+Any "%h" within the name is replaced with the hostname on which the
 \fBslurmd\fR is running.
 
 .TP
 \fBSlurmdPidFile\fR
-Fully qualified pathname of a file into which the  \fBslurmd\fR daemon may write 
+Fully qualified pathname of a file into which the \fBslurmd\fR daemon may write
 its process id. This may be used for automated signal processing.
 The default value is "/var/run/slurmd.pid".
 
 .TP
 \fBSlurmdPort\fR
-The port number that the SLURM compute node daemon, \fBslurmd\fR, listens 
-to for work. The default value is SLURMD_PORT as established at system 
-build time. If none is explicitly specified, its value will be 6818. 
+The port number that the SLURM compute node daemon, \fBslurmd\fR, listens
+to for work. The default value is SLURMD_PORT as established at system
+build time. If none is explicitly specified, its value will be 6818.
 NOTE: Either slurmctld and slurmd daemons must not execute
 on the same nodes or the values of \fBSlurmctldPort\fR and \fBSlurmdPort\fR
 must be different.
@@ -1449,12 +1449,12 @@ different shared memory region and lose track of any running jobs.
 
 .TP
 \fBSlurmdTimeout\fR
-The interval, in seconds, that the SLURM controller waits for \fBslurmd\fR 
-to respond before configuring that node's state to DOWN. 
-A value of zero indicates the node will not be tested by \fBslurmctld\fR to 
-confirm the state of \fBslurmd\fR, the node will not be automatically set to 
-a DOWN state indicating a non\-responsive \fBslurmd\fR, and some other tool 
-will take responsibility for monitoring the state of each compute node 
+The interval, in seconds, that the SLURM controller waits for \fBslurmd\fR
+to respond before configuring that node's state to DOWN.
+A value of zero indicates the node will not be tested by \fBslurmctld\fR to
+confirm the state of \fBslurmd\fR, the node will not be automatically set to
+a DOWN state indicating a non\-responsive \fBslurmd\fR, and some other tool
+will take responsibility for monitoring the state of each compute node
 and its \fBslurmd\fR daemon.
 SLURM's hierarchical communication mechanism is used to ping the \fBslurmd\fR
 daemons in order to minimize system noise and overhead.
@@ -1477,36 +1477,36 @@ may be overridden by srun's \fB\-\-prolog\fR parameter.
 
 .TP
 \fBStateSaveLocation\fR
-Fully qualified pathname of a directory into which the SLURM controller, 
-\fBslurmctld\fR, saves its state (e.g. "/usr/local/slurm/checkpoint"). 
+Fully qualified pathname of a directory into which the SLURM controller,
+\fBslurmctld\fR, saves its state (e.g. "/usr/local/slurm/checkpoint").
 SLURM state will saved here to recover from system failures.
 \fBSlurmUser\fR must be able to create files in this directory.
-If you have a \fBBackupController\fR configured, this location should be 
-readable and writable by both systems. 
-Since all running and pending job information is stored here, the use of 
+If you have a \fBBackupController\fR configured, this location should be
+readable and writable by both systems.
+Since all running and pending job information is stored here, the use of
 a reliable file system (e.g. RAID) is recommended.
 The default value is "/tmp".
-If any slurm daemons terminate abnormally, their core files will also be written 
+If any slurm daemons terminate abnormally, their core files will also be written
 into this directory.
 
 .TP
 \fBSuspendExcNodes\fR
-Specifies the nodes which are to not be placed in power save mode, even 
+Specifies the nodes which are to not be placed in power save mode, even
 if the node remains idle for an extended period of time.
 Use SLURM's hostlist expression to identify nodes.
 By default no nodes are excluded.
-Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR, 
-\fBResumeRate\fR, \fBSuspendProgram\fR, \fBSuspendRate\fR, \fBSuspendTime\fR, 
+Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR,
+\fBResumeRate\fR, \fBSuspendProgram\fR, \fBSuspendRate\fR, \fBSuspendTime\fR,
 \fBSuspendTimeout\fR, and \fBSuspendExcParts\fR.
 
 .TP
 \fBSuspendExcParts\fR
-Specifies the partitions whose nodes are to not be placed in power save 
+Specifies the partitions whose nodes are to not be placed in power save
 mode, even if the node remains idle for an extended period of time.
 Multiple partitions can be identified and separated by commas.
 By default no nodes are excluded.
-Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR, 
-\fBResumeRate\fR, \fBSuspendProgram\fR, \fBSuspendRate\fR, \fBSuspendTime\fR 
+Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR,
+\fBResumeRate\fR, \fBSuspendProgram\fR, \fBSuspendRate\fR, \fBSuspendTime\fR,
 \fBSuspendTimeout\fR, and \fBSuspendExcNodes\fR.
 
 .TP
@@ -1514,15 +1514,15 @@ Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR,
 \fBSuspendProgram\fR is the program that will be executed when a node
 remains idle for an extended period of time.
 This program is expected to place the node into some power save mode.
-This can be used to reduce the frequency and voltage of a node or 
+This can be used to reduce the frequency and voltage of a node or
 completely power the node off.
 The program executes as \fBSlurmUser\fR.
 The argument to the program will be the names of nodes to
 be placed into power savings mode (using SLURM's hostlist
 expression format).
 By default, no program is run.
-Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR, 
-\fBResumeRate\fR, \fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendTimeout\fR, 
+Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR,
+\fBResumeRate\fR, \fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendTimeout\fR,
 \fBSuspendExcNodes\fR, and \fBSuspendExcParts\fR.
 
 .TP
@@ -1532,28 +1532,28 @@ The value is number of nodes per minute and it can be used to prevent
 a large drop in power power consumption (e.g. after a large job completes).
 A value of zero results in no limits being imposed.
 The default value is 60 nodes per minute.
-Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR, 
-\fBResumeRate\fR, \fBSuspendProgram\fR, \fBSuspendTime\fR, \fBSuspendTimeout\fR, 
+Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR,
+\fBResumeRate\fR, \fBSuspendProgram\fR, \fBSuspendTime\fR, \fBSuspendTimeout\fR,
 \fBSuspendExcNodes\fR, and \fBSuspendExcParts\fR.
 
 .TP
 \fBSuspendTime\fR
-Nodes which remain idle for this number of seconds will be placed into 
-power save mode by \fBSuspendProgram\fR. 
+Nodes which remain idle for this number of seconds will be placed into
+power save mode by \fBSuspendProgram\fR.
 A value of \-1 disables power save mode and is the default.
-Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR, 
-\fBResumeRate\fR, \fBSuspendProgram\fR, \fBSuspendRate\fR, \fBSuspendTimeout\fR, 
+Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR,
+\fBResumeRate\fR, \fBSuspendProgram\fR, \fBSuspendRate\fR, \fBSuspendTimeout\fR,
 \fBSuspendExcNodes\fR, and \fBSuspendExcParts\fR.
 
 .TP
 \fBSuspendTimeout\fR
-Maximum time permitted (in second) between when a node suspend request 
+Maximum time permitted (in seconds) between when a node suspend request
 is issued and when the node shutdown.
-At that time the node must ready for a resume request to be issued 
-as needed for new work. 
+At that time the node must be ready for a resume request to be issued
+as needed for new work.
 The default value is 30 seconds.
-Related configuration options include \fBResumeProgram\fR, \fBResumeRate\fR, 
-\fBResumeTimeout\fR, \fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendProgram\fR, 
+Related configuration options include \fBResumeProgram\fR, \fBResumeRate\fR,
+\fBResumeTimeout\fR, \fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendProgram\fR,
 \fBSuspendExcNodes\fR and \fBSuspendExcParts\fR.
 More information is available at the SLURM web site
 (https://computing.llnl.gov/linux/slurm/power_save.html).
@@ -1561,15 +1561,15 @@ More information is available at the SLURM web site
 .TP
 \fBSwitchType\fR
 Identifies the type of switch or interconnect used for application
-communications. 
+communications.
 Acceptable values include
-"switch/none" for switches not requiring special processing for job launch 
+"switch/none" for switches not requiring special processing for job launch
 or termination (Myrinet, Ethernet, and InfiniBand),
 "switch/elan" for Quadrics Elan 3 or Elan 4 interconnect.
 The default value is "switch/none".
-All SLURM daemons, commands and running jobs must be restarted for a 
+All SLURM daemons, commands and running jobs must be restarted for a
 change in \fBSwitchType\fR to take effect.
-If running jobs exist at the time \fBslurmctld\fR is restarted with a new 
+If running jobs exist at the time \fBslurmctld\fR is restarted with a new
 value of \fBSwitchType\fR, records of all jobs in any state may be lost.
 
 .TP
@@ -1580,19 +1580,19 @@ See \fBTaskProlog\fR for execution order details.
 
 .TP
 \fBTaskPlugin\fR
-Identifies the type of task launch plugin, typically used to provide 
-resource management within a node (e.g. pinning tasks to specific 
+Identifies the type of task launch plugin, typically used to provide
+resource management within a node (e.g. pinning tasks to specific
 processors).
 Acceptable values include
 "task/none" for systems requiring no special handling and
-"task/affinity" to enable the \-\-cpu_bind and/or \-\-mem_bind 
+"task/affinity" to enable the \-\-cpu_bind and/or \-\-mem_bind
 srun options.
 The default value is "task/none".
-If you "task/affinity" and encounter problems, it may be due to 
-the variety of system calls used to implement task affinity on 
-different operating systems. 
-If that is the case, you may want to use Portable Linux 
-Process Affinity (PLPA, see http://www.open-mpi.org/software/plpa), 
+If you use "task/affinity" and encounter problems, it may be due to
+the variety of system calls used to implement task affinity on
+different operating systems.
+If that is the case, you may want to use Portable Linux
+Process Affinity (PLPA, see http://www.open-mpi.org/software/plpa),
 which is supported by SLURM.
 
 .TP
@@ -1603,8 +1603,8 @@ If \fBNone\fR, \fBSockets\fR, \fBCores\fR, \fBThreads\fR,
 and/or \fBVerbose\fR are specified, they will override
 the \fB\-\-cpu_bind\fR option specified by the user
 in the \fBsrun\fR command.
-\fBNone\fR, \fBSockets\fR, \fBCores\fR and \fBThreads\fR are mutually 
-exclusive and since they decrease scheduling flexibility are not generally 
+\fBNone\fR, \fBSockets\fR, \fBCores\fR and \fBThreads\fR are mutually
+exclusive and since they decrease scheduling flexibility are not generally
 recommended (select no more than one of them).
 \fBCpusets\fR and \fBSched\fR
 are mutually exclusive (select only one of them).
@@ -1612,7 +1612,7 @@ are mutually exclusive (select only one of them).
 .RS
 .TP 10
 \fBCores\fR
-Always bind to cores. 
+Always bind to cores.
 Overrides user options or automatic binding.
 .TP
 \fBCpusets\fR
@@ -1620,7 +1620,7 @@ Use cpusets to perform task affinity functions.
 By default, \fBSched\fR task binding is performed.
 .TP
 \fBNone\fR
-Perform no task binding. 
+Perform no task binding.
 Overrides user options or automatic binding.
 .TP
 \fBSched\fR
@@ -1628,27 +1628,27 @@ Use \fIsched_setaffinity\fR or \fIplpa_sched_setaffinity\fR
 (if available) to bind tasks to processors.
 .TP
 \fBSockets\fR
-Always bind to sockets. 
+Always bind to sockets.
 Overrides user options or automatic binding.
 .TP
 \fBThreads\fR
-Always bind to threads. 
+Always bind to threads.
 Overrides user options or automatic binding.
 .TP
 \fBVerbose\fR
-Verbosely report binding before tasks run. 
+Verbosely report binding before tasks run.
 Overrides user options.
 .RE
 
 .TP
 \fBTaskProlog\fR
-Fully qualified pathname of a program to be execute as the slurm job's 
+Fully qualified pathname of a program to be executed as the slurm job's
 owner prior to initiation of each task.
-Besides the normal environment variables, this has SLURM_TASK_PID 
-available to identify the process ID of the task being started. 
-Standard output from this program of the form 
-"export NAME=value" will be used to set environment variables 
-for the task being spawned. 
+Besides the normal environment variables, this has SLURM_TASK_PID
+available to identify the process ID of the task being started.
+Standard output from this program of the form
+"export NAME=value" will be used to set environment variables
+for the task being spawned.
 Standard output from this program of the form
 "print ..." will cause that line (without the leading "print ")
 to be printed to the job's standard output.
@@ -1676,20 +1676,20 @@ environment variable
 
 .TP
 \fBTmpFS\fR
-Fully qualified pathname of the file system available to user jobs for 
+Fully qualified pathname of the file system available to user jobs for
 temporary storage. This parameter is used in establishing a node's \fBTmpDisk\fR
-space. 
+space.
 The default value is "/tmp".
 
 .TP
 \fBTopologyPlugin\fR
 Identifies the plugin to be used for determining the network topology
-and optimizing job allocations to minimize network contention. 
-Acceptable values include 
-"topology/3d_torus" (default for Cray XT, IBM BlueGene and Sun Constellation 
+and optimizing job allocations to minimize network contention.
+Acceptable values include
+"topology/3d_torus" (default for Cray XT, IBM BlueGene and Sun Constellation
 systems, best\-fit logic over three\-dimensional topology)
 "topology/none" (default for other systems, best\-fit
-logic over one\-dimensional topology) and 
+logic over one\-dimensional topology) and
 "topology/tree" (determine the network topology based
 upon information contained in a topology.conf file).
 See \fBNETWORK TOPOLOGY\fR below for details.
@@ -1698,7 +1698,7 @@ information directly from the network.
 
 .TP
 \fBTrackWCKey\fR
-Boolean yes or no.  Used to set display and track of the Workload  
+Boolean yes or no.  Used to enable display and tracking of the Workload
 Characterization Key.  Must be set to track wckey usage.
 
 .TP
@@ -1718,17 +1718,17 @@ systems.
 \fBUnkillableStepProgram\fR
 If the processes in a job step are determined to be unkillable for a period
 of time specified by the \fBUnkillableStepTimeout\fR variable, the program
-specified by \fBUnkillableStepProgram\fR will be executed.  
+specified by \fBUnkillableStepProgram\fR will be executed.
 This program can be used to take special actions to clean up the unkillable
-processes and/or notify computer administrators.  
-The program will be run \fBSlurmdUser\fR (usually "root"). 
+processes and/or notify computer administrators.
+The program will be run as \fBSlurmdUser\fR (usually "root").
 By default no program is run.
 
 .TP
 \fBUnkillableStepTimeout\fR
 The length of time, in seconds, that SLURM will wait before deciding that
 processes in a job step are unkillable (after they have been signaled with
-SIGKILL) and execute \fBUnkillableStepProgram\fR as described above.  
+SIGKILL) and execute \fBUnkillableStepProgram\fR as described above.
 The default timeout value is 60 seconds.
 
 .TP
@@ -1760,69 +1760,69 @@ lines (see above), where \fBslurm\fR is the service\-name, should be added.
 
 .TP
 \fBWaitTime\fR
-Specifies how many seconds the srun command should by default wait after 
-the first task terminates before terminating all remaining tasks. The 
-"\-\-wait" option on the srun command line overrides this value. 
+Specifies how many seconds the srun command should by default wait after
+the first task terminates before terminating all remaining tasks. The
+"\-\-wait" option on the srun command line overrides this value.
 If set to 0, this feature is disabled.
 May not exceed 65533 seconds.
 
 .LP
-The configuration of nodes (or machines) to be managed by SLURM is 
-also specified in \fB/etc/slurm.conf\fR. 
+The configuration of nodes (or machines) to be managed by SLURM is
+also specified in \fB/etc/slurm.conf\fR.
 Changes in node configuration (e.g. adding nodes, changing their
 processor count, etc.) require restarting the slurmctld daemon.
 Only the NodeName must be supplied in the configuration file.
 All other node configuration information is optional.
-It is advisable to establish baseline node configurations, 
-especially if the cluster is heterogeneous. 
-Nodes which register to the system with less than the configured resources 
-(e.g. too little memory), will be placed in the "DOWN" state to 
-avoid scheduling jobs on them. 
-Establishing baseline configurations will also speed SLURM's 
-scheduling process by permitting it to compare job requirements 
-against these (relatively few) configuration parameters and 
-possibly avoid having to check job requirements  
+It is advisable to establish baseline node configurations,
+especially if the cluster is heterogeneous.
+Nodes which register to the system with less than the configured resources
+(e.g. too little memory), will be placed in the "DOWN" state to
+avoid scheduling jobs on them.
+Establishing baseline configurations will also speed SLURM's
+scheduling process by permitting it to compare job requirements
+against these (relatively few) configuration parameters and
+possibly avoid having to check job requirements
 against every individual node's configuration.
-The resources checked at node registration time are: Procs, 
-RealMemory and TmpDisk. 
-While baseline values for each of these can be established 
-in the configuration file, the actual values upon node 
-registration are recorded and these actual values may be 
-used for scheduling purposes (depending upon the value of 
+The resources checked at node registration time are: Procs,
+RealMemory and TmpDisk.
+While baseline values for each of these can be established
+in the configuration file, the actual values upon node
+registration are recorded and these actual values may be
+used for scheduling purposes (depending upon the value of
 \fBFastSchedule\fR in the configuration file.
 .LP
-Default values can be specified with a record in which 
-"NodeName" is "DEFAULT". 
-The default entry values will apply only to lines following it in the 
-configuration file and the default values can be reset multiple times 
+Default values can be specified with a record in which
+"NodeName" is "DEFAULT".
+The default entry values will apply only to lines following it in the
+configuration file and the default values can be reset multiple times
 in the configuration file with multiple entries where "NodeName=DEFAULT".
-The "NodeName=" specification must be placed on every line 
-describing the configuration of nodes. 
-In fact, it is generally possible and desirable to define the 
+The "NodeName=" specification must be placed on every line
+describing the configuration of nodes.
+In fact, it is generally possible and desirable to define the
 configurations of all nodes in only a few lines.
-This convention permits significant optimization in the scheduling 
-of larger clusters. 
+This convention permits significant optimization in the scheduling
+of larger clusters.
 In order to support the concept of jobs requiring consecutive nodes
-on some architectures, 
+on some architectures,
 node specifications should be place in this file in consecutive order.
 No single node name may be listed more than once in the configuration
 file.
-Use "DownNodes=" to record the state of nodes which are temporarily 
-in a DOWN, DRAIN or FAILING state without altering permanent 
+Use "DownNodes=" to record the state of nodes which are temporarily
+in a DOWN, DRAIN or FAILING state without altering permanent
 configuration information.
-A job step's tasks are allocated to nodes in order the nodes appear 
-in the configuration file. There is presently no capability within 
+A job step's tasks are allocated to nodes in order the nodes appear
+in the configuration file. There is presently no capability within
 SLURM to arbitrarily order a job step's tasks.
 .LP
 Multiple node names may be comma separated (e.g. "alpha,beta,gamma")
-and/or a simple node range expression may optionally be used to 
-specify numeric ranges of nodes to avoid building a configuration 
-file with large numbers of entries. 
-The node range expression can contain one  pair of square brackets 
-with a sequence of comma separated numbers and/or ranges of numbers 
+and/or a simple node range expression may optionally be used to
+specify numeric ranges of nodes to avoid building a configuration
+file with large numbers of entries.
+The node range expression can contain one pair of square brackets
+with a sequence of comma separated numbers and/or ranges of numbers
 separated by a "\-" (e.g. "linux[0\-64,128]", or "lx[15,18,32\-33]").
-Note that the numeric ranges can include one or more leading 
-zeros to indicate the numeric portion has a fixed number of digits 
+Note that the numeric ranges can include one or more leading
+zeros to indicate the numeric portion has a fixed number of digits
 (e.g. "linux[0000\-1023]").
 Up to two numeric ranges can be included in the expression
 (e.g. "rack[0\-63]_blade[0\-41]").
@@ -1834,13 +1834,13 @@ On BlueGene systems only, the square brackets should contain
 pairs of three digit numbers separated by a "x".
 These numbers indicate the boundaries of a rectangular prism
 (e.g. "bgl[000x144,400x544]").
-See BlueGene documentation for more details. 
+See BlueGene documentation for more details.
 The node configuration specified the following information:
 
 .TP
 \fBNodeName\fR
-Name that SLURM uses to refer to a node (or base partition for 
-BlueGene systems). 
+Name that SLURM uses to refer to a node (or base partition for
+BlueGene systems).
 Typically this would be the string that "/bin/hostname \-s" returns.
 It may also be the fully qualified domain name as returned by "/bin/hostname \-f"
 (e.g. "foo1.bar.com"), or any valid domain name associated with the host
@@ -1851,19 +1851,19 @@ must be at the end of the string).
 Only short hostname forms are compatible with the
 switch/elan and switch/federation plugins at this time.
 It may also be an arbitrary string if \fBNodeHostname\fR is specified.
-If the \fBNodeName\fR is "DEFAULT", the values specified 
-with that record will apply to subsequent node specifications   
-unless explicitly set to other values in that node record or 
-replaced with a different set of default values. 
-For architectures in which the node order is significant, 
-nodes will be considered consecutive in the order defined. 
-For example, if the configuration for "NodeName=charlie" immediately 
-follows the configuration for "NodeName=baker" they will be 
+If the \fBNodeName\fR is "DEFAULT", the values specified
+with that record will apply to subsequent node specifications
+unless explicitly set to other values in that node record or
+replaced with a different set of default values.
+For architectures in which the node order is significant,
+nodes will be considered consecutive in the order defined.
+For example, if the configuration for "NodeName=charlie" immediately
+follows the configuration for "NodeName=baker" they will be
 considered adjacent in the computer.
 
 .TP
 \fBNodeHostname\fR
-Typically this would be the string that "/bin/hostname \-s" returns. 
+Typically this would be the string that "/bin/hostname \-s" returns.
 It may also be the fully qualified domain name as returned by "/bin/hostname \-f"
 (e.g. "foo1.bar.com"), or any valid domain name associated with the host
 through the host database (/etc/hosts) or DNS, depending on the resolver
@@ -1873,23 +1873,23 @@ must be at the end of the string).
 Only short hostname forms are compatible with the
 switch/elan and switch/federation plugins at this time.
 A node range expression can be used to specify a set of nodes.
-If an expression is used, the number of nodes identified by 
-\fBNodeHostname\fR on a line in the configuration file must 
+If an expression is used, the number of nodes identified by
+\fBNodeHostname\fR on a line in the configuration file must
 be identical to the number of nodes identified by \fBNodeName\fR.
-By default, the \fBNodeHostname\fR will be identical in value to 
+By default, the \fBNodeHostname\fR will be identical in value to
 \fBNodeName\fR.
 
 .TP
 \fBNodeAddr\fR
-Name that a node should be referred to in establishing 
-a communications path. 
-This name will be used as an 
-argument to the gethostbyname() function for identification. 
-If a node range expression is used to designate multiple nodes, 
+Name that a node should be referred to in establishing
+a communications path.
+This name will be used as an
+argument to the gethostbyname() function for identification.
+If a node range expression is used to designate multiple nodes,
 they must exactly match the entries in the \fBNodeName\fR
-(e.g. "NodeName=lx[0\-7] NodeAddr="elx[0\-7]"). 
+(e.g. "NodeName=lx[0\-7] NodeAddr="elx[0\-7]").
 \fBNodeAddr\fR may also contain IP addresses.
-By default, the \fBNodeAddr\fR will be identical in value to 
+By default, the \fBNodeAddr\fR will be identical in value to
 \fBNodeName\fR.
 
 .TP
@@ -1903,12 +1903,12 @@ The default value is 1.
 
 .TP
 \fBFeature\fR
-A comma delimited list of arbitrary strings indicative of some 
-characteristic associated with the node. 
-There is no value associated with a feature at this time, a node 
-either has a feature or it does not.  
-If desired a feature may contain a numeric component indicating, 
-for example, processor speed. 
+A comma delimited list of arbitrary strings indicative of some
+characteristic associated with the node.
+There is no value associated with a feature at this time, a node
+either has a feature or it does not.
+If desired a feature may contain a numeric component indicating,
+for example, processor speed.
 By default a node has no features.
 
 .TP
@@ -1916,7 +1916,7 @@ By default a node has no features.
 Number of logical processors on the node (e.g. "2").
 If \fBProcs\fR is omitted, it will set equal to the product of
 \fBSockets\fR, \fBCoresPerSocket\fR, and \fBThreadsPerCore\fR.
-The default value is 1. 
+The default value is 1.
 
 .TP
 \fBRealMemory\fR
@@ -1925,8 +1925,8 @@ The default value is 1.
 
 .TP
 \fBReason\fR
-Identifies the reason for a node being in state "DOWN", "DRAINED" 
-"DRAINING", "FAIL" or "FAILING". 
+Identifies the reason for a node being in state "DOWN", "DRAINED",
+"DRAINING", "FAIL" or "FAILING".
 Use quotes to enclose a reason having more than one word.
 
 .TP
@@ -1934,83 +1934,83 @@ Use quotes to enclose a reason having more than one word.
 Number of physical processor sockets/chips on the node (e.g. "2").
 If Sockets is omitted, it will be inferred from
 \fBProcs\fR, \fBCoresPerSocket\fR, and \fBThreadsPerCore\fR.
-\fBNOTE\fR: If you have multi\-core processors, you will likely 
+\fBNOTE\fR: If you have multi\-core processors, you will likely
 need to specify these parameters.
 The default value is 1.
 
 .TP
 \fBState\fR
-State of the node with respect to the initiation of user jobs. 
-Acceptable values are "DOWN", "DRAIN", "FAIL", "FAILING" and "UNKNOWN". 
+State of the node with respect to the initiation of user jobs.
+Acceptable values are "DOWN", "DRAIN", "FAIL", "FAILING" and "UNKNOWN".
 "DOWN" indicates the node failed and is unavailable to be allocated work.
 "DRAIN" indicates the node is unavailable to be allocated work.
-"FAIL" indicates the node is expected to fail soon, has 
-no jobs allocated to it, and will not be allocated 
+"FAIL" indicates the node is expected to fail soon, has
+no jobs allocated to it, and will not be allocated
 to any new jobs.
-"FAILING" indicates the node is expected to fail soon, has 
-one or more jobs allocated to it, but will not be allocated 
+"FAILING" indicates the node is expected to fail soon, has
+one or more jobs allocated to it, but will not be allocated
 to any new jobs.
-"UNKNOWN" indicates the node's state is undefined (BUSY or IDLE), 
-but will be established when the \fBslurmd\fR daemon on that node 
+"UNKNOWN" indicates the node's state is undefined (BUSY or IDLE),
+but will be established when the \fBslurmd\fR daemon on that node
 registers.
 The default value is "UNKNOWN".
-Also see the \fBDownNodes\fR parameter below. 
+Also see the \fBDownNodes\fR parameter below.
 
 .TP
 \fBThreadsPerCore\fR
 Number of logical threads in a single physical core (e.g. "2").
 Note that the SLURM can allocate resources to jobs down to the
 resolution of a core. If your system is configured with more than
-one thread per core, execution of a different job on each thread 
-is not supported. 
+one thread per core, execution of a different job on each thread
+is not supported.
 A job can execute a one task per thread from within one job step or
-execute a distinct job step on each of the threads.  
-Note also if you are running with more than 1 thread per core and running 
-the select/cons_res plugin you will want to set the SelectTypeParameters 
+execute a distinct job step on each of the threads.
+Note also if you are running with more than 1 thread per core and running
+the select/cons_res plugin you will want to set the SelectTypeParameters
 variable to something other than CR_CPU to avoid unexpected results.
 The default value is 1.
 
 .TP
 \fBTmpDisk\fR
-Total size of temporary disk storage in \fBTmpFS\fR in MegaBytes 
-(e.g. "16384"). \fBTmpFS\fR (for "Temporary File System") 
-identifies the location which jobs should use for temporary storage. 
-Note this does not indicate the amount of free 
-space available to the user on the node, only the total file 
-system size. The system administration should insure this file 
-system is purged as needed so that user jobs have access to 
-most of this space. 
-The Prolog and/or Epilog programs (specified in the configuration file) 
-might be used to insure the file system is kept clean. 
+Total size of temporary disk storage in \fBTmpFS\fR in MegaBytes
+(e.g. "16384"). \fBTmpFS\fR (for "Temporary File System")
+identifies the location which jobs should use for temporary storage.
+Note this does not indicate the amount of free
+space available to the user on the node, only the total file
+system size. The system administration should insure this file
+system is purged as needed so that user jobs have access to
+most of this space.
+The Prolog and/or Epilog programs (specified in the configuration file)
+might be used to insure the file system is kept clean.
 The default value is 0.
 
 .TP
 \fBWeight\fR
-The priority of the node for scheduling purposes. 
-All things being equal, jobs will be allocated the nodes with 
-the lowest weight which satisfies their requirements. 
-For example, a heterogeneous collection of nodes might 
+The priority of the node for scheduling purposes.
+All things being equal, jobs will be allocated the nodes with
+the lowest weight which satisfies their requirements.
+For example, a heterogeneous collection of nodes might
 be placed into a single partition for greater system
-utilization, responsiveness and capability. It would be 
-preferable to allocate smaller memory nodes rather than larger 
-memory nodes if either will satisfy a job's requirements. 
-The units of weight are arbitrary, but larger weights 
-should be assigned to nodes with more processors, memory, 
+utilization, responsiveness and capability. It would be
+preferable to allocate smaller memory nodes rather than larger
+memory nodes if either will satisfy a job's requirements.
+The units of weight are arbitrary, but larger weights
+should be assigned to nodes with more processors, memory,
 disk space, higher processor speed, etc.
 Note that if a job allocation request can not be satisfied
 using the nodes with the lowest weight, the set of nodes
 with the next lowest weight is added to the set of nodes
-under consideration for use (repeat as needed for higher 
-weight values). If you absolutely want to minimize the number 
-of higher weight nodes allocated to a job (at a cost of higher 
-scheduling overhead), give each node a distinct \fBWeight\fR 
-value and they will be added to the pool of nodes being 
+under consideration for use (repeat as needed for higher
+weight values). If you absolutely want to minimize the number
+of higher weight nodes allocated to a job (at a cost of higher
+scheduling overhead), give each node a distinct \fBWeight\fR
+value and they will be added to the pool of nodes being
 considered for scheduling individually.
 The default value is 1.
 
 .LP
-The "DownNodes=" configuration permits you to mark certain nodes as in a 
-DOWN, DRAIN, FAIL, or FAILING state without altering the permanent 
+The "DownNodes=" configuration permits you to mark certain nodes as in a
+DOWN, DRAIN, FAIL, or FAILING state without altering the permanent
 configuration information listed under a "NodeName=" specification.
 
 .TP
@@ -2019,15 +2019,15 @@ Any node name, or list of node names, from the "NodeName=" specifications.
 
 .TP
 \fBReason\fR
-Identifies the reason for a node being in state "DOWN", "DRAIN", 
-"FAIL" or "FAILING. 
+Identifies the reason for a node being in state "DOWN", "DRAIN",
+"FAIL" or "FAILING".
 \Use quotes to enclose a reason having more than one word.
 
 .TP
 \fBState\fR
-State of the node with respect to the initiation of user jobs. 
+State of the node with respect to the initiation of user jobs.
 Acceptable values are "BUSY", "DOWN", "DRAIN", "FAIL",
-"FAILING, "IDLE", and "UNKNOWN". 
+"FAILING", "IDLE", and "UNKNOWN".
 "DOWN" indicates the node failed and is unavailable to be allocated work.
 "DRAIN" indicates the node is unavailable to be allocated work.
 "FAIL" indicates the node is expected to fail soon, has
@@ -2036,25 +2036,25 @@ to any new jobs.
 "FAILING" indicates the node is expected to fail soon, has
 one or more jobs allocated to it, but will not be allocated
 to any new jobs.
-"FUTURE" indicates the node is defined for future use and need not 
-exist when the SLURM daemons are started. These nodes can be made available 
-for use simply by updating the node state using the scontrol command rather 
-than restarting the slurmctld daemon. After these nodes are made available, 
-change their \fRState\fR in the slurm.conf file. Until these nodes are made 
-available, they will not be seen using any SLURM commands or APIs nor will 
-any attempt be made to contact them. 
-"UNKNOWN" indicates the node's state is undefined (BUSY or IDLE), 
-but will be established when the \fBslurmd\fR daemon on that node 
+"FUTURE" indicates the node is defined for future use and need not
+exist when the SLURM daemons are started. These nodes can be made available
+for use simply by updating the node state using the scontrol command rather
+than restarting the slurmctld daemon. After these nodes are made available,
+change their \fRState\fR in the slurm.conf file. Until these nodes are made
+available, they will not be seen using any SLURM commands or APIs nor will
+any attempt be made to contact them.
+"UNKNOWN" indicates the node's state is undefined (BUSY or IDLE),
+but will be established when the \fBslurmd\fR daemon on that node
 registers.
 The default value is "UNKNOWN".
 
 .LP
-The partition configuration permits you to establish different job 
-limits or access controls for various groups (or partitions) of nodes. 
-Nodes may be in more than one partition, making partitions serve 
-as general purpose queues. 
-For example one may put the same set of nodes into two different 
-partitions, each with different constraints (time limit, job sizes, 
+The partition configuration permits you to establish different job
+limits or access controls for various groups (or partitions) of nodes.
+Nodes may be in more than one partition, making partitions serve
+as general purpose queues.
+For example one may put the same set of nodes into two different
+partitions, each with different constraints (time limit, job sizes,
 groups allowed to use the partition, etc.).
 Jobs are allocated resources within a single partition.
 Default values can be specified with a record in which
@@ -2065,7 +2065,7 @@ in the configuration file with multiple entries where "PartitionName=DEFAULT".
 The "PartitionName=" specification must be placed on every line
 describing the configuration of partitions.
 \fBNOTE:\fR Put all parameters for each partition on a single line.
-Each line of partition configuration information should 
+Each line of partition configuration information should
 represent a different partition.
 The partition configuration file contains the following information:
 
@@ -2079,42 +2079,42 @@ The default value is "ALL".
 
 .TP
 \fBAllowGroups\fR
-Comma separated list of group IDs which may execute jobs in the partition. 
-If at least one group associated with the user attempting to execute the 
+Comma separated list of group IDs which may execute jobs in the partition.
+If at least one group associated with the user attempting to execute the
 job is in AllowGroups, he will be permitted to use this partition.
 Jobs executed as user root can use any partition without regard to
 the value of AllowGroups.
-If user root attempts to execute a job as another user (e.g. using 
-srun's \-\-uid option), this other user must be in one of groups 
+If user root attempts to execute a job as another user (e.g. using
+srun's \-\-uid option), this other user must be in one of the groups
 identified by AllowGroups for the job to successfully execute.
-The default value is "ALL". 
+The default value is "ALL".
 
 .TP
 \fBDefault\fR
-If this keyword is set, jobs submitted without a partition 
+If this keyword is set, jobs submitted without a partition
 specification will utilize this partition.
-Possible values are "YES" and "NO". 
+Possible values are "YES" and "NO".
 The default value is "NO".
 
 .TP
 \fBDisableRootJobs\fR
 If set to "YES" then user root will be prevented from running any jobs
 on this partition.
-The default value will be the value of \fBDisableRootJobs\fR set 
+The default value will be the value of \fBDisableRootJobs\fR set
 outside of a partition specification (which is "NO", allowing user
 root to execute jobs).
 
 .TP
 \fBHidden\fR
-Specifies if the partition and its jobs are to be hidden by default. 
-Hidden partitions will by default not be reported by the SLURM 
+Specifies if the partition and its jobs are to be hidden by default.
+Hidden partitions will by default not be reported by the SLURM
 APIs or commands.
-Possible values are "YES" and "NO". 
+Possible values are "YES" and "NO".
 The default value is "NO".
 
 .TP
 \fBMaxNodes\fR
-Maximum count of nodes (c\-nodes for BlueGene systems) which 
+Maximum count of nodes (c\-nodes for BlueGene systems) which
 may be allocated to any single job.
 The default value is "UNLIMITED", which is represented internally as \-1.
 This limit does not apply to jobs executed by SlurmUser or user root.
@@ -2122,10 +2122,10 @@ This limit does not apply to jobs executed by SlurmUser or user root.
 .TP
 \fBMaxTime\fR
 Maximum run time limit for jobs.
-Format is minutes, minutes:seconds, hours:minutes:seconds, 
+Format is minutes, minutes:seconds, hours:minutes:seconds,
 days\-hours, days\-hours:minutes, days\-hours:minutes:seconds or
-"UNLIMITED". 
-Time resolution is one minute and second values are rounded up to 
+"UNLIMITED".
+Time resolution is one minute and second values are rounded up to
 the next minute.
 This limit does not apply to jobs executed by SlurmUser or user root.
 
@@ -2137,23 +2137,23 @@ Format is the same as for MaxTime.
 
 .TP
 \fBMinNodes\fR
-Minimum count of nodes (or base partitions for BlueGene systems) which 
+Minimum count of nodes (or base partitions for BlueGene systems) which
 may be allocated to any single job.
 The default value is 1.
 This limit does not apply to jobs executed by SlurmUser or user root.
 
 .TP
 \fBNodes\fR
-Comma separated list of nodes (or base partitions for BlueGene systems) 
-which are associated with this partition. 
-Node names may be specified using the node range expression syntax 
-described above. A blank list of nodes 
-(i.e. "Nodes= ") can be used if one wants a partition to exist, 
+Comma separated list of nodes (or base partitions for BlueGene systems)
+which are associated with this partition.
+Node names may be specified using the node range expression syntax
+described above. A blank list of nodes
+(i.e. "Nodes= ") can be used if one wants a partition to exist,
 but have no resources (possibly on a temporary basis).
 
 .TP
 \fBPartitionName\fR
-Name by which the partition may be referenced (e.g. "Interactive"). 
+Name by which the partition may be referenced (e.g. "Interactive").
 This name can be specified by users when submitting jobs.
 If the \fBPartitionName\fR is "DEFAULT", the values specified
 with that record will apply to subsequent partition specifications
@@ -2162,28 +2162,28 @@ replaced with a different set of default values.
 
 .TP
 \fBPriority\fR
-Jobs submitted to a higher priority partition will be dispatched 
+Jobs submitted to a higher priority partition will be dispatched
 before pending jobs in lower priority partitions and if possible
 they will preempt running jobs from lower priority partitions.
-Note that a partition's priority takes precedence over a job's 
+Note that a partition's priority takes precedence over a job's
 priority.
 The value may not exceed 65533.
 
 
 .TP
 \fBRootOnly\fR
-Specifies if only user ID zero (i.e. user \fIroot\fR) may allocate resources 
-in this partition. User root may allocate resources for any other user, 
-but the request must be initiated by user root. 
-This option can be useful for a partition to be managed by some 
-external entity (e.g. a higher\-level job manager) and prevents 
+Specifies if only user ID zero (i.e. user \fIroot\fR) may allocate resources
+in this partition. User root may allocate resources for any other user,
+but the request must be initiated by user root.
+This option can be useful for a partition to be managed by some
+external entity (e.g. a higher\-level job manager) and prevents
 users from directly using those resources.
-Possible values are "YES" and "NO". 
+Possible values are "YES" and "NO".
 The default value is "NO".
 
 .TP
 \fBShared\fR
-Controls the ability of the partition to execute more than one job at a 
+Controls the ability of the partition to execute more than one job at a
 time on each resource (node, socket or core depending upon the value
 of \fBSelectTypeParameters\fR).
 If resources are to be shared, avoiding memory over\-subscription
@@ -2210,11 +2210,11 @@ For more information see the following web pages:
 .TP 12
 \fBEXCLUSIVE\fR
 Allocates entire nodes to jobs even with select/cons_res configured.
-This can be used to allocate whole nodes in some partitions 
+This can be used to allocate whole nodes in some partitions
 and individual processors in other partitions.
-.TP 
+.TP
 \fBFORCE\fR
-Make all resources in the partition available for sharing 
+Make all resources in the partition available for sharing
 without any means for users to disable it.
 May be followed with a colon and maximum number of jobs in
 running or suspended state.
@@ -2227,35 +2227,35 @@ with gang scheduling (\fBSchedulerType=sched/gang\fR).
 \fBYES\fR
 Make nodes in the partition available for sharing, but provides
 the user with a means of getting dedicated resources.
-If \fBSelectType=select/cons_res\fR, then resources will be 
-over\-subscribed unless explicitly disabled in the job submit 
+If \fBSelectType=select/cons_res\fR, then resources will be
+over\-subscribed unless explicitly disabled in the job submit
 request using the "\-\-exclusive" option.
 With \fBSelectType=select/bluegene\fR or \fBSelectType=select/linear\fR,
 resources will only be over\-subscribed when explicitly requested
 by the user using the "\-\-share" option on job submission.
-May be followed with a colon and maximum number of jobs in 
+May be followed with a colon and maximum number of jobs in
 running or suspended state.
 For example "Shared=YES:4" enables each node, socket or
 core to execute up to four jobs at once.
-Recommended only for systems running with gang scheduling 
+Recommended only for systems running with gang scheduling
 (\fBSchedulerType=sched/gang\fR).
 .RE
 
 .TP
 \fBState\fR
-State of partition or availability for use.  Possible values 
+State of partition or availability for use.  Possible values
 are "UP" or "DOWN". The default value is "UP".
 
 .SH "Prolog and Epilog Scripts"
-There are a variety of prolog and epilog program options that 
-execute with various permissions and at various times. 
-The four options most likely to be used are: 
+There are a variety of prolog and epilog program options that
+execute with various permissions and at various times.
+The four options most likely to be used are:
 \fBProlog\fR and \fBEpilog\fR (executed once on each compute node
 for each job) plus \fBPrologSlurmctld\fR and \fBEpilogSlurmctld\fR
 (executed once on the \fBControlMachine\fR for each job).
 
-NOTE:  Standard output and error messages are normally not preserved. 
-Explicitly write output and error messages to an appropriate location 
+NOTE:  Standard output and error messages are normally not preserved.
+Explicitly write output and error messages to an appropriate location
 if you which to preserve that information.
 
 NOTE:  The Prolog script is ONLY run on any individual
@@ -2279,11 +2279,11 @@ BlueGene partition name.
 Available on BlueGene systems only.
 .TP
 \fBSLURM_JOB_ACCOUNT\fR
-Account name used for the job. 
+Account name used for the job.
 Available in \fBPrologSlurmctld\fR and \fBEpilogSlurmctld\fR only.
 .TP
 \fBSLURM_JOB_CONSTRAINTS\fR
-Features required to run the job. 
+Features required to run the job.
 Available in \fBPrologSlurmctld\fR and \fBEpilogSlurmctld\fR only.
 .TP
 \fBSLURM_JOB_GID\fR
@@ -2319,38 +2319,38 @@ User name of the job's owner.
 
 .SH "NETWORK TOPOLOGY"
 SLURM is able to optimize job allocations to minimize network contention.
-Special SLURM logic is used to optimize allocations on systems with a 
+Special SLURM logic is used to optimize allocations on systems with a
 three\-dimensional interconnect (BlueGene, Sun Constellation, etc.)
-and information about configuring those systems are available on 
+and information about configuring those systems is available on
 web pages available here: <https://computing.llnl.gov/linux/slurm/>.
-For a hierarchical network, SLURM needs to have detailed information 
+For a hierarchical network, SLURM needs to have detailed information
 about how nodes are configured on the network switches.
 .LP
-Given network topology information, SLURM allocates all of a job's 
-resources onto a single leaf of the network (if possible) using a best\-fit 
+Given network topology information, SLURM allocates all of a job's
+resources onto a single leaf of the network (if possible) using a best\-fit
 algorithm.
 Otherwise it will allocate a job's resources onto multiple leaf switches
-so as to minimize the use of higher\-level switches. 
+so as to minimize the use of higher\-level switches.
 The \fBTopologyPlugin\fR parameter controls which plugin is used to
-collect network topology information. 
-The only values presently supported are 
-"topology/3d_torus" (default for IBM BlueGene, Sun Constellation and 
+collect network topology information.
+The only values presently supported are
+"topology/3d_torus" (default for IBM BlueGene, Sun Constellation and
 Cray XT systems, performs best\-fit logic over three\-dimensional topology),
-"topology/none" (default for other systems, 
+"topology/none" (default for other systems,
 best\-fit logic over one\-dimensional topology),
 "topology/tree" (determine the network topology based
 upon information contained in a topology.conf file,
 see "man topology.conf" for more information).
 Future plugins may gather topology information directly from the network.
-The topology information is optional. 
+The topology information is optional.
 If not provided, SLURM will perform a best\-fit algorithm assuming the
-nodes are in a one\-dimensional array as configured and the communications 
+nodes are in a one\-dimensional array as configured and the communications
 cost is related to the node distance in this array.
 
 .SH "RELOCATING CONTROLLERS"
-If the cluster's computers used for the primary or backup controller 
-will be out of service for an extended period of time, it may be 
-desirable to relocate them. 
+If the cluster's computers used for the primary or backup controller
+will be out of service for an extended period of time, it may be
+desirable to relocate them.
 In order to do so, follow this procedure:
 .LP
 1. Stop the SLURM daemons
@@ -2361,20 +2361,20 @@ In order to do so, follow this procedure:
 .br
 4. Restart the SLURM daemons
 .LP
-There should be no loss of any running or pending jobs. 
-Insure that any nodes added to the cluster have the current 
-slurm.conf file installed. 
+There should be no loss of any running or pending jobs.
+Ensure that any nodes added to the cluster have the current
+slurm.conf file installed.
 .LP
-\fBCAUTION:\fR If two nodes are simultaneously configured as the 
-primary controller (two nodes on which \fBControlMachine\fR specify 
-the local host and the \fBslurmctld\fR daemon is executing on each), 
+\fBCAUTION:\fR If two nodes are simultaneously configured as the
+primary controller (two nodes on which \fBControlMachine\fR specifies
+the local host and the \fBslurmctld\fR daemon is executing on each),
 system behavior will be destructive.
-If a compute node has an incorrect \fBControlMachine\fR or 
-\fBBackupController\fR parameter, that node may be rendered 
+If a compute node has an incorrect \fBControlMachine\fR or
+\fBBackupController\fR parameter, that node may be rendered
 unusable, but no other harm will result.
 
 .SH "EXAMPLE"
-.LP 
+.LP
 #
 .br
 # Sample /etc/slurm.conf for dev[0\-25].llnl.gov
@@ -2397,7 +2397,7 @@ BackupAddr=edev1
 .br
 AuthType=auth/munge
 .br
-Epilog=/usr/local/slurm/epilog 
+Epilog=/usr/local/slurm/epilog
 .br
 Prolog=/usr/local/slurm/prolog
 .br
@@ -2501,8 +2501,8 @@ details.
 
 .SH "SEE ALSO"
 .LP
-\fBbluegene.conf\fR(5), \fBgethostbyname\fR(3), 
-\fBgetrlimit\fR(2), \fBgroup\fR(5), \fBhostname\fR(1), 
-\fBscontrol\fR(1), \fBslurmctld\fR(8), \fBslurmd\fR(8), 
-\fBslurmdbd\fR(8), \fBslurmdbd.conf\fR(5), \fBsrun(1)\fR, 
+\fBbluegene.conf\fR(5), \fBgethostbyname\fR(3),
+\fBgetrlimit\fR(2), \fBgroup\fR(5), \fBhostname\fR(1),
+\fBscontrol\fR(1), \fBslurmctld\fR(8), \fBslurmd\fR(8),
+\fBslurmdbd\fR(8), \fBslurmdbd.conf\fR(5), \fBsrun(1)\fR,
 \fBspank(8)\fR, \fBsyslog\fR(2), \fBtopology.conf\fR(5), \fBwiki.conf\fR(5)
diff --git a/doc/man/man5/slurmdbd.conf.5 b/doc/man/man5/slurmdbd.conf.5
index f8998b2c74c6874324998480d8e55f4558fd4c07..e810068ce7897cab2b5a8eeeec25dd927b4e0536 100644
--- a/doc/man/man5/slurmdbd.conf.5
+++ b/doc/man/man5/slurmdbd.conf.5
@@ -1,21 +1,21 @@
 .TH "slurmdbd.conf" "5" "February 2009" "slurmdbd.conf 2.0" "Slurm configuration file"
 .SH "NAME"
-slurmdbd.conf \- Slurm Database Daemon (SlurmDBD) configuration file 
+slurmdbd.conf \- Slurm Database Daemon (SlurmDBD) configuration file
 
 .SH "DESCRIPTION"
-\fB/etc/slurmdb.conf\fP is an ASCII file which describes Slurm Database 
+\fB/etc/slurmdb.conf\fP is an ASCII file which describes Slurm Database
 Daemon (SlurmDBD) configuration information.
 The file location can be modified at system build time using the
-DEFAULT_SLURM_CONF parameter. 
+DEFAULT_SLURM_CONF parameter.
 .LP
-The contents of the file are case insensitive except for the names of nodes 
-and files. Any text following a "#" in the configuration file is treated 
-as a comment through the end of that line. 
+The contents of the file are case insensitive except for the names of nodes
+and files. Any text following a "#" in the configuration file is treated
+as a comment through the end of that line.
 The size of each line in the file is limited to 1024 characters.
-Changes to the configuration file take effect upon restart of 
+Changes to the configuration file take effect upon restart of
 SlurmDbd or daemon receipt of the SIGHUP signal unless otherwise noted.
 .LP
-This file should be only on the computer where SlurmDBD executes and 
+This file should be only on the computer where SlurmDBD executes and
 should only be readable by the user which executes SlurmDBD (e.g. "slurm").
 This file should be protected from unauthorized access since it
 contains a database password.
@@ -23,8 +23,8 @@ The overall configuration parameters available include:
 
 .TP
 \fBArchiveDir\fR
-If ArchiveScript is not set the slurmdbd will generate a text file that can be 
-read in anytime with sacctmgr load filename.  This directory is where the 
+If ArchiveScript is not set the slurmdbd will generate a text file that can be
+read in anytime with sacctmgr load filename.  This directory is where the
 file will be placed archive has ran.  Default is /tmp.
 
 .TP
@@ -38,11 +38,11 @@ Boolean, yes to archive job data, no other wise.  Default is no.
 .TP
 \fBArchiveScript\fR
 This script is executed periodically in order to transfer accounting
-records out of the database into an archive. The script is executed 
+records out of the database into an archive. The script is executed
 with a no arguments, The following environment variables are set.
 .RS
 .TP
-\fBSLURM_ARCHIVE_EVENTS\fR 
+\fBSLURM_ARCHIVE_EVENTS\fR
 1 for archive events 0 otherwise.
 .TP
 \fBSLURM_ARCHIVE_LAST_EVENT\fR
@@ -54,13 +54,13 @@ Time of last event start to archive.
 \fBSLURM_ARCHIVE_LAST_JOB\fR
 Time of last job submit to archive.
 .TP
-\fBSLURM_ARCHIVE_STEPS\fR 
+\fBSLURM_ARCHIVE_STEPS\fR
 1 for archive steps 0 otherwise.
 .TP
 \fBSLURM_ARCHIVE_LAST_STEP\fR
 Time of last step start to archive.
 .TP
-\fBSLURM_ARCHIVE_SUSPEND\fR 
+\fBSLURM_ARCHIVE_SUSPEND\fR
 1 for archive suspend data 0 otherwise.
 .TP
 \fBSLURM_ARCHIVE_LAST_SUSPEND\fR
@@ -78,126 +78,126 @@ Boolean, yes to archive suspend data, no other wise.  Default is no.
 
 .TP
 \fBAuthInfo\fR
-Additional information to be used for authentication of communications 
+Additional information to be used for authentication of communications
 with the Slurm control daemon (slurmctld) on each cluster.
 The interpretation of this option is specific to the configured \fBAuthType\fR.
-In the case of \fIauth/munge\fR, this can be configured to use a Munge daemon 
-specifically configured to provide authentication between clusters while the 
-default Munge daemon provides authentication within a cluster. 
+In the case of \fIauth/munge\fR, this can be configured to use a Munge daemon
+specifically configured to provide authentication between clusters while the
+default Munge daemon provides authentication within a cluster.
 In that case, this will specify the pathname of the socket to use.
-The default value is NULL, which results in the default authentication 
+The default value is NULL, which results in the default authentication
 mechanism being used.
 
 .TP
 \fBAuthType\fR
-Define the authentication method for communications between SLURM 
-components. 
-Acceptable values at present include "auth/none", "auth/authd", 
+Define the authentication method for communications between SLURM
+components.
+Acceptable values at present include "auth/none", "auth/authd",
 and "auth/munge".
-The default value is "auth/none", which means the UID included in 
-communication messages is not verified. 
-This may be fine for testing purposes, but 
+The default value is "auth/none", which means the UID included in
+communication messages is not verified.
+This may be fine for testing purposes, but
 \fBdo not use "auth/none" if you desire any security\fR.
 "auth/authd" indicates that Brett Chun's authd is to be used (see
 "http://www.theether.org/authd/" for more information).
 "auth/munge" indicates that LLNL's Munge system is to be used
-(this is the best supported authentication mechanism for SLURM, 
+(this is the best supported authentication mechanism for SLURM,
 see "http://home.gna.org/munge/" for more information).
-SlurmDbd must be terminated prior to changing the value of \fBAuthType\fR 
+SlurmDbd must be terminated prior to changing the value of \fBAuthType\fR
 and later restarted.
 
 .TP
 \fBDbdBackupHost\fR
-The name of the machine where the backup Slurm Database Daemon is executed. 
+The name of the machine where the backup Slurm Database Daemon is executed.
 This host must have access to the same underlying database specified by
 the 'Storage' options mentioned below.
 This should be a node name without the full domain name.  I.e., the hostname
-returned by the \fIgethostname()\fR function cut at the first dot (e.g. use 
+returned by the \fIgethostname()\fR function cut at the first dot (e.g. use
 "tux001" rather than "tux001.my.com").
 
 .TP
 \fBDbdHost\fR
-The name of the machine where the Slurm Database Daemon is executed. 
+The name of the machine where the Slurm Database Daemon is executed.
 This should be a node name without the full domain name.  I.e., the hostname
-returned by the \fIgethostname()\fR function cut at the first dot (e.g. use 
+returned by the \fIgethostname()\fR function cut at the first dot (e.g. use
 "tux001" rather than "tux001.my.com").  This value must be specified.
 
 .TP
 \fBDbdPort\fR
-The port number that the Slurm Database Daemon (slurmdbd) listens 
-to for work. The default value is SLURMDBD_PORT as established at system 
+The port number that the Slurm Database Daemon (slurmdbd) listens
+to for work. The default value is SLURMDBD_PORT as established at system
 build time. If none is explicitly specified, it will be set to 6819.
 This value must be equal to the \fBSlurmDbdPort\fR parameter in the
 slurm.conf file.
 
 .TP
 \fBDebugLevel\fR
-The level of detail to provide the Slurm Database Daemon's logs. 
-Values from 0 to 9 are legal, with `0' being "quiet" operation and 
+The level of detail to provide the Slurm Database Daemon's logs.
+Values from 0 to 9 are legal, with `0' being "quiet" operation and
 `9' being insanely verbose.
 The default value is 3.
 
 .TP
 \fBDefaultQOS\fR
-When adding a new cluster this will be used as the qos for the cluster 
+When adding a new cluster this will be used as the qos for the cluster
 unless something is explicitly set by the admin with the create.
- 
+
 .TP
 \fBLogFile\fR
-Fully qualified pathname of a file into which the Slurm Database Daemon's 
+Fully qualified pathname of a file into which the Slurm Database Daemon's
 logs are written.
 The default value is none (performs logging via syslog).
 
 .TP
 \fBMessageTimeout\fR
 Time permitted for a round\-trip communication to complete
-in seconds. Default value is 10 seconds. 
+in seconds. Default value is 10 seconds.
 
 .TP
 \fBPidFile\fR
-Fully qualified pathname of a file into which the Slurm Database Daemon 
+Fully qualified pathname of a file into which the Slurm Database Daemon
 may write its process ID. This may be used for automated signal processing.
 The default value is "/var/run/slurmdbd.pid".
 
 .TP
 \fBPluginDir\fR
-Identifies the places in which to look for SLURM plugins. 
-This is a colon\-separated list of directories, like the PATH 
-environment variable. 
+Identifies the places in which to look for SLURM plugins.
+This is a colon\-separated list of directories, like the PATH
+environment variable.
 The default value is "/usr/local/lib/slurm".
 
 .TP
 \fBPrivateData\fR
 This controls what type of information is hidden from regular users.
 By default, all information is visible to all users.
-User \fBSlurmUser\fR, \fBroot\fR, and users with AdminLevel=Admin can always 
+User \fBSlurmUser\fR, \fBroot\fR, and users with AdminLevel=Admin can always
 view all information.
 Multiple values may be specified with a comma separator.
 Acceptable values include:
 .RS
 .TP
-\fBaccounts\fR 
-prevents users from viewing any account definitions unless they are 
+\fBaccounts\fR
+prevents users from viewing any account definitions unless they are
 coordinators of them.
 .TP
-\fBjobs\fR 
+\fBjobs\fR
 prevents users from viewing job records belonging
 to other users unless they are coordinators of the association running the job
 when using sacct.
 .TP
-\fBreservations\fR 
-restricts getting reservation information to users with operator status 
+\fBreservations\fR
+restricts getting reservation information to users with operator status
 and above.
 .TP
-\fBusage\fR  
-prevents users from viewing usage of any other user.  
+\fBusage\fR
+prevents users from viewing usage of any other user.
 This applys to sreport.
 .TP
-\fBusers\fR  
-prevents users from viewing information of any user 
-other than themselves, this also makes it so users can only see 
-associations they deal with.  
-Coordinators can see associations of all users they are coordinator of, 
+\fBusers\fR
+prevents users from viewing information of any user
+other than themselves; this also makes it so users can only see
+associations they deal with.
+Coordinators can see associations of all users they are coordinator of,
 but can only see themselves when listing users.
 .RE
 
@@ -224,7 +224,7 @@ If zero (default), then job step records are never purged.
 
 .TP
 \fBPurgeSuspendMonths\fR
-Records of individual suspend times for jobs over this age are purged from the 
+Records of individual suspend times for jobs over this age are purged from the
 database.
 Aggregated information will be preserved indefinitely.
 The time is a numeric value and is a number of months.
@@ -232,11 +232,11 @@ If zero (default), then job step records are never purged.
 
 .TP
 \fBSlurmUser\fR
-The name of the user that the \fBslurmctld\fR daemon executes as. 
+The name of the user that the \fBslurmctld\fR daemon executes as.
 This user must exist on the machine executing the Slurm Database Daemon
 and have the same user ID as the hosts on which \fBslurmctld\fR execute.
 For security purposes, a user other than "root" is recommended.
-The default value is "root". 
+The default value is "root".
 
 .TP
 \fBStorageHost\fR
@@ -247,20 +247,20 @@ Ideally this should be the host on which slurmdbd executes.
 .TP
 \fBStorageBackupHost\fR
 Define the name of the backup host the database is running where we are going
-to store the data.  This can be viewed as a backup solution when the 
-StorageHost is not responding.  It is up to the backup solution to enforce the 
-coherency of the accounting information between the two hosts. With clustered 
+to store the data.  This can be viewed as a backup solution when the
+StorageHost is not responding.  It is up to the backup solution to enforce the
+coherency of the accounting information between the two hosts. With clustered
 database solutions (acitve/passive HA), you would not need to use this feature.
 Default is none.
 
 .TP
 \fBStorageLoc\fR
-Specify the name of the database as the location where accounting 
+Specify the name of the database as the location where accounting
 records are written.
 
 .TP
 \fBStoragePass\fR
-Define the password used to gain access to the database to store 
+Define the password used to gain access to the database to store
 the job accounting data.
 
 .TP
@@ -271,7 +271,7 @@ with the database.
 .TP
 \fBStorageType\fR
 Define the accounting storage mechanism type.
-Acceptable values at present include 
+Acceptable values at present include
 "accounting_storage/gold", "accounting_storage/mysql", and
 "accounting_storage/pgsql".
 The value "accounting_storage/gold" indicates that account records
@@ -279,13 +279,13 @@ will be written to Gold
 (http://www.clusterresources.com/pages/products/gold-allocation-manager.php),
 which maintains its own database.
 The value "accounting_storage/mysql" indicates that accounting records
-should be written to a MySQL database specified by the 
+should be written to a MySQL database specified by the
 \fStorageLoc\fR parameter.
 The value "accounting_storage/pgsql" indicates that accounting records
-should be written to a PostgreSQL database specified by the 
-\fBStorageLoc\fR parameter.  This plugin is not complete and 
+should be written to a PostgreSQL database specified by the
+\fBStorageLoc\fR parameter.  This plugin is not complete and
 should not be used if wanting to use associations.  It will however work with
-basic accounting of jobs and job steps.  If interested in 
+basic accounting of jobs and job steps.  If interested in
 completing please email slurm-dev@lists.llnl.gov.
 This value must be specified.
 
@@ -296,11 +296,11 @@ with to store the job accounting data.
 
 .TP
 \fBTrackWCKey\fR
-Boolean yes or no.  Used to set display and track of the Workload  
+Boolean yes or no.  Used to set display and track of the Workload
 Characterization Key. Must be set to track wckey usage.
 
 .SH "EXAMPLE"
-.LP 
+.LP
 #
 .br
 # Sample /etc/slurmdbd.conf
diff --git a/doc/man/man5/topology.conf.5 b/doc/man/man5/topology.conf.5
index e14eb927b6d82be4e1c1100987537ca226723009..ef0c6f1590a4dce5586b1b617b41644c0209da26 100644
--- a/doc/man/man5/topology.conf.5
+++ b/doc/man/man5/topology.conf.5
@@ -5,17 +5,17 @@ topology.conf \- Slurm configuration file for defining the network topology
 
 .SH "DESCRIPTION"
 \fB/etc/topology.conf\fP is an ASCII file which describes the
-cluster's network topology for optimized job resource allocation. 
+cluster's network topology for optimized job resource allocation.
 The file location can be modified at system build time using the
-DEFAULT_SLURM_CONF parameter. The file will always be located in the 
-same directory as the \fBslurm.conf\fP file. 
+DEFAULT_SLURM_CONF parameter. The file will always be located in the
+same directory as the \fBslurm.conf\fP file.
 .LP
 Paramter names are case insensitive.
-Any text following a "#" in the configuration file is treated 
-as a comment through the end of that line. 
+Any text following a "#" in the configuration file is treated
+as a comment through the end of that line.
 The size of each line in the file is limited to 1024 characters.
-Changes to the configuration file take effect upon restart of 
-SLURM daemons, daemon receipt of the SIGHUP signal, or execution 
+Changes to the configuration file take effect upon restart of
+SLURM daemons, daemon receipt of the SIGHUP signal, or execution
 of the command "scontrol reconfigure" unless otherwise noted.
 .LP
 The network topology configuration one one line defining a switch name and
@@ -47,7 +47,7 @@ The units used are arbitrary and this information is currently not used.
 It may be used in the future to optimize resource allocations.
 
 .SH "EXAMPLE"
-.LP 
+.LP
 .br
 ##################################################################
 .br
diff --git a/doc/man/man5/wiki.conf.5 b/doc/man/man5/wiki.conf.5
index c7004b5fed3d54121959bc858fe4b8625e0895e4..e6f45689e3c143d980e9db037ceda4d974df1fd3 100644
--- a/doc/man/man5/wiki.conf.5
+++ b/doc/man/man5/wiki.conf.5
@@ -2,34 +2,34 @@
 .SH "NAME"
 wiki.conf \- Slurm configuration file for wiki and wiki2 scheduler plugins
 .SH "DESCRIPTION"
-\fB/etc/wiki.conf\fP is an ASCII file which describes wiki and wiki2 
-scheduler specific SLURM configuration information. 
+\fB/etc/wiki.conf\fP is an ASCII file which describes wiki and wiki2
+scheduler specific SLURM configuration information.
 The file location can be modified at system build time using the
-DEFAULT_SLURM_CONF parameter. The file will always be located in the 
-same directory as the \fBslurm.conf\fP file. 
+DEFAULT_SLURM_CONF parameter. The file will always be located in the
+same directory as the \fBslurm.conf\fP file.
 .LP
 Paramter names are case insensitive.
-Any text following a "#" in the configuration file is treated 
-as a comment through the end of that line. 
+Any text following a "#" in the configuration file is treated
+as a comment through the end of that line.
 The size of each line in the file is limited to 1024 characters.
-Changes to the configuration file take effect upon restart of 
-SLURM daemons, daemon receipt of the SIGHUP signal, or execution 
+Changes to the configuration file take effect upon restart of
+SLURM daemons, daemon receipt of the SIGHUP signal, or execution
 of the command "scontrol reconfigure" unless otherwise noted.
 .LP
 The overall configuration parameters available include:
 
 .TP
 \fBAuthKey\fR
-Authentication key for communications. 
-This should be no more than a 32\-bit unsigned integer and match the 
-KEY configured in the \fBmoab\-private.cnf\fR file (for the Moab Scheduler) 
+Authentication key for communications.
+This should be no more than a 32\-bit unsigned integer and match the
+KEY configured in the \fBmoab\-private.cnf\fR file (for the Moab Scheduler)
 or the \fB\-\-with-key=\fR value used to configure the Maui Scheduler.
 
 .TP
 \fBEHost\fR
 Name the computer on which Moab server executes.
-It is used in establishing a communications path for event notification. 
-By default \fBEHost\fR will be identical in value to the 
+It is used in establishing a communications path for event notification.
+By default \fBEHost\fR will be identical in value to the
 \fBControlAddr\fR configured in slurm.conf.
 Not applicable to wiki plugin, only the wiki2 plugin.
 
@@ -43,44 +43,44 @@ Not applicable to wiki plugin, only the wiki2 plugin.
 
 .TP
 \fBEPort\fR
-Port to be used to notify Moab of events (job submitted to SLURM, 
-job terminates, etc.). 
+Port to be used to notify Moab of events (job submitted to SLURM,
+job terminates, etc.).
 This numeric value should match EPORT configured in the
 \fBmoab.cnf\fR file.
 Not applicable to wiki plugin, only the wiki2 plugin.
 
 .TP
 \fBExcludePartitions\fR
-Identifies partitions whose jobs are to be scheduled directly 
-by SLURM rather than Moab/Maui. 
-This only effects jobs which are submitted using SLURM 
+Identifies partitions whose jobs are to be scheduled directly
+by SLURM rather than Moab/Maui.
+This only affects jobs which are submitted using SLURM
 commands (i.e. srun, salloc or sbatch, NOT msub from Moab).
-These jobs will be scheduled on a First\-Come\-First\-Served 
-basis directly by SLURM. 
-Note that SLURM recognizes jobs submitted via msub based 
+These jobs will be scheduled on a First\-Come\-First\-Served
+basis directly by SLURM.
+Note that SLURM recognizes jobs submitted via msub based
 upon the value \fBFirstJobId\fR configured in \fIslurm.conf\fR.
 Set the values \fBMINJOBID\fR and \fBMAXJOBID\fR in \fImoab.cfg\fR
-accordingly. 
+accordingly.
 For example MINJOBID=1, MAXJOBID=65535 and FirstJobId=65536.
-Jobs submitted using msub will have job ID values in the range 
+Jobs submitted using msub will have job ID values in the range
 of 1 and 65535 while jobs submitted directly using SLURM commands
 will have a job ID of 65536 or higher.
-Moab/Maui controls for resource reservation, fair share 
+Moab/Maui controls for resource reservation, fair share
 scheduling, etc. will not apply to the initiation of these jobs.
-While Moab/Maui will not control the initiation of jobs in these 
+While Moab/Maui will not control the initiation of jobs in these
 partitions, it will account for and report the jobs.
 If more than one partition is to be scheduled directly by
 SLURM, use a comma separator between their names.
-This may provide faster response times than Moab/Maui scheduling. 
+This may provide faster response times than Moab/Maui scheduling.
 
 .TP
 \fBHidePartitionJobs\fR
 Identifies partitions whose jobs are not to be reported to Moab/Maui.
 These jobs will not be accounted for or otherwise visible to Moab/Maui.
 Any partitions listed here must also be listed in \fBExcludePartitions\fR.
-This only effects jobs which are submitted using SLURM commands (i.e. 
+This only affects jobs which are submitted using SLURM commands (i.e.
 \fIsrun\fR, \fIsalloc\fR or \fIsbatch\fR, NOT \fImsub\fR from Moab).
-If more than one partition is to have its jobs hidden, use a comma 
+If more than one partition is to have its jobs hidden, use a comma
 separator between their names.
 
 .TP
@@ -94,7 +94,7 @@ No data compression. Each host name is listed individually.
 .TP
 \fB1\fR
 SLURM hostlist expressions are exchanged with task counts
-(e.g. "tux[0\-16]*2") in job state information and job 
+(e.g. "tux[0\-16]*2") in job state information and job
 initiation requests.
 .TP
 \fB2\fR
@@ -103,19 +103,19 @@ SLURM hostlist expressions are used to report node state information.
 
 .TP
 \fBJobAggregationTime\fR
-This is used to avoid notifying Moab of large numbers of events 
+This is used to avoid notifying Moab of large numbers of events
 occuring about the same time.
-If an event occurs within this number of seconds since Moab was 
+If an event occurs within this number of seconds since Moab was
 last notified of an event, another notification is not sent.
 This should be an integer number of seconds.
 The default value is 10 seconds.
-The value should match JOBAGGREGATIONTIME configured in the 
+The value should match JOBAGGREGATIONTIME configured in the
 \fBmoab.cnf\fR file.
 Not applicable to wiki plugin, only the wiki2 plugin.
 
 .TP
 \fBJobPriority\fR
-Controls initial job priority. 
+Controls initial job priority.
 The default value is "hold".
 Not applicable to wiki plugin, only the wiki2 plugin.
 .RS
@@ -128,7 +128,7 @@ Hold all incomming jobs until Moab or Maui tell them to run
 .RE
 
 .SH "EXAMPLE"
-.LP 
+.LP
 .br
 ##################################################################
 .br
@@ -164,7 +164,7 @@ EHost=tux0
 .br
 EHostBackup=tux1
 .br
-# Moab event notifcation throttle, matches JOBAGGREGATIONTIME 
+# Moab event notification throttle, matches JOBAGGREGATIONTIME
 .br
 # in moab.cfg (integer value in seconds)
 .br
diff --git a/doc/man/man8/slurmctld.8 b/doc/man/man8/slurmctld.8
index 43b852b93731d65c65667559658e6d07a33f7c81..f77b143e958801c88b34613948353e492ad63666 100644
--- a/doc/man/man8/slurmctld.8
+++ b/doc/man/man8/slurmctld.8
@@ -4,22 +4,22 @@ slurmctld \- The central management daemon of Slurm.
 .SH "SYNOPSIS"
 \fBslurmctld\fR [\fIOPTIONS\fR...]
 .SH "DESCRIPTION"
-\fBslurmctld\fR is the central management daemon of Slurm. It monitors 
-all other Slurm daemons and resources, accepts work (jobs), and allocates 
-resources to those jobs. Given the critical functionality of \fBslurmctld\fR, 
-there may be a backup server to assume these functions in the event that 
-the primary server fails. 
+\fBslurmctld\fR is the central management daemon of Slurm. It monitors
+all other Slurm daemons and resources, accepts work (jobs), and allocates
+resources to those jobs. Given the critical functionality of \fBslurmctld\fR,
+there may be a backup server to assume these functions in the event that
+the primary server fails.
 .TP
 OPTIONS
 .TP
 \fB\-c\fR
-Clear all previous \fBslurmctld\fR state from its last checkpoint. 
-If not specified, previously running jobs will be preserved along 
-with the state of DOWN, DRAINED and DRAINING nodes and the associated 
+Clear all previous \fBslurmctld\fR state from its last checkpoint.
+If not specified, previously running jobs will be preserved along
+with the state of DOWN, DRAINED and DRAINING nodes and the associated
 reason field for those nodes.
 .TP
 \fB\-D\fR
-Debug mode. Execute \fBslurmctld\fR in the foreground with logging to stdout. 
+Debug mode. Execute \fBslurmctld\fR in the foreground with logging to stdout.
 .TP
 \fB\-f <file>\fR
 Read configuration from the specified file. See \fBNOTES\fR below.
diff --git a/doc/man/man8/slurmd.8 b/doc/man/man8/slurmd.8
index e81a7a57f0cf8b244efab2243c6c15b10a7190ac..6cfde1df931ae7b739038a314e7b8349945cff31 100644
--- a/doc/man/man8/slurmd.8
+++ b/doc/man/man8/slurmd.8
@@ -7,14 +7,14 @@ slurmd \- The compute node daemon for SLURM.
 \fBslurmd\fR [\fIOPTIONS\fR...]
 
 .SH "DESCRIPTION"
-\fBslurmd\fR is the compute node daemon of Slurm. It monitors all tasks 
-running on the compute node , accepts work (tasks), launches tasks, and kills 
+\fBslurmd\fR is the compute node daemon of Slurm. It monitors all tasks
+running on the compute node, accepts work (tasks), launches tasks, and kills
 running tasks upon request.
 .TP
 OPTIONS
 .TP
 \fB\-c\fR
-Clear system locks as needed. This may be required if \fBslurmd\fR terminated 
+Clear system locks as needed. This may be required if \fBslurmd\fR terminated
 abnormally.
 .TP
 \fB\-d <file>\fR
diff --git a/doc/man/man8/slurmdbd.8 b/doc/man/man8/slurmdbd.8
index 0a3f377f0115c7c08b758bb6624f6df9f642da71..df8725ada3c26895940aa39544c5102fde3b1279 100644
--- a/doc/man/man8/slurmdbd.8
+++ b/doc/man/man8/slurmdbd.8
@@ -12,7 +12,7 @@ for Slurm. This is particularly useful for archiving accounting records.
 OPTIONS
 .TP
 \fB\-D\fR
-Debug mode. Execute \fBslurmdbd\fR in the foreground with logging to stdout. 
+Debug mode. Execute \fBslurmdbd\fR in the foreground with logging to stdout.
 .TP
 \fB\-h\fR
 Help; print a brief summary of command options.
diff --git a/doc/man/man8/slurmstepd.8 b/doc/man/man8/slurmstepd.8
index 3a81beca98151d01994299a3196be182e923e571..3bbd3a711a74dcfc5fa13f8153700c226c14fee2 100644
--- a/doc/man/man8/slurmstepd.8
+++ b/doc/man/man8/slurmstepd.8
@@ -4,11 +4,11 @@ slurmstepd \- The job step manager for SLURM.
 .SH "SYNOPSIS"
 \fBslurmstepd\fR
 .SH "DESCRIPTION"
-\fBslurmstepd\fR is a job step manager for SLURM. 
-It is spawned by the \fBslurmd\fR daemon when a job step is launched 
-and terminates when the job step does. 
+\fBslurmstepd\fR is a job step manager for SLURM.
+It is spawned by the \fBslurmd\fR daemon when a job step is launched
+and terminates when the job step does.
 It is responsible for managing input and output (stdin, stdout and stderr)
-for the job step along with its accounting and signal processing. 
+for the job step along with its accounting and signal processing.
 \fBslurmstepd\fR should not be initiated by users or system administrators.
 .SH "COPYING"
 Copyright (C) 2006 The Regents of the University of California.
diff --git a/doc/man/man8/spank.8 b/doc/man/man8/spank.8
index e159add109a97d0d7323d67576d14be6fe6d1d2b..237f0b3652752c15d2bf83e11be03f0441dff1a2 100644
--- a/doc/man/man8/spank.8
+++ b/doc/man/man8/spank.8
@@ -1,14 +1,14 @@
 .TH "SPANK" "8" "June 2009" "SPANK" "SLURM plug\-in architecture for Node and job (K)control"
 
 .SH "NAME"
-\fBSPANK\fR \- SLURM Plug\-in Architecture for Node and job (K)control 
+\fBSPANK\fR \- SLURM Plug\-in Architecture for Node and job (K)control
 
 .SH "DESCRIPTION"
 This manual briefly describes the capabilities of the SLURM Plug\-in
 architecture for Node and job Kontrol (\fBSPANK\fR) as well as the \fBSPANK\fR
 configuration file: (By default: \fBplugstack.conf\fP.)
 .LP
-\fBSPANK\fR provides a very generic interface for stackable plug\-ins 
+\fBSPANK\fR provides a very generic interface for stackable plug\-ins
 which may be used to dynamically modify the job launch code in
 SLURM. \fBSPANK\fR plugins may be built without access to SLURM source
 code. They need only be compiled against SLURM's \fBspank.h\fR header file,
@@ -45,7 +45,7 @@ Plugins may query the context in which they are running with the
 \fBSPANK\fR plugins may be called from multiple points during the SLURM job
 launch. A plugin may define the following functions:
 .TP 2
-\fBslurm_spank_init\fR 
+\fBslurm_spank_init\fR
 Called just after plugins are loaded. In remote context, this is just
 after job step is initialized. This function is called before any plugin
 option processing.
@@ -59,30 +59,30 @@ the \fBinit\fR callback, then process user options, and finally take some
 action in \fBslurm_spank_init_post_opt\fR if necessary.
 .TP
 \fBslurm_spank_local_user_init\fR
-Called in local (\fBsrun\fR) context only after all 
-options have been processed. 
+Called in local (\fBsrun\fR) context only after all
+options have been processed.
 This is called after the job ID and step IDs are available.
-This happens in \fBsrun\fR after the allocation is made, but before 
+This happens in \fBsrun\fR after the allocation is made, but before
 tasks are launched.
 .TP
-\fBslurm_spank_user_init\fR 
+\fBslurm_spank_user_init\fR
 Called after privileges are temporarily dropped. (remote context only)
 .TP
 \fBslurm_spank_task_init_privileged\fR
 Called for each task just after fork, but before all elevated privileges
 are dropped. (remote context only)
 .TP
-\fBslurm_spank_task_init\fR 
+\fBslurm_spank_task_init\fR
 Called for each task just before execve(2). (remote context only)
 .TP
-\fBslurm_spank_task_post_fork\fR 
+\fBslurm_spank_task_post_fork\fR
 Called for each task from parent process after fork(2) is complete.
 Due to the fact that \fBslurmd\fR does not exec any tasks until all
 tasks have completed fork(2), this call is guaranteed to run before
 the user task is executed. (remote context only)
 .TP
 \fBslurm_spank_task_exit\fR
-Called for each task as its exit status is collected by SLURM. 
+Called for each task as its exit status is collected by SLURM.
 (remote context only)
 .TP
 \fBslurm_spank_exit\fR
@@ -115,7 +115,7 @@ which has the following prototype:
 .LP
 The return value is 1 if the symbol is supported, 0 if not.
 .LP
-\fBSPANK\fR plugins do not have direct access to internally defined SLURM 
+\fBSPANK\fR plugins do not have direct access to internally defined SLURM
 data structures. Instead, information about the currently executing
 job is obtained via the \fBspank_get_item\fR function call.
 .nf
@@ -155,12 +155,12 @@ searches the job's environment for the environment variable
 of length \fIlen\fR.  \fBspank_setenv\fR allows a \fBSPANK\fR
 plugin to set or overwrite a variable in the job's environment,
 and \fBspank_unsetenv\fR unsets an environment variable in
-the job's environment. The prototypes are: 
+the job's environment. The prototypes are:
 .nf
 
- spank_err_t \fBspank_getenv\fR (spank_t spank, const char *var, 
+ spank_err_t \fBspank_getenv\fR (spank_t spank, const char *var,
 		           char *buf, int len);
- spank_err_t \fBspank_setenv\fR (spank_t spank, const char *var, 
+ spank_err_t \fBspank_setenv\fR (spank_t spank, const char *var,
 		           const char *val, int overwrite);
  spank_err_t \fBspank_unsetenv\fR (spank_t spank, const char *var);
 .fi
@@ -169,21 +169,21 @@ These are only necessary in remote context since modifications of
 the standard process environment using \fBsetenv\fR(3), \fBgetenv\fR(3),
 and \fBunsetenv\fR(3) may be used in local context.
 .LP
-Functions are also available from within the \fBSPANK\fR plugins to 
-establish environment variables to be exported to the SLURM 
-\fBPrologSlurmctld\fR, \fBProlog\fR, \fBEpilog\fR and \fBEpilogSlurmctld\fR 
+Functions are also available from within the \fBSPANK\fR plugins to
+establish environment variables to be exported to the SLURM
+\fBPrologSlurmctld\fR, \fBProlog\fR, \fBEpilog\fR and \fBEpilogSlurmctld\fR
 programs. While designed for \fBSPANK\fR plugin use, hackers could
 insert arbitrary environment variables, so their use by the various
 prolog and epilog programs should avoid possible security compromises.
 SLURM does block the setting of LD_PRELOAD and PATH as a precausionary measure.
-These environment variables are not otherwise visible 
-to the job or \fBSPANK\fR functions. 
-These environment variable functons may only called from the local contact. 
-The syntax of these functions is identical to the getenv, 
+These environment variables are not otherwise visible
+to the job or \fBSPANK\fR functions.
+These environment variable functions may only be called from the local context.
+The syntax of these functions is identical to the getenv,
 setenv, and unsetenv functions respectively:
 .nf
   char *\fBspank_get_job_env\fR(const char *name);
-  int   \fBspank_set_job_env\fR(const char *name, const char *value, 
+  int   \fBspank_set_job_env\fR(const char *name, const char *value,
 			        int overwrite);
   int   \fBspank_unset_job_env\fR(const char *name);
 .fi
@@ -219,12 +219,12 @@ a \fBstruct spank_option\fR which is declared in \fB<slurm/spank.h>\fR as
 .nf
 
    struct spank_option {
-      char *         name;    
+      char *         name;
       char *         arginfo;
-      char *         usage; 
+      char *         usage;
       int            has_arg;
-      int            val;    
-      spank_opt_cb_f cb;     
+      int            val;
+      spank_opt_cb_f cb;
    };
 
 .fi
@@ -241,7 +241,7 @@ an argument.
 .TP
 .I usage
 is a short description of the option suitable for \-\-help output.
-.TP 
+.TP
 .I has_arg
 0 if option takes an argument, 1 if option takes no argument, and
 2 if the option takes an optional argument. (See \fBgetopt_long\fR(3)).
@@ -251,26 +251,26 @@ A plugin\-local value to return to the option callback function.
 .TP
 .I cb
 A callback function that is invoked when the plugin option is
-registered with SLURM. \fBspank_opt_cb_f\fR is typedef'd in 
-\fB<slurm/spank.h>\fR as 
+registered with SLURM. \fBspank_opt_cb_f\fR is typedef'd in
+\fB<slurm/spank.h>\fR as
 .nf
 
-  typedef int (*spank_opt_cb_f) (int val, const char *optarg, 
+  typedef int (*spank_opt_cb_f) (int val, const char *optarg,
 		                 int remote);
 
 .fi
 Where \fIval\fR is the value of the \fIval\fR field in the \fBspank_option\fR
 struct, \fIoptarg\fR is the supplied argument if applicable, and \fIremote\fR
-is 0 if the function is being called from the "local" host 
+is 0 if the function is being called from the "local" host
 (e.g. \fBsrun\fR) or 1 from the "remote" host (\fBslurmd\fR).
 .LP
 Plugin options may be registered with SLURM using
 the \fBspank_option_register\fR function. This function is only valid
-when called from the plugin's \fBslurm_spank_init\fR handler, and 
+when called from the plugin's \fBslurm_spank_init\fR handler, and
 registers one option at a time. The prototype is
 .nf
 
-   spank_err_t spank_option_register (spank_t sp, 
+   spank_err_t spank_option_register (spank_t sp,
 		   struct spank_option *opt);
 
 .fi
@@ -299,7 +299,7 @@ the use of \fBspank_option_register\fR is preferred. When using the
 filled with zeros. A \fBSPANK_OPTIONS_TABLE_END\fR macro is provided
 in \fB<slurm/spank.h>\fR for this purpose.
 .LP
-When an option is provided by the user on the local side, \fBSLURM\fR will 
+When an option is provided by the user on the local side, \fBSLURM\fR will
 immediately invoke the option's callback with \fIremote\fR=0. This
 is meant for the plugin to do local sanity checking of the option before
 the value is sent to the remote side during job launch. If the argument
@@ -317,12 +317,12 @@ functionality based on the value of user\-provided options.
 The default \fBSPANK\fR plug\-in stack configuration file is
 \fBplugstack.conf\fR in the same directory as \fBslurm.conf\fR(5),
 though this may be changed via the SLURM config parameter
-\fIPlugStackConfig\fR.  Normally the \fBplugstack.conf\fR file 
+\fIPlugStackConfig\fR.  Normally the \fBplugstack.conf\fR file
 should be identical on all nodes of the cluster.
 The config file lists \fBSPANK\fR plugins,
 one per line, along with whether the plugin is \fIrequired\fR or
 \fIoptional\fR, and any global arguments that are to be passed to
-the plugin for runtime configuration.  Comments are preceded with '#' 
+the plugin for runtime configuration.  Comments are preceded with '#'
 and extend to the end of the line.  If the configuration file
 is missing or empty, it will simply be ignored.
 .LP
@@ -369,7 +369,7 @@ a spank plugin stack.
 The \fBSPANK\fR config file is re\-read on each job launch, so editing
 the config file will not affect running jobs. However care should
 be taken so that a partially edited config file is not read by a
-launching job.  
+launching job.
 
 .SH "EXAMPLES"
 .LP
@@ -382,19 +382,19 @@ Simple \fBSPANK\fR config file:
 # required?       plugin                     args
 #
 optional          renice.so                  min_prio=\-10
-required          /usr/lib/slurm/test.so     
+required          /usr/lib/slurm/test.so
 
 .fi
 .LP
 The following is a simple \fBSPANK\fR plugin to modify the nice value
-of job tasks. This plugin adds a \-\-renice=[prio] option to \fBsrun\fR 
+of job tasks. This plugin adds a \-\-renice=[prio] option to \fBsrun\fR
 which users can use to set the priority of all remote tasks. Priority may
 also be specified via a SLURM_RENICE environment variable. A minimum
 priority may be established via a "min_prio" parameter in \fBplugstack.conf\fR
 (See above for example).
 .nf
 
-/*  
+/*
  *   To compile:
  *    gcc \-shared \-o renice.so renice.c
  *
@@ -453,7 +453,7 @@ int slurm_spank_init (spank_t sp, int ac, char **av)
     for (i = 0; i < ac; i++) {
         if (strncmp ("min_prio=", av[i], 9) == 0) {
             const char *optarg = av[i] + 9;
-            if (_str2prio (optarg, &min_prio) < 0) 
+            if (_str2prio (optarg, &min_prio) < 0)
                 slurm_error ("Ignoring invalid min_prio value: %s", av[i]);
         }
         else {
@@ -474,7 +474,7 @@ int slurm_spank_task_post_fork (spank_t sp, int ac, char **av)
     int taskid;
 
     if (prio == PRIO_NOT_SET) {
-        /* 
+        /*
          *  See if SLURM_RENICE env var is set by user
          */
         char val [1024];
@@ -488,11 +488,11 @@ int slurm_spank_task_post_fork (spank_t sp, int ac, char **av)
         }
 
     if (prio < min_prio)
-        slurm_error ("%s=%d not allowed, using min=%d", 
+        slurm_error ("%s=%d not allowed, using min=%d",
             PRIO_ENV_VAR, prio, min_prio);
     }
 
-    if (prio < min_prio) 
+    if (prio < min_prio)
         prio = min_prio;
 
     spank_get_item (sp, S_TASK_GLOBAL_ID, &taskid);
@@ -514,7 +514,7 @@ static int _str2prio (const char *str, int *p2int)
     char *p;
 
     l = strtol (str, &p, 10);
-    if ((*p != '\0') || (l < \-20) || (l > 20)) 
+    if ((*p != '\0') || (l < \-20) || (l > 20))
         return (\-1);
 
     *p2int = (int) l;
@@ -528,14 +528,14 @@ static int _renice_opt_process (int val, const char *optarg, int remote)
         slurm_error ("renice: invalid argument!");
         return (\-1);
     }
-        
+
     if (_str2prio (optarg, &prio) < 0) {
         slurm_error ("Bad value for \-\-renice: %s", optarg);
         return (\-1);
     }
 
-    if (prio < min_prio) 
-        slurm_error ("\-\-renice=%d not allowed, will use min=%d", 
+    if (prio < min_prio)
+        slurm_error ("\-\-renice=%d not allowed, will use min=%d",
                      prio, min_prio);
 
     return (0);
diff --git a/doc/pubdesign/Makefile b/doc/pubdesign/Makefile
index 282b42f89fdfff3c3ca69d839bfb401d76858e4d..1863cb2280eaa5b434befcae3f93beab4cc84a9b 100644
--- a/doc/pubdesign/Makefile
+++ b/doc/pubdesign/Makefile
@@ -10,7 +10,7 @@
 
 REPORT = report
 
-TEX = ../common/llnlCoverPage.tex $(REPORT).tex 
+TEX = ../common/llnlCoverPage.tex $(REPORT).tex
 
 FIGDIR = ../figures
 FIGS = $(FIGDIR)/allocate-init.eps \
@@ -22,12 +22,12 @@ FIGS = $(FIGDIR)/allocate-init.eps \
        $(FIGDIR)/slurm-arch.eps \
        $(FIGDIR)/times.eps
 
-PLOTS = $(FIGDIR)/times.eps 
+PLOTS = $(FIGDIR)/times.eps
 
 BIB = ../common/project.bib
 
 %.eps: %.dia
-	dia --nosplash -e $@ $< 
+	dia --nosplash -e $@ $<
 %.eps: %.gpl
 	gnuplot $<
 %.eps: %.fig
@@ -37,9 +37,9 @@ BIB = ../common/project.bib
 %.ps: %.dvi
 	dvips -K -t letter -o $(@F) $(<F)
 %.pdf: %.dvi
-	dvipdf $< $@ 
+	dvipdf $< $@
 
-all: $(REPORT).ps 
+all: $(REPORT).ps
 
 
 $(REPORT).dvi: $(TEX) $(FIGS) $(PLOTS) $(BIB)
@@ -54,6 +54,6 @@ $(REPORT).dvi: $(TEX) $(FIGS) $(PLOTS) $(BIB)
 view: $(REPORT).ps
 	ghostview $(REPORT) &
 
-clean: 
+clean:
 	rm -f *~ *.dvi *.log *.aux $(REPORT).ps *.blg *.bbl #*.eps #*.gif *.ps
-	      
+
diff --git a/doc/pubdesign/report.tex b/doc/pubdesign/report.tex
index eacaa353a51a61cac2e22cdf4edf2a1b29682b7b..bfc7f3f557d10f6a0b74e40a87c0cb40c587d0dc 100644
--- a/doc/pubdesign/report.tex
+++ b/doc/pubdesign/report.tex
@@ -1,30 +1,30 @@
-% Presenter info: 
+% Presenter info:
 % http://www.linuxclustersinstitute.org/Linux-HPC-Revolution/presenterinfo.html
 %
 % Main Text Layout
-% Set the main text in 10 point Times Roman or Times New Roman (normal), 
-% (no boldface), using single line spacing. All text should be in a single 
-% column and justified. 
+% Set the main text in 10 point Times Roman or Times New Roman (normal),
+% (no boldface), using single line spacing. All text should be in a single
+% column and justified.
 %
 % Opening Style (First Page)
-% This includes the title of the paper, the author names, organization and 
+% This includes the title of the paper, the author names, organization and
 % country, the abstract, and the first part of the paper.
-% * Start the title 35mm down from the top margin in Times Roman font, 16 
-%   point bold, range left. Capitalize only the first letter of the first 
+% * Start the title 35mm down from the top margin in Times Roman font, 16
+%   point bold, range left. Capitalize only the first letter of the first
 %   word and proper nouns.
-% * On a new line, type the authors' names, organizations, and country only 
-%   (not the full postal address, although you may add the name of your 
+% * On a new line, type the authors' names, organizations, and country only
+%   (not the full postal address, although you may add the name of your
 %   department), in Times Roman, 11 point italic, range left.
-% * Start the abstract with the heading two lines below the last line of the 
+% * Start the abstract with the heading two lines below the last line of the
 %   address. Set the abstract in Times Roman, 12 point bold.
-% * Leave one line, then type the abstract in Times Roman 10 point, justified 
+% * Leave one line, then type the abstract in Times Roman 10 point, justified
 %   with single line spacing.
 %
 % Other Pages
-% For the second and subsequent pages, use the full 190 x 115mm area and type 
-% in one column beginning at the upper right of each page, inserting tables 
+% For the second and subsequent pages, use the full 190 x 115mm area and type
+% in one column beginning at the upper right of each page, inserting tables
 % and figures as required.
-% 
+%
 % We're recommending the Lecture Notes in Computer Science styles from
 % Springer Verlag --- google on Springer Verlag LaTeX.  These work nicely,
 % *except* that it does not work with the hyperref package. Sigh.
@@ -112,13 +112,13 @@ architecture and functionality.
 
 \section{Overview}
 
-Simple Linux Utility for Resource Management (SLURM)\footnote{A tip of 
+Simple Linux Utility for Resource Management (SLURM)\footnote{A tip of
 the hat to Matt Groening and creators of {\em Futurama},
-where Slurm is the most popular carbonated beverage in the universe.} 
-is a resource management system suitable for use on large and small Linux 
-clusters.  After surveying \cite{Jette2002} resource managers available 
-for Linux and finding none that were simple, highly scalable, and portable 
-to different cluster architectures and interconnects, the authors set out 
+where Slurm is the most popular carbonated beverage in the universe.}
+is a resource management system suitable for use on large and small Linux
+clusters.  After surveying \cite{Jette2002} resource managers available
+for Linux and finding none that were simple, highly scalable, and portable
+to different cluster architectures and interconnects, the authors set out
 to design a new system.
 
 The resulting design is a resource management system with the following general
@@ -126,25 +126,25 @@ characteristics:
 
 \begin{itemize}
 \item {\tt Simplicity}: SLURM is simple enough to allow motivated end users
-to understand its source code and add functionality.  The authors will 
-avoid the temptation to add features unless they are of general appeal. 
+to understand its source code and add functionality.  The authors will
+avoid the temptation to add features unless they are of general appeal.
 
-\item {\tt Open Source}: SLURM is available to everyone and will remain free. 
-Its source code is distributed under the GNU General Public 
+\item {\tt Open Source}: SLURM is available to everyone and will remain free.
+Its source code is distributed under the GNU General Public
 License \cite{GPL2002}.
 
-\item {\tt Portability}: SLURM is written in the C language, with a GNU 
-{\em autoconf} configuration engine.  
-While initially written for Linux, other Unix-like operating systems 
+\item {\tt Portability}: SLURM is written in the C language, with a GNU
+{\em autoconf} configuration engine.
+While initially written for Linux, other Unix-like operating systems
 should be easy porting targets.
-SLURM also supports a general purpose ``plugin'' mechanism, which 
-permits a variety of different infrastructures to be easily supported. 
-The SLURM configuration file specifies which set of plugin modules 
-should be used. 
+SLURM also supports a general purpose ``plugin'' mechanism, which
+permits a variety of different infrastructures to be easily supported.
+The SLURM configuration file specifies which set of plugin modules
+should be used.
 
 \item {\tt Interconnect Independence}: SLURM currently supports UDP/IP-based
-communication and the Quadrics Elan3 interconnect.  Adding support for 
-other interconnects, including topography constraints, is straightforward 
+communication and the Quadrics Elan3 interconnect.  Adding support for
+other interconnects, including topography constraints, is straightforward
 and utilizes the plugin mechanism described above.
 
 \item {\tt Scalability}: SLURM is designed for scalability to clusters of
@@ -161,7 +161,7 @@ executing.  The user command controlling a job, {\tt srun}, may detach
 and reattach from the parallel tasks at any time.  Nodes allocated to
 a job are available for reuse as soon as the job(s) allocated to that
 node terminate.  If some nodes fail to complete job termination in a
-timely fashion because of hardware or software problems, only the 
+timely fashion because of hardware or software problems, only the
 scheduling of those tardy nodes will be affected.
 
 \item {\tt Security}: SLURM employs crypto technology to authenticate
@@ -277,7 +277,7 @@ explained in more detail below.
 
 \slurmd\ is a multi-threaded daemon running on each compute node and
 can be compared to a remote shell daemon: it reads the common SLURM
-configuration file and saved state information, 
+configuration file and saved state information,
 notifies the controller that it is active, waits
 for work, executes the work, returns status, then waits for more work.
 Because it initiates jobs for other users, it must run as user {\em root}.
@@ -301,7 +301,7 @@ a process may include terminating all members of a process group and
 executing an epilog program.
 
 \item {\tt Stream Copy Service}: Allow handling of stderr, stdout, and
-stdin of remote tasks. Job input may be redirected 
+stdin of remote tasks. Job input may be redirected
 from a single file or multiple files (one per task), an
 \srun\ process, or /dev/null.  Job output may be saved into local files or
 returned to the \srun\ command. Regardless of the location of stdout/err,
@@ -318,7 +318,7 @@ requests to any set of locally managed processes.
 Most SLURM state information exists in {\tt slurmctld}, also known as
 the controller.  \slurmctld\ is multi-threaded with independent read
 and write locks for the various data structures to enhance scalability.
-When \slurmctld\ starts, it reads the SLURM configuration file and 
+When \slurmctld\ starts, it reads the SLURM configuration file and
 any previously saved state information.  Full controller state
 information is written to disk periodically, with incremental changes
 written to disk immediately for fault tolerance.  \slurmctld\ runs in
@@ -364,9 +364,9 @@ scheduling cycle as described above.
 
 \subsection{Command Line Utilities}
 
-The command line utilities offer users access to remote execution and 
-job control. They also permit administrators to dynamically change 
-the system configuration. These commands use SLURM APIs that are 
+The command line utilities offer users access to remote execution and
+job control. They also permit administrators to dynamically change
+the system configuration. These commands use SLURM APIs that are
 directly available for more sophisticated applications.
 
 \begin{itemize}
@@ -411,9 +411,9 @@ by the SLURM libraries.  A plugin provides a customized implementation
 of a well-defined API connected to tasks such as authentication,
 interconnect fabric, and task scheduling.  A common set of functions is defined
 for use by all of the different infrastructures of a particular variety.
-For example, the authentication plugin must define functions such as 
+For example, the authentication plugin must define functions such as
 {\tt slurm\_auth\_create} to create a credential, {\tt slurm\_auth\_verify}
-to verify a credential to approve or deny authentication, 
+to verify a credential to approve or deny authentication,
 {\tt slurm\_auth\_get\_uid} to get the uid associated with a specific
 credential, etc.  It also must define the data structure used, a plugin
 type, a plugin version number, etc.  When a SLURM daemon is initiated, it
@@ -449,7 +449,7 @@ SLURM has a simple security model: any user of the cluster may submit
 parallel jobs to execute and cancel his own jobs.  Any user may view
 SLURM configuration and state information.  Only privileged users
 may modify the SLURM configuration, cancel any job, or perform other
-restricted activities.  Privileged users in SLURM include the users 
+restricted activities.  Privileged users in SLURM include the users
 {\em root} and {\em SlurmUser} (as defined in the SLURM configuration file).
 If permission to modify SLURM configuration is required by others, set-uid
 programs may be used to grant specific permissions to specific users.
@@ -460,7 +460,7 @@ Historically, inter-node authentication has been accomplished via the use
 of reserved ports and set-uid programs. In this scheme, daemons check the
 source port of a request to ensure that it is less than a certain value
 and thus only accessible by {\em root}. The communications over that
-connection are then implicitly trusted.  Because reserved ports are a 
+connection are then implicitly trusted.  Because reserved ports are a
 limited resource and set-uid programs are a possible security concern,
 we have employed a credential-based authentication scheme that
 does not depend on reserved ports. In this design, a SLURM authentication
@@ -471,12 +471,12 @@ and gid from the credential as the authoritative identity of the sender.
 
 The actual implementation of the SLURM authentication credential is
 relegated to an ``auth'' plugin.  We presently have implemented three
-functional authentication plugins: authd\cite{Authd2002}, 
+functional authentication plugins: authd\cite{Authd2002},
 Munge, and none.  The ``none'' authentication type employs a null
 credential and is only suitable for testing and networks where security
 is not a concern. Both the authd and Munge implementations employ
 cryptography to generate a credential for the requesting user that
-may then be authoritatively verified on any remote nodes. However, 
+may then be authoritatively verified on any remote nodes. However,
 authd assumes a secure network and Munge does not.  Other authentication
 implementations, such as a credential based on Kerberos, should be easy
 to develop using the auth plugin API.
@@ -511,7 +511,7 @@ Unix groups using a {\em AllowGroups} specification.
 
 In this example a user wishes to run a job in batch mode, in which \srun\
 returns immediately and the job executes in the background when resources
-are available.  The job is a two-node run of script containing {\em mping}, 
+are available.  The job is a two-node run of script containing {\em mping},
 a simple MPI application.  The user submits the job:
 
 \begin{verbatim}
@@ -530,17 +530,17 @@ current working directory, and command line option information. By
 default, stdout and stderr are sent to files in the current working
 directory and stdin is copied from {\tt /dev/null}.
 
-The controller consults the Partition Manager to test whether the job 
+The controller consults the Partition Manager to test whether the job
 will ever be able to run.  If the user has requested a non-existent partition,
-a non-existent constraint, 
+a non-existent constraint,
 etc., the Partition Manager returns an error and the request is discarded.
-The failure is reported to \srun\, which informs the user and exits, for 
+The failure is reported to \srun\, which informs the user and exits, for
 example:
 \begin{verbatim}
 srun: error: Unable to allocate resources: Invalid partition name
 \end{verbatim}
 
-On successful submission, the controller assigns the job a unique 
+On successful submission, the controller assigns the job a unique
 {\em SLURM job id}, adds it to the job queue, and returns the job's
 job id to \srun\, which reports this to user and exits, returning
 success to the user's shell:
@@ -586,20 +586,20 @@ copied to a file in the current working directory by \srun :
 /path/to/cwd/slurm-42.out
 \end{verbatim}
 
-The user may examine output files at any time if they reside 
+The user may examine output files at any time if they reside
 in a globally accessible directory. In this example
-{\tt slurm-42.out} would  contain the output of the job script's two 
+{\tt slurm-42.out} would  contain the output of the job script's two
 commands (hostname and mping):
 
 \begin{verbatim}
 dev6
 dev7
-  1 pinged   0:        1 bytes      5.38 uSec     0.19 MB/s                     
-  1 pinged   0:        2 bytes      5.32 uSec     0.38 MB/s                     
-  1 pinged   0:        4 bytes      5.27 uSec     0.76 MB/s                     
-  1 pinged   0:        8 bytes      5.39 uSec     1.48 MB/s                     
+  1 pinged   0:        1 bytes      5.38 uSec     0.19 MB/s
+  1 pinged   0:        2 bytes      5.32 uSec     0.38 MB/s
+  1 pinged   0:        4 bytes      5.27 uSec     0.76 MB/s
+  1 pinged   0:        8 bytes      5.39 uSec     1.48 MB/s
   ...
-  1 pinged   0:  1048576 bytes   4682.97 uSec   223.91 MB/s              
+  1 pinged   0:  1048576 bytes   4682.97 uSec   223.91 MB/s
 \end{verbatim}
 
 When the tasks complete execution, \srun\ is notified by \slurmd\ of
@@ -608,14 +608,14 @@ Manager and exits.  \slurmd\ detects when the job script terminates and
 notifies the Job Manager of its exit status and begins cleanup.  The Job
 Manager directs the {\tt slurmd}s formerly assigned to the job to run
 the SLURM epilog program (if one is configured) as user {\em root}.
-Finally, the Job Manager releases the resources allocated to job {\em 42} 
+Finally, the Job Manager releases the resources allocated to job {\em 42}
 and updates the job status to {\em complete}. The record of a job's
 existence is eventually purged.
 
 \subsection{Example:  Executing an Interactive Job}
 
-In this example a user wishes to run the same {\em mping} command 
-in interactive mode, in which \srun\ blocks while the job executes 
+In this example a user wishes to run the same {\em mping} command
+in interactive mode, in which \srun\ blocks while the job executes
 and stdout/stderr of the job are copied onto stdout/stderr of {\tt srun}.
 The user submits the job, this time without the {\tt batch} option:
 
@@ -635,12 +635,12 @@ job script. In this case, the user sees the program output on stdout of
 {\tt srun}:
 
 \begin{verbatim}
-  1 pinged   0:        1 bytes      5.38 uSec     0.19 MB/s                     
-  1 pinged   0:        2 bytes      5.32 uSec     0.38 MB/s                     
-  1 pinged   0:        4 bytes      5.27 uSec     0.76 MB/s                     
-  1 pinged   0:        8 bytes      5.39 uSec     1.48 MB/s                     
+  1 pinged   0:        1 bytes      5.38 uSec     0.19 MB/s
+  1 pinged   0:        2 bytes      5.32 uSec     0.38 MB/s
+  1 pinged   0:        4 bytes      5.27 uSec     0.76 MB/s
+  1 pinged   0:        8 bytes      5.39 uSec     1.48 MB/s
   ...
-  1 pinged   0:  1048576 bytes   4682.97 uSec   223.91 MB/s              
+  1 pinged   0:  1048576 bytes   4682.97 uSec   223.91 MB/s
 \end{verbatim}
 
 When the job terminates, \srun\ receives an EOF (End Of File) on each
@@ -650,11 +650,11 @@ complete and terminates. The controller contacts all {\tt slurmd}s allocated
 to the terminating job and issues a request to run the SLURM epilog,
 then releases the job's resources.
 
-Most signals received by \srun\ while the job is executing are 
+Most signals received by \srun\ while the job is executing are
 transparently forwarded to the remote tasks. SIGINT (generated by
-Control-C) is a special case and only causes \srun\ to report 
+Control-C) is a special case and only causes \srun\ to report
 remote task status unless two SIGINTs are received in rapid succession.
-SIGQUIT (Control-$\backslash$) is another special case. SIGQUIT forces 
+SIGQUIT (Control-$\backslash$) is another special case. SIGQUIT forces
 termination of the running job.
 
 \section{Slurmctld Design}
@@ -744,7 +744,7 @@ component.  Data associated with a partition includes:
 
 \begin{itemize}
 \item Name
-\item RootOnly flag to indicate that only users {\em root} or 
+\item RootOnly flag to indicate that only users {\em root} or
 {\tt SlurmUser} may allocate resources in this partition (for any user)
 \item List of associated nodes
 \item State of partition (UP or DOWN)
@@ -812,7 +812,7 @@ which has an associated bit map. Usable node configuration bitmaps would
 be ANDed with the selected partitions bit map ANDed with the UP node
 bit map and possibly ANDed with the IDLE node bit map (this last test
 depends on the desire to share resources).  This method can eliminate
-tens of thousands of individual node configuration comparisons that 
+tens of thousands of individual node configuration comparisons that
 would otherwise be required in large heterogeneous clusters.
 
 The actual selection of nodes for allocation to a job is currently tuned
@@ -855,10 +855,10 @@ configuration file is shown in Table~\ref{sample_config}.
 
 \begin{tabular}[c]{c}
 \\
-\fbox{ 
+\fbox{
    \begin{minipage}[c]{0.8\linewidth}
-     {\tiny \verbatiminput{sample.config} } 
-   \end{minipage} 
+     {\tiny \verbatiminput{sample.config} }
+   \end{minipage}
 }
 \\
 \end{tabular}
@@ -881,8 +881,8 @@ There are a multitude of parameters associated with each job, including:
 \item Node constraints (processors, memory, features, etc.)
 \end{itemize}
 
-Job records have an associated hash table for rapidly locating 
-specific records. They also have bit maps of requested and/or 
+Job records have an associated hash table for rapidly locating
+specific records. They also have bit maps of requested and/or
 allocated nodes (as described above).
 
 The core functions supported by the Job Manager include:
@@ -890,10 +890,10 @@ The core functions supported by the Job Manager include:
 \item Request resource (job may be queued)
 \item Reset priority of a job
 \item Status job (including node list, memory and CPU use data)
-\item Signal job (send arbitrary signal to all processes associated 
+\item Signal job (send arbitrary signal to all processes associated
       with a job)
 \item Terminate job (remove all processes)
-\item Change node count of running job (could fail if insufficient 
+\item Change node count of running job (could fail if insufficient
 resources are available)
 %\item Preempt/resume job  (future)
 %\item Checkpoint/restart job (future)
@@ -909,25 +909,25 @@ or node state might permit the scheduling of a job.
 We are aware that this scheduling algorithm does not satisfy the needs
 of many customers, and we provide the means for establishing other
 scheduling algorithms. Before a newly arrived job is placed into the
-queue, an external scheduler plugin assigns its initial priority.  
+queue, an external scheduler plugin assigns its initial priority.
 A plugin function is also called at the start of each scheduling
 cycle to modify job or system state as desired.  SLURM APIs permit an
 external entity to alter the priorities of jobs at any time and re-order
 the queue as desired.  The Maui Scheduler \cite{Jackson2001,Maui2002}
 is one example of an external scheduler suitable for use with SLURM.
 
-LLNL uses DPCS \cite{DPCS2002} as SLURM's external scheduler. 
-DPCS is a meta-scheduler with flexible scheduling algorithms that 
-suit our needs well. 
+LLNL uses DPCS \cite{DPCS2002} as SLURM's external scheduler.
+DPCS is a meta-scheduler with flexible scheduling algorithms that
+suit our needs well.
 It also provides the scalability required for this application.
-DPCS maintains pending job state internally and only transfers the 
-jobs to SLURM (or another underlying resources manager) only when 
-they are to begin execution. 
-By not transferring jobs to a particular resources manager earlier, 
-jobs are assured of being initiated on the first resource satisfying 
-their requirements, whether a Linux cluster with SLURM or an IBM SP 
+DPCS maintains pending job state internally and only transfers the
+jobs to SLURM (or another underlying resource manager) when
+they are to begin execution.
+By not transferring jobs to a particular resource manager earlier,
+jobs are assured of being initiated on the first resource satisfying
+their requirements, whether a Linux cluster with SLURM or an IBM SP
 with LoadLeveler (assuming a highly flexible application).
-This mode of operation may also be suitable for computational grid 
+This mode of operation may also be suitable for computational grid
 schedulers.
 
 In a future release, the Job Manager will collect resource consumption
@@ -994,7 +994,7 @@ running crashes, the job continues execution and no output is lost.
 
 The \slurmd\ daemon is a multi-threaded daemon for managing user jobs
 and monitoring system state.  Upon initiation it reads the configuration
-file, recovers any saved state, captures system state, 
+file, recovers any saved state, captures system state,
 attempts an initial connection to the SLURM
 controller, and awaits requests.  It services requests for system state,
 accounting information, job initiation, job state, job termination,
@@ -1012,18 +1012,18 @@ to {\tt slurmctld}.
 %FUTURE:  Another thread is
 %created to capture CPU, real-memory and virtual-memory consumption from
 %the process table entries.  Differences in resource utilization values
-%from one process table snapshot to the next are accumulated. \slurmd\ 
+%from one process table snapshot to the next are accumulated. \slurmd\
 %insures these accumulated values are not decremented if resource
 %consumption for a user happens to decrease from snapshot to snapshot,
 %which would simply reflect the termination of one or more processes.
 %Both the real and virtual memory high-water marks are recorded and
 %the integral of memory consumption (e.g. megabyte-hours).  Resource
 %consumption is grouped by uid and SLURM job id (if any). Data
-%is collected for system users ({\em root}, {\em ftp}, {\em ntp}, 
-%etc.) as well as customer accounts. 
+%is collected for system users ({\em root}, {\em ftp}, {\em ntp},
+%etc.) as well as customer accounts.
 %The intent is to capture all resource use including
 %kernel, idle and down time.  Upon request, the accumulated values are
-%uploaded to \slurmctld\ and cleared.  
+%uploaded to \slurmctld\ and cleared.
 
 \slurmd\ accepts requests from \srun\ and \slurmctld\ to initiate
 and terminate user jobs. The initiate job request contains such
@@ -1066,38 +1066,38 @@ or a privileged user.
 
 \subsection{scontrol}
 
-\scontrol\ is a tool meant for SLURM administration by user {\em root}. 
+\scontrol\ is a tool meant for SLURM administration by user {\em root}.
 It provides the following capabilities:
 \begin{itemize}
-\item {\tt Shutdown}: Cause \slurmctld\ and \slurmd\ to save state 
+\item {\tt Shutdown}: Cause \slurmctld\ and \slurmd\ to save state
 and terminate.
-\item {\tt Reconfigure}: Cause \slurmctld\ and \slurmd\ to reread the 
+\item {\tt Reconfigure}: Cause \slurmctld\ and \slurmd\ to reread the
 configuration file.
 \item {\tt Ping}: Display the status of primary and backup \slurmctld\ daemons.
-\item {\tt Show Configuration Parameters}: Display the values of general SLURM 
-configuration parameters such as locations of files and values of timers.  
-\item {\tt Show Job State}: Display the state information of a particular job 
+\item {\tt Show Configuration Parameters}: Display the values of general SLURM
+configuration parameters such as locations of files and values of timers.
+\item {\tt Show Job State}: Display the state information of a particular job
 or all jobs in the system.
-\item {\tt Show Job Step State}: Display the state information of a particular 
+\item {\tt Show Job Step State}: Display the state information of a particular
 job step or all job steps in the system.
-\item {\tt Show Node State}: Display the state and configuration information 
-of a particular node, a set of nodes (using numeric ranges syntax to 
+\item {\tt Show Node State}: Display the state and configuration information
+of a particular node, a set of nodes (using numeric ranges syntax to
 identify their names), or all nodes.
-\item {\tt Show Partition State}: Display the state and configuration 
+\item {\tt Show Partition State}: Display the state and configuration
 information of a particular partition or all partitions.
-\item {\tt Update Job State}: Update the state information of a particular job 
-in the system. Note that not all state information can be changed in this 
+\item {\tt Update Job State}: Update the state information of a particular job
+in the system. Note that not all state information can be changed in this
 fashion (e.g., the nodes allocated to a job).
 \item {\tt Update Node State}: Update the state of a particular node. Note
 that not all state information can be changed in this fashion (e.g., the
 amount of memory configured on a node). In some cases, you may need
 to modify the SLURM configuration file and cause it to be reread
-using the ``Reconfigure'' command described above.  
+using the ``Reconfigure'' command described above.
 \item {\tt Update Partition State}: Update the state of a partition
 node. Note that not all state information can be changed in this fashion
 (e.g., the default partition). In some cases, you may need to modify
 the SLURM configuration file and cause it to be reread using the
-``Reconfigure'' command described above.  
+``Reconfigure'' command described above.
 \end{itemize}
 
 \subsection{squeue}
@@ -1105,14 +1105,14 @@ the SLURM configuration file and cause it to be reread using the
 \squeue\ reports the state of SLURM jobs.  It can filter these
 jobs input specification of job state (RUN, PENDING, etc.), job id,
 user name, job name, etc.  If no specification is supplied, the state of
-all pending and running jobs is reported. 
+all pending and running jobs is reported.
 \squeue\ also has a variety of sorting and output options.
 
 \subsection{sinfo}
 
 \sinfo\ reports the state of SLURM partitions and nodes.  By default,
 it reports a summary of partition state with node counts and a summary
-of the configuration of those nodes.  A variety of sorting and 
+of the configuration of those nodes.  A variety of sorting and
 output formatting options exist.
 
 \subsection{srun}
@@ -1181,7 +1181,7 @@ later execution; {\em allocate}, in which \srun\ requests resources from
 the SLURM controller and spawns a shell with access to those resources;
 {\em attach}, in which \srun\ attaches to a currently
 running job and displays stdout/stderr in real time from the remote
-tasks.  
+tasks.
 
 % FUTURE:
 % An interactive job may also be forced into the background with a special
@@ -1203,13 +1203,13 @@ stderr are displayed on the user's terminal in real time, and stdin and
 signals may be forwarded from the  terminal transparently to the remote
 tasks. The second mode is {\em batch} or {\em queued} mode, in which the job is
 queued until the request for resources can be satisfied, at which time the
-job is run by SLURM as the submitting user. In the third mode,{\em allocate} 
+job is run by SLURM as the submitting user. In the third mode, {\em allocate}
 mode, a job is allocated to the requesting user, under which the user may
 manually run job steps via a script or in a sub-shell spawned by \srun .
 
 \begin{figure}[tb]
 \centerline{\epsfig{file=../figures/connections.eps,scale=0.4}}
-\caption{\small Job initiation connections overview. 1. \srun\ connects to 
+\caption{\small Job initiation connections overview. 1. \srun\ connects to
          \slurmctld\ requesting resources. 2. \slurmctld\ issues a response,
 	 with list of nodes and job step credential. 3. \srun\ opens a listen
 	 port for job IO connections, then sends a run job step
@@ -1254,8 +1254,8 @@ instantly if the user has requested that \srun\ block until resources are
 available.  When resources are available for the user's job, \slurmctld\
 replies with a job step credential, list of nodes that were allocated,
 cpus per node, and so on. \srun\ then sends a message each \slurmd\ on
-the allocated nodes requesting that a job step be initiated. 
-The \slurmd\ daemons verify that the job is valid using the forwarded job 
+the allocated nodes requesting that a job step be initiated.
+The \slurmd\ daemons verify that the job is valid using the forwarded job
 step credential and then respond to \srun .
 
 Each \slurmd\ invokes a job manager process to handle the request, which
@@ -1293,11 +1293,11 @@ epilog ran successfully, the nodes are returned to the partition.
 
 \begin{figure}[tb]
 \centerline{\epsfig{file=../figures/queued-job-init.eps,scale=0.45} }
-\caption{\small Queued job initiation. 
-         \slurmctld\ initiates the user's job as a batch script on one node. 
-	 Batch script contains an \srun\ call that initiates parallel tasks 
-	 after instantiating job step with controller. The shaded region is 
-	 a compressed representation and is shown in more detail in the 
+\caption{\small Queued job initiation.
+         \slurmctld\ initiates the user's job as a batch script on one node.
+	 Batch script contains an \srun\ call that initiates parallel tasks
+	 after instantiating job step with controller. The shaded region is
+	 a compressed representation and is shown in more detail in the
 	 interactive diagram (Figure~\ref{init-interactive})}
 \label{init-batch}
 \end{figure}
@@ -1305,9 +1305,9 @@ epilog ran successfully, the nodes are returned to the partition.
 Figure~\ref{init-batch} shows the initiation of a queued job in
 SLURM.  The user invokes \srun\ in batch mode by supplying the {\tt --batch}
 option to \srun . Once user options are processed, \srun\ sends a batch
-job request to \slurmctld\ that identifies the stdin, stdout and stderr file 
-names for the job, current working directory, environment, requested 
-number of nodes, etc. 
+job request to \slurmctld\ that identifies the stdin, stdout and stderr file
+names for the job, current working directory, environment, requested
+number of nodes, etc.
 The \slurmctld\ queues the request in its priority-ordered queue.
 
 Once the resources are available and the job has a high enough priority, \linebreak
@@ -1334,7 +1334,7 @@ of the epilog, the nodes are returned to the partition.
 \begin{figure}[tb]
 \centerline{\epsfig{file=../figures/allocate-init.eps,scale=0.45} }
 \caption{\small Job initiation in allocate mode. Resources are allocated and
-         \srun\ spawns a shell with access to the resources. When user runs 
+         \srun\ spawns a shell with access to the resources. When user runs
 	 an \srun\ from within the shell, the a job step is initiated under
 	 the allocation}
 \label{init-allocate}
@@ -1352,9 +1352,9 @@ complete).
 An \srun\ initiated within the allocate sub-shell recognizes that
 it is running under an allocation and therefore already within a
 job. Provided with no other arguments, \srun\ started in this manner
-initiates a job step on all nodes within the current job. 
+initiates a job step on all nodes within the current job.
 
-% Maybe later: 
+% Maybe later:
 %
 % However, the user may select a subset of these nodes implicitly by using
 % the \srun\ {\tt --nodes} option, or explicitly by specifying a relative
@@ -1409,11 +1409,11 @@ nodes to the partition as they successfully complete the epilog.
 \label{timing}
 \end{figure}
 
-We were able to perform some SLURM tests on a 1000-node cluster 
-in November 2002. Some development was still underway at that time 
-and tuning had not been performed. The results for executing the 
-program {\em /bin/hostname} on two tasks per node and various node 
-counts are shown in Figure~\ref{timing}. We found SLURM performance 
+We were able to perform some SLURM tests on a 1000-node cluster
+in November 2002. Some development was still underway at that time
+and tuning had not been performed. The results for executing the
+program {\em /bin/hostname} on two tasks per node and various node
+counts are shown in Figure~\ref{timing}. We found SLURM performance
 to be comparable to the
 Quadrics Resource Management System (RMS) \cite{Quadrics2002} for all
 job sizes and about 80 times faster than IBM LoadLeveler\cite{LL2002}
@@ -1421,8 +1421,8 @@ at tested job sizes.
 
 \section{Future Plans}
 
-SLURM begin production use on LLNL Linux clusters in March 2003 
-and is available from our web site\cite{SLURM2003}. 
+SLURM began production use on LLNL Linux clusters in March 2003
+and is available from our web site\cite{SLURM2003}.
 
 While SLURM is able to manage 1000 nodes without difficulty using
 sockets and Ethernet, we are reviewing other communication mechanisms
@@ -1433,7 +1433,7 @@ including a broadcast capability.  STORM only supports the Quadrics
 Elan interconnnect at present, but it does offer the promise of improved
 performance and scalability.
 
-Looking ahead, we anticipate adding support for additional 
+Looking ahead, we anticipate adding support for additional
 interconnects (InfiniBand and the IBM
 Blue Gene \cite{BlueGene2002} system\footnote{Blue Gene has a different
 interconnect than any supported by SLURM and a 3-D topography with
@@ -1450,19 +1450,19 @@ use by each parallel job is planned for a future release.
 SLURM is jointly developed by LLNL and Linux NetworX.
 Contributers to SLURM development include:
 \begin{itemize}
-\item Jay Windley of Linux NetworX for his development of the plugin 
+\item Jay Windley of Linux NetworX for his development of the plugin
 mechanism and work on the security components
 \item Joey Ekstrom for his work developing the user tools
 \item Kevin Tew for his work developing the communications infrastructure
-\item Jim Garlick for his development of the Quadrics Elan interface and 
+\item Jim Garlick for his development of the Quadrics Elan interface and
 technical guidance
-\item Gregg Hommes, Bob Wood, and Phil Eckert for their help designing the 
+\item Gregg Hommes, Bob Wood, and Phil Eckert for their help designing the
 SLURM APIs
 \item Mark Seager and Greg Tomaschke for their support of this project
 \item Chris Dunlap for technical guidance
 \item David Jackson of Linux NetworX for technical guidance
-\item Fabrizio Petrini of Los Alamos National Laboratory for his work to 
-integrate SLURM with STORM communications 
+\item Fabrizio Petrini of Los Alamos National Laboratory for his work to
+integrate SLURM with STORM communications
 \end{itemize}
 
 %\appendix
@@ -1473,7 +1473,7 @@ integrate SLURM with STORM communications
 %\item[Authd]    User authentication mechanism
 %\item[DCE]	Distributed Computing Environment
 %\item[DFS]	Distributed File System (part of DCE)
-%\item[DPCS]	Distributed Production Control System, a meta-batch system 
+%\item[DPCS]	Distributed Production Control System, a meta-batch system
 %		and resource manager developed by LLNL
 %\item[Globus]	Grid scheduling infrastructure
 %\item[Kerberos]	Authentication mechanism
diff --git a/doc/pubdesign/sample.config b/doc/pubdesign/sample.config
index e0d5f2f38b79bf76c358ab5f27e0041542ea2cf0..7cb57f8300fcbdab996790816f3bac2c7944f467 100644
--- a/doc/pubdesign/sample.config
+++ b/doc/pubdesign/sample.config
@@ -1,9 +1,9 @@
-# 
+#
 # Sample /etc/slurm.conf
 # Author: John Doe
 # Date: 11/06/2001
 
-ControlMachine=lx0000   ControlAddr=elx0000 
+ControlMachine=lx0000   ControlAddr=elx0000
 BackupController=lx0001 BackupAddr=elx0001
 
 AuthType="auth/authd"
diff --git a/doc/sigops/Makefile b/doc/sigops/Makefile
index a71d48628c84550826e64819757d5fc98cc67611..5a8b6008ab9c32aa11eaee52ef43261e7e444280 100644
--- a/doc/sigops/Makefile
+++ b/doc/sigops/Makefile
@@ -10,7 +10,7 @@
 
 REPORT = report
 
-TEX = ../common/llnlCoverPage.tex $(REPORT).tex 
+TEX = ../common/llnlCoverPage.tex $(REPORT).tex
 
 FIGDIR = ../figures
 FIGS = $(FIGDIR)/arch2.eps \
@@ -21,7 +21,7 @@ FIGS = $(FIGDIR)/arch2.eps \
 BIB = ../common/project.bib
 
 %.eps: %.dia
-	dia --nosplash -e $@ $< 
+	dia --nosplash -e $@ $<
 %.eps: %.gpl
 	gnuplot $<
 %.eps: %.fig
@@ -31,9 +31,9 @@ BIB = ../common/project.bib
 %.ps: %.dvi
 	dvips -K -t letter -o $(@F) $(<F)
 %.pdf: %.dvi
-	dvipdf $< $@ 
+	dvipdf $< $@
 
-all: $(REPORT).ps 
+all: $(REPORT).ps
 
 
 $(REPORT).dvi: $(TEX) $(FIGS) $(BIB)
@@ -48,6 +48,6 @@ $(REPORT).dvi: $(TEX) $(FIGS) $(BIB)
 view: $(REPORT).ps
 	ghostview $(REPORT) &
 
-clean: 
+clean:
 	rm -f *~ *.dvi *.log *.aux *.ps *.pdf *.blg *.bbl #*.eps #*.gif
-	      
+
diff --git a/doc/sigops/report.tex b/doc/sigops/report.tex
index 96fe208273d8d068f12c8a5285be612050a4ee89..5f8186e7383e92c6d9d655f1add075a2625e1bf7 100644
--- a/doc/sigops/report.tex
+++ b/doc/sigops/report.tex
@@ -1,4 +1,4 @@
-% Presenter info: 
+% Presenter info:
 % SIGOPS
 %
 % Use a two-column, 8.5" x 11" document format
@@ -68,88 +68,88 @@
 
 \maketitle
 \begin{abstract}
-While the increase in performance of individual computer 
+While the increase in performance of individual computer
 processors through the years has been impressive, the demand
-for compute resources has grown even more rapidly. 
-This demand has been met over the past couple of decades by 
-turning to parallel computing. 
-While operating systems, compilers, libraries, and a wide 
-variety of tools exist to make effective use of independent 
-computers, a new class of tools is required to effectively 
-utilize a parallel computer. 
-One important tool is the resource manager, which can be viewed 
-as the "glue" to run applications on the parallel computer. 
+for compute resources has grown even more rapidly.
+This demand has been met over the past couple of decades by
+turning to parallel computing.
+While operating systems, compilers, libraries, and a wide
+variety of tools exist to make effective use of independent
+computers, a new class of tools is required to effectively
+utilize a parallel computer.
+One important tool is the resource manager, which can be viewed
+as the "glue" to run applications on the parallel computer.
 One very popular resource manager is the Simple Linux
-Utility for Resource Management (SLURM, 
-http://www.llnl.gov/linux/slurm). 
-SLURM performs resource management on about 1000 parallel 
-computers around the world, including many of the largest 
-systems. 
-This paper will describe resource management scalability 
+Utility for Resource Management (SLURM,
+http://www.llnl.gov/linux/slurm).
+SLURM performs resource management on about 1000 parallel
+computers around the world, including many of the largest
+systems.
+This paper will describe resource management scalability
 issues and SLURM's implementation.
 \end{abstract}
 
 
 \section{Introduction}
 
-Executing a parallel program on a cluster involves several 
-steps. First resources (cores, memory, nodes, etc.) suitable 
-for running the program must be identified. These resources 
-are then typically dedicated to the program. In some 
-cases, the computer's network/interconnect must be 
+Executing a parallel program on a cluster involves several
+steps. First resources (cores, memory, nodes, etc.) suitable
+for running the program must be identified. These resources
+are then typically dedicated to the program. In some
+cases, the computer's network/interconnect must be
 configured for the program's use. The program's tasks
-are initiated on the allocated resources. The input and 
-output for all of these tasks must be processed. 
-Upon the program termination, it's allocated resources 
-are released for use by other programs. 
-All of these operations are straightforward, but performing 
-resource management on clusters containing thousands of 
-nodes and over 130,000 processor cores requires more 
+are initiated on the allocated resources. The input and
+output for all of these tasks must be processed.
+Upon the program termination, its allocated resources
+are released for use by other programs.
+All of these operations are straightforward, but performing
+resource management on clusters containing thousands of
+nodes and over 130,000 processor cores requires more
 than a high degree of parallelism.
 In many respects, data management and fault-tolerance issues
 are paramount.
 
-SLURM is a resource manager jointly developed by Lawrence 
-Livermore National Laboratory (LLNL), 
+SLURM is a resource manager jointly developed by Lawrence
+Livermore National Laboratory (LLNL),
 Hewlett-Packard, and Linux NetworX
 ~\cite{SLURM2003,Yoo2003,SlurmWeb}.
 SLURM's general characteristics include:
 
 \begin{itemize}
-\item {\tt Simplicity}: SLURM is simple enough to allow motivated 
-end users to understand its source code and add functionality. 
-It supports only a few simple scheduling algorithms, 
+\item {\tt Simplicity}: SLURM is simple enough to allow motivated
+end users to understand its source code and add functionality.
+It supports only a few simple scheduling algorithms,
 but relies upon an external scheduler for sophisticated
 workload prioritization.
 
-\item {\tt Open Source}: SLURM is available to everyone and 
+\item {\tt Open Source}: SLURM is available to everyone and
 will remain free.
 Its source code is distributed under the GNU General Public
 License
 ~\cite{GPL2002}.
 
-\item {\tt Portability}: SLURM is written in the C language, 
+\item {\tt Portability}: SLURM is written in the C language,
 with a GNU {\em autoconf} configuration engine.
 SLURM has a fully functional skeleton of functionality with a
-wide assortment of plugins available for customization. 
-A total of 30 different plugins are available and provide a 
-great deal of flexibility in configuring SLURM using a 
+wide assortment of plugins available for customization.
+A total of 30 different plugins are available and provide a
+great deal of flexibility in configuring SLURM using a
 building block approach.
 
-\item {\tt Scalability}: SLURM is designed for scalability to clusters 
+\item {\tt Scalability}: SLURM is designed for scalability to clusters
 of thousands of nodes. Time to fully execute (allocate, launch
 tasks, process I/O, and deallocate resources) a simple program
-is only 2 seconds for 4,800 tasks on 2,400 nodes. Clusters 
-containing up to 16,384 nodes have been emulated with highly 
-scalable performance. 
+is only 2 seconds for 4,800 tasks on 2,400 nodes. Clusters
+containing up to 16,384 nodes have been emulated with highly
+scalable performance.
 
 \item {\tt Fault Tolerance}: SLURM can handle a variety of failures
-in hardward or the infrastructure without inducing failures in 
+in hardware or the infrastructure without inducing failures in
 the workload.
 
 \item {\tt Security}: SLURM employs crypto technology to authenticate
 users to services and services to each other with a variety of options
-available through the plugin mechanism.  
+available through the plugin mechanism.
 
 \item {\tt System Administrator Friendly}: SLURM utilizes
 a simple configuration file and minimizes distributed state.
@@ -169,13 +169,13 @@ interfaces are usable by scripts and its behavior is highly deterministic.
 
 SLURM's commands and daemons are illustrated in Figure~\ref{arch}.
 The main SLURM control program, {\tt slurmctld}, orchestrates
-activities throughout the cluster. While highly optimized, 
-{\tt slurmctld} is best run on a dedicated node of the cluster for optimal performance. 
-In addition, SLURM provides the option of running  a backup controller 
+activities throughout the cluster. While highly optimized,
+{\tt slurmctld} is best run on a dedicated node of the cluster for optimal performance.
+In addition, SLURM provides the option of running a backup controller
 on another node for increased fault-tolerance.
-Each node in the cluster available for running user applications 
+Each node in the cluster available for running user applications
 has a relatively small daemon called {\tt slurmd} that monitors
-and manages resources and work within that node. 
+and manages resources and work within that node.
 Several user tools are provided:
 
 \begin{itemize}
@@ -188,7 +188,7 @@ system configuration
 
 \item {\tt scancel} can signal and/or cancel jobs or job steps
 
-\item {\tt smap} reports node, queue, and job status including 
+\item {\tt smap} reports node, queue, and job status including
 topological state information
 
 \item {\tt sacct} reports job accounting information
@@ -204,7 +204,7 @@ topological state information
 \label{entities}
 \end{figure}
 
-The entities managed by SLURM are illustrated in Figure~\ref{entities} 
+The entities managed by SLURM are illustrated in Figure~\ref{entities}
 and include:
 {\em nodes} including their processors, memory and temporary disk space,
 {\em partitions} are collections of nodes with various limits and constraints,
@@ -212,34 +212,34 @@ and include:
 to a user for a specified amount of time, and
 {\em job steps} are sets of (possibly parallel) tasks within a job.
 Each node must be capable of independent scheduling and job execution
-\footnote{On BlueGene computers, the c-nodes can not be independently 
-scheduled. Each midplane or base partition is considered a SLURM node 
-with 1,024 processors. SLURM supports the execution of more than one 
+\footnote{On BlueGene computers, the c-nodes can not be independently
+scheduled. Each midplane or base partition is considered a SLURM node
+with 1,024 processors. SLURM supports the execution of more than one
 job per BlueGene node.}.
-Each job in the priority-ordered queue is allocated nodes within a single 
+Each job in the priority-ordered queue is allocated nodes within a single
 partition.
-Since nodes can be in multiple partitions, one can think of them as 
-general purpose queues for jobs. 
+Since nodes can be in multiple partitions, one can think of them as
+general purpose queues for jobs.
 
 \section{Implementation}
 
-Very high scalability was treated as a high priority for SLURM. 
-More comprehensive support for smaller clusters in fact was 
+Very high scalability was treated as a high priority for SLURM.
+More comprehensive support for smaller clusters in fact was
 added in later revisions.
-For example, the initial implementation allocated whole nodes 
+For example, the initial implementation allocated whole nodes
 to jobs, in part to avoid the extra overhead of tracking individual
 processors.
-While allocation of entire nodes to jobs is still a recommended mode of 
-operation for very large clusters, an alternate SLURM plugin provides 
+While allocation of entire nodes to jobs is still a recommended mode of
+operation for very large clusters, an alternate SLURM plugin provides
 resource management down the the resolution of individual processors.
 
-The SLURM's {\tt srun} command and the daemons are extensively 
-multi-threaded. 
-{\tt slurmctld} also maintains independent read and 
-write locks for critical data structures. 
-The combination of these two features means that, 
+The SLURM's {\tt srun} command and the daemons are extensively
+multi-threaded.
+{\tt slurmctld} also maintains independent read and
+write locks for critical data structures.
+The combination of these two features means that,
 for example, three users can get job state information at the same
-time that a system administrator is modifying the time limit 
+time that a system administrator is modifying the time limit
 for a partition.
 
 \begin{figure}[tcb]
@@ -248,82 +248,82 @@ for a partition.
 \label{comms}
 \end{figure}
 
-Communications to large numbers of nodes are optimized in two 
+Communications to large numbers of nodes are optimized in two
 ways. The programs initiating communications are multithreaded
-and can process tens or even hundreds of simultaneous active 
-communications. 
-Second, the {\tt slurmd} daemon is designed to forward 
+and can process tens or even hundreds of simultaneous active
+communications.
+Second, the {\tt slurmd} daemon is designed to forward
 communications on a hierarchical basis as shown in Figure ~\ref{comms}.
 For example, the initiation of tasks on 1000 nodes does not require
 {\tt srun} to directly communication with all 1000 nodes. {\tt Srun}
 can communicate directly with {\tt slurmd} daemons on 32 nodes
-(the degree of fanout in communications is configurable). 
+(the degree of fanout in communications is configurable).
 Each of those {\tt slurmd} will simultaneously forward the request
-to {\tt slurmd} programs on another 32 nodes. 
+to {\tt slurmd} programs on another 32 nodes.
 This improves performance by distributing the communication workload.
-Note that every communication is authenticated and acknowleged 
+Note that every communication is authenticated and acknowledged
 for fault-tolerance.
 
 A number of interesting papers
-~\cite{Jones2003,Kerbyson2001,Petrini2003,Phillips2003,Tsafrir2005} 
+~\cite{Jones2003,Kerbyson2001,Petrini2003,Phillips2003,Tsafrir2005}
 have recently been written about
-the impact of system daemons and other system overhead on 
-parallel job performance. This {\tt system noise} can have a 
-dramatic impact upon the performance of highly parallel jobs. 
-In a simplified example, consider a system daemon that run for 
-one second out of every 100 seconds on every node in a cluster. 
-For serial jobs this 
-reduces throughput by one percent, but the impact is compounded 
-on parallel computers if these daemons do not all execute concurrently. 
-If the parallel program runs on 100 nodes and tries to synchronize 
+the impact of system daemons and other system overhead on
+parallel job performance. This {\tt system noise} can have a
+dramatic impact upon the performance of highly parallel jobs.
+In a simplified example, consider a system daemon that runs for
+one second out of every 100 seconds on every node in a cluster.
+For serial jobs this
+reduces throughput by one percent, but the impact is compounded
+on parallel computers if these daemons do not all execute concurrently.
+If the parallel program runs on 100 nodes and tries to synchronize
 every second, almost every synchronization period will include a
-one second delay from the daemon running on one of the 100 nodes. 
-This effectively limits job parallelism to about 50-way, orders 
+one second delay from the daemon running on one of the 100 nodes.
+This effectively limits job parallelism to about 50-way, orders
 of magnitude smaller than the largest systems currently available.
 SLURM addresses this issue by:
 \begin{itemize}
-\item Making the {\tt slurmd} daemon resource requirements negligible 
-\item Supporting configurations that let the {\tt slurmd} daemon sleep 
+\item Making the {\tt slurmd} daemon resource requirements negligible
+\item Supporting configurations that let the {\tt slurmd} daemon sleep
 during the entire job execution period
-\item If the {\tt slurmd} daemons do perform work, it is done on a 
+\item If the {\tt slurmd} daemons do perform work, it is done on a
 highly synchronized fashion across all nodes
 \end{itemize}
-In addition, the default mode of operation is to allocate entire 
-nodes with all of their processors to applications rather than 
-individual processors on each node. 
-This eliminates the possibility of interference between jobs, 
+In addition, the default mode of operation is to allocate entire
+nodes with all of their processors to applications rather than
+individual processors on each node.
+This eliminates the possibility of interference between jobs,
 which could severely degrade performance of parallel applications.
-Allocation of resources to the resolution of individual processors 
-on each node is supported by SLURM, but this comes at a higher cost 
+Allocation of resources to the resolution of individual processors
+on each node is supported by SLURM, but this comes at a higher cost
 in terms of the data managed.
 The selection of resource resolution is provided by different plugins.
 
-Resource management of large clusters entails the processing of 
-large quantities of data, both for the software and the 
+Resource management of large clusters entails the processing of
+large quantities of data, both for the software and the
 system administrator.
-In order to ease the burden on system administrators, SLURM 
+In order to ease the burden on system administrators, SLURM
 configuration files and tools all support node naming using
 numeric ranges.
-For example, "linux[1-4096]" represents 4096 node names with 
-a prefix of "linux" and numeric suffix from 1 to 4096. 
-These naming convention permits even the largest clusters 
-to be described in a configure file containing only a 
-couple of dozen lines. 
+For example, "linux[1-4096]" represents 4096 node names with
+a prefix of "linux" and numeric suffix from 1 to 4096.
+This naming convention permits even the largest clusters
+to be described in a configure file containing only a
+couple of dozen lines.
 State information output from various SLURM commands uses
 the same convention to maintain a modest volume of output
-on even large cluster. 
+on even large clusters.
 
-Extensive use is made of bitmaps to represent nodes in the cluster. 
-For example, bitmaps are maintained for each unique node configuration, 
-the nodes associated with each partition, nodes allocated to 
-the active jobs, nodes available for use, etc. This reduces most 
+Extensive use is made of bitmaps to represent nodes in the cluster.
+For example, bitmaps are maintained for each unique node configuration,
+the nodes associated with each partition, nodes allocated to
+the active jobs, nodes available for use, etc. This reduces most
 scheduling operations to very rapid AND and OR operations on those bitmaps.
 
 \section{Application Launch}
 
-To better illustrate SLURM's operation, the execution of an 
+To better illustrate SLURM's operation, the execution of an
 application is detailed below and illustrated in Figure~\ref{launch}.
-This example is based upon a typical configuration and the 
+This example is based upon a typical configuration and the
 {\em interactive} mode, in which stdout and
 stderr are displayed on the user's terminal in real time, and stdin and
 signals may be forwarded from the terminal transparently to the remote
@@ -335,65 +335,65 @@ tasks.
 \label{launch}
 \end{figure}
 
-The task launch request is initiated by a user's execution of the 
-{\tt srun} command. {\tt Srun} has a multitude of options to specify 
-resource requirements such as minimum memory per node, minimum 
-temporary disk space per node, features associated with nodes, 
+The task launch request is initiated by a user's execution of the
+{\tt srun} command. {\tt Srun} has a multitude of options to specify
+resource requirements such as minimum memory per node, minimum
+temporary disk space per node, features associated with nodes,
 partition to use, node count, task count, etc.
-{\tt Srun} gets a credential to identify the user and his group 
-then sends the request to {\tt slurmctld} (message 1). 
+{\tt Srun} gets a credential to identify the user and his group
+then sends the request to {\tt slurmctld} (message 1).
 
-{\tt Slurmctld} authenticates the request and identifies the resources 
+{\tt Slurmctld} authenticates the request and identifies the resources
 to be allocated using a series of bitmap operations.
-First the nodes containing the appropriate resources (processors, 
-memory, temporary disk space, and features) are identified through 
-comparison with a node configuration table, which typically has 
-a very small number of entries. 
-The resulting bitmap is ANDed with the bitmap associated with the 
-requested partition. 
-This bitmap is ANDed with the bitmap identifying available nodes. 
-The requested node and/or processor count is then satisfied from 
+First the nodes containing the appropriate resources (processors,
+memory, temporary disk space, and features) are identified through
+comparison with a node configuration table, which typically has
+a very small number of entries.
+The resulting bitmap is ANDed with the bitmap associated with the
+requested partition.
+This bitmap is ANDed with the bitmap identifying available nodes.
+The requested node and/or processor count is then satisfied from
 the nodes identified with the resulting bitmap.
-This completes the job allocation process, but for interactive 
-mode, a job step credential is also constructed for the allocation 
+This completes the job allocation process, but for interactive
+mode, a job step credential is also constructed for the allocation
 and sent to {\tt srun} in the reply (message 2).
 
 The {\tt srun} command open sockets for task input and output then
-sends the job step credential directly to the {\tt slurmd} daemons 
+sends the job step credential directly to the {\tt slurmd} daemons
 (message 3) in order to launch the tasks, which is acknowledged
 (message 4).
-Note the {\tt slurmctld} and {\tt slurmd} daemons do not directly 
-communicate during the task launch operation in order to minimize the 
-workload on the {\tt slurmctld}, which has to manage the entire 
+Note the {\tt slurmctld} and {\tt slurmd} daemons do not directly
+communicate during the task launch operation in order to minimize the
+workload on the {\tt slurmctld}, which has to manage the entire
 cluster.
 
-Task termination is communicated to {\tt srun} over the same 
-socket used for input and output. 
-When all tasks have terminated, {\tt srun} notifies {\tt slurmctld} 
+Task termination is communicated to {\tt srun} over the same
+socket used for input and output.
+When all tasks have terminated, {\tt srun} notifies {\tt slurmctld}
 of the job step termination (message 5).
-{\tt Slurmctld} authenticates the request, acknowledges it 
-(message 6) and sends messages to the {\tt slurmd} daemons to 
-insure that all processes associated with the job have 
-terminated (message 7). 
-Upon receipt of job termination confirmation on each node (message 8), 
+{\tt Slurmctld} authenticates the request, acknowledges it
+(message 6) and sends messages to the {\tt slurmd} daemons to
+ensure that all processes associated with the job have
+terminated (message 7).
+Upon receipt of job termination confirmation on each node (message 8),
 {\tt slurmctld} releases the resources for use by another job.
 
-The full time for execution of a simple parallel application across 
-a few nodes is a few milliseconds. 
-The time reaches a few seconds for jobs that span thousands of 
+The full time for execution of a simple parallel application across
+a few nodes is a few milliseconds.
+The time reaches a few seconds for jobs that span thousands of
 nodes.
-The times will vary with the hardware and configuration used, 
+The times will vary with the hardware and configuration used,
 but is hardly noticeable to the user at even the largest scales.
 
 \section{Conclusion}
 
 SLURM has demonstrated high reliability and scalability.
-It provides resource management on several clusters containing 
-over 2,000 compute nodes and has emulated clusters containing 
-up to 16,384 compute nodes, four times the size of of the 
-largest parallel computer in use today. 
-The cluster size is presently limited by the size of some SLURM 
-data structures, but operation on larger systems is certainly 
+It provides resource management on several clusters containing
+over 2,000 compute nodes and has emulated clusters containing
+up to 16,384 compute nodes, four times the size of the
+largest parallel computer in use today.
+The cluster size is presently limited by the size of some SLURM
+data structures, but operation on larger systems is certainly
 feasible with minor code changes.
 
 \raggedright
diff --git a/doc/slides/bigpicture/Makefile b/doc/slides/bigpicture/Makefile
index d680968fbcbd4aef0365f7af642802526ec8d3f3..c4413602c5dc584dc6d30412cafac852490c6293 100644
--- a/doc/slides/bigpicture/Makefile
+++ b/doc/slides/bigpicture/Makefile
@@ -18,5 +18,5 @@ slides.ps: slides.dvi
 	tgif -print -eps $<
 
 %.pdf: %.dvi
-	dvipdf $< $@ 
+	dvipdf $< $@
 
diff --git a/doc/slides/bigpicture/penguin.eps b/doc/slides/bigpicture/penguin.eps
index 46819fc95ef19371934999910391bd01a47c0eee..48b94b89a2fc183b89fb4a375d1c90e189d2c4ce 100644
--- a/doc/slides/bigpicture/penguin.eps
+++ b/doc/slides/bigpicture/penguin.eps
@@ -17,7 +17,7 @@
 %%BeginProlog
 %%BeginResource: procset Adobe_packedarray 2.0 0
 %%Title: (Packed Array Operators)
-%%Version: 2.0 
+%%Version: 2.0
 %%CreationDate: (8/2/90) ()
 %%Copyright: ((C) 1987-1990 Adobe Systems Incorporated All Rights Reserved)
 userdict /Adobe_packedarray 5 dict dup begin put
@@ -60,7 +60,7 @@ currentdict readonly pop end
 Adobe_packedarray /initialize get exec
 %%BeginResource: procset Adobe_cshow 1.1 0
 %%Title: (cshow Operator)
-%%Version: 1.1 
+%%Version: 1.1
 %%CreationDate: (1/23/89) ()
 %%Copyright: ((C) 1987-1990 Adobe Systems Incorporated All Rights Reserved)
 currentpacking true setpacking
@@ -106,7 +106,7 @@ setpacking
 %%EndResource
 %%BeginResource: procset Adobe_customcolor 1.0 0
 %%Title: (Custom Color Operators)
-%%Version: 1.0 
+%%Version: 1.0
 %%CreationDate: (5/9/88) ()
 %%Copyright: ((C) 1987-1990 Adobe Systems Incorporated All Rights Reserved)
 currentpacking true setpacking
@@ -162,7 +162,7 @@ setpacking
 %%EndResource
 %%BeginResource: procset Adobe_IllustratorA_AI3 1.1 3
 %%Title: (Adobe Illustrator (R) Version 3.0 Abbreviated Prolog)
-%%Version: 1.1 
+%%Version: 1.1
 %%CreationDate: (3/7/1994) ()
 %%Copyright: ((C) 1987-1994 Adobe Systems Incorporated All Rights Reserved)
 currentpacking true setpacking
@@ -181,7 +181,7 @@ userdict /Adobe_IllustratorA_AI3_vars 58 dict dup begin put
 /_doClip 0 def
 /cf	currentflat def
 /_tm matrix def
-/_renderStart [/e0 /r0 /a0 /o0 /e1 /r1 /a1 /i0] def 
+/_renderStart [/e0 /r0 /a0 /o0 /e1 /r1 /a1 /i0] def
 /_renderEnd [null null null null /i1 /i1 /i1 /i1] def
 /_render -1 def
 /_rise 0 def
@@ -268,9 +268,9 @@ exch 5 -1 roll 3 index mul add
 /swj
 {
 dup 4 1 roll
-dup length exch stringwidth 
+dup length exch stringwidth
 exch 5 -1 roll 3 index mul add
-4 1 roll 3 1 roll mul add 
+4 1 roll 3 1 roll mul add
 6 2 roll /_cnt 0 ddef
 {1 index eq {/_cnt _cnt 1 add ddef} if} forall pop
 exch _cnt mul exch _cnt mul 2 index add 4 1 roll 2 index add 4 1 roll pop pop
@@ -279,7 +279,7 @@ exch _cnt mul exch _cnt mul 2 index add 4 1 roll 2 index add 4 1 roll pop pop
 {
 4 1 roll
 	{
-	2 npop 
+	2 npop
 	(0) exch 2 copy 0 exch put pop
 	gsave
 	false charpath currentpoint
@@ -295,12 +295,12 @@ exch _cnt mul exch _cnt mul 2 index add 4 1 roll 2 index add 4 1 roll pop pop
 {
 4 1 roll
 	{
-	2 npop 
-	(0) exch 2 copy 0 exch put 
+	2 npop
+	(0) exch 2 copy 0 exch put
 	gsave
-	_sp eq 
+	_sp eq
 		{
-		exch 6 index 6 index 6 index 5 -1 roll widthshow  
+		exch 6 index 6 index 6 index 5 -1 roll widthshow
 		currentpoint
 		}
 		{
@@ -326,11 +326,11 @@ exch _cnt mul exch _cnt mul 2 index add 4 1 roll 2 index add 4 1 roll pop pop
 /jsp
 {
 	{
-	2 npop 
-	(0) exch 2 copy 0 exch put 
-	_sp eq 
+	2 npop
+	(0) exch 2 copy 0 exch put
+	_sp eq
 		{
-		exch 5 index 5 index 5 index 5 -1 roll widthshow  
+		exch 5 index 5 index 5 index 5 -1 roll widthshow
 		}
 		{
 		false charpath
@@ -445,11 +445,11 @@ closepath
 } def
 /N
 {
-_pola 0 eq 
+_pola 0 eq
 	{
-	_doClip 1 eq {clip /_doClip 0 ddef} if 
+	_doClip 1 eq {clip /_doClip 0 ddef} if
 	newpath
-	} 
+	}
 	{
 	/CRender {N} ddef
 	}ifelse
@@ -458,17 +458,17 @@ _pola 0 eq
 {N} def
 /F
 {
-_pola 0 eq 
+_pola 0 eq
 	{
-	_doClip 1 eq 
+	_doClip 1 eq
 		{
-		gsave _pf grestore clip newpath /_lp /none ddef _fc 
+		gsave _pf grestore clip newpath /_lp /none ddef _fc
 		/_doClip 0 ddef
 		}
 		{
 		_pf
 		}ifelse
-	} 
+	}
 	{
 	/CRender {F} ddef
 	}ifelse
@@ -480,17 +480,17 @@ F
 } def
 /S
 {
-_pola 0 eq 
+_pola 0 eq
 	{
-	_doClip 1 eq 
+	_doClip 1 eq
 		{
-		gsave _ps grestore clip newpath /_lp /none ddef _sc 
+		gsave _ps grestore clip newpath /_lp /none ddef _sc
 		/_doClip 0 ddef
 		}
 		{
 		_ps
 		}ifelse
-	} 
+	}
 	{
 	/CRender {S} ddef
 	}ifelse
@@ -502,14 +502,14 @@ S
 } def
 /B
 {
-_pola 0 eq 
+_pola 0 eq
 	{
 	_doClip 1 eq
-	gsave F grestore 
+	gsave F grestore
 		{
 		gsave S grestore clip newpath /_lp /none ddef _sc
 		/_doClip 0 ddef
-		} 
+		}
 		{
 		S
 		}ifelse
@@ -529,10 +529,10 @@ B
 } def
 /*
 {
-count 0 ne 
+count 0 ne
 	{
 	dup type (stringtype) eq {pop} if
-	} if 
+	} if
 _pola 0 eq {newpath} if
 } def
 /u
@@ -553,7 +553,7 @@ _pola 1 add /_pola exch ddef
 } def
 /*U
 {
-_pola 1 sub /_pola exch ddef 
+_pola 1 sub /_pola exch ddef
 _pola 0 eq {CRender} if
 } def
 /D
@@ -601,7 +601,7 @@ _i restore
 {
 /_gf exch ddef
 /_fc
-{ 
+{
 _lp /fill ne
 	{
 	_of setoverprint
@@ -719,7 +719,7 @@ jss
 findcmykcustomcolor
 /_if exch ddef
 /_fc
-{ 
+{
 _lp /fill ne
 	{
 	_of setoverprint
diff --git a/doc/slides/bigpicture/slides.tex b/doc/slides/bigpicture/slides.tex
index 1f9e8c4b4a63e15c5f6b767dca4682b0088195aa..94730b6f0156b68e281c47b0bb7b3e9da5aefc0f 100644
--- a/doc/slides/bigpicture/slides.tex
+++ b/doc/slides/bigpicture/slides.tex
@@ -22,9 +22,9 @@
 \renewcommand{\footrulewidth}{0.2mm}
 \fancyhead[C]{\Large\textbf{SLURM Design - The Big Picture}}
 %\fancyfoot[L]{\tiny\thedate}
-\fancyfoot[L]{\includegraphics[scale=0.075]{penguin.eps}\\\tiny LINUX} 
+\fancyfoot[L]{\includegraphics[scale=0.075]{penguin.eps}\\\tiny LINUX}
 %\fancyfoot[R]{\small LLNL}
-\fancyfoot[R]{\includegraphics[scale=0.2]{llnl.ps}\\\tiny LLNL} 
+\fancyfoot[R]{\includegraphics[scale=0.2]{llnl.ps}\\\tiny LLNL}
 \fancyfoot[C]{\tiny Page \theslide}
 
 % Create room for headers and footers
@@ -199,7 +199,7 @@
     \item{Controller sends list of hosts and {\em nonce}}
     \item{{\tt srun} connects to {\tt slurmd}'s and tails stdout/stderr}
     \item{{\tt srun} gets EOF on stdout/stderr streams on termination}
-    \item{Or Control-C sends abort message to {\tt slurmd's} and 
+    \item{Or Control-C sends abort message to {\tt slurmd's} and
           controller cleans up}
     \item{Way to detach {\tt srun} from job and let it continue as batch}
   \end{itemize}
diff --git a/doc/slides/job-initiation/Makefile b/doc/slides/job-initiation/Makefile
index 47ab91891640ad97aec5c6d86db7b1725a6cbc4b..f803225838b8b7a2c9aea8dbd32bf64834bd289a 100644
--- a/doc/slides/job-initiation/Makefile
+++ b/doc/slides/job-initiation/Makefile
@@ -9,7 +9,7 @@ viewps: slides.ps
 clean:
 	rm -f slides.dvi slides.aux slides.log slides.ps
 
-slides.dvi: slides.tex penguin.eps 
+slides.dvi: slides.tex penguin.eps
 	latex slides
 slides.ps: slides.dvi
 	dvips -O 0.75in,0 -f <slides.dvi >$@  # slides 1-per page
@@ -18,5 +18,5 @@ slides.ps: slides.dvi
 	tgif -print -eps $<
 
 %.pdf: %.dvi
-	dvipdf $< $@ 
+	dvipdf $< $@
 
diff --git a/doc/slides/job-initiation/penguin.eps b/doc/slides/job-initiation/penguin.eps
index 46819fc95ef19371934999910391bd01a47c0eee..48b94b89a2fc183b89fb4a375d1c90e189d2c4ce 100644
--- a/doc/slides/job-initiation/penguin.eps
+++ b/doc/slides/job-initiation/penguin.eps
@@ -17,7 +17,7 @@
 %%BeginProlog
 %%BeginResource: procset Adobe_packedarray 2.0 0
 %%Title: (Packed Array Operators)
-%%Version: 2.0 
+%%Version: 2.0
 %%CreationDate: (8/2/90) ()
 %%Copyright: ((C) 1987-1990 Adobe Systems Incorporated All Rights Reserved)
 userdict /Adobe_packedarray 5 dict dup begin put
@@ -60,7 +60,7 @@ currentdict readonly pop end
 Adobe_packedarray /initialize get exec
 %%BeginResource: procset Adobe_cshow 1.1 0
 %%Title: (cshow Operator)
-%%Version: 1.1 
+%%Version: 1.1
 %%CreationDate: (1/23/89) ()
 %%Copyright: ((C) 1987-1990 Adobe Systems Incorporated All Rights Reserved)
 currentpacking true setpacking
@@ -106,7 +106,7 @@ setpacking
 %%EndResource
 %%BeginResource: procset Adobe_customcolor 1.0 0
 %%Title: (Custom Color Operators)
-%%Version: 1.0 
+%%Version: 1.0
 %%CreationDate: (5/9/88) ()
 %%Copyright: ((C) 1987-1990 Adobe Systems Incorporated All Rights Reserved)
 currentpacking true setpacking
@@ -162,7 +162,7 @@ setpacking
 %%EndResource
 %%BeginResource: procset Adobe_IllustratorA_AI3 1.1 3
 %%Title: (Adobe Illustrator (R) Version 3.0 Abbreviated Prolog)
-%%Version: 1.1 
+%%Version: 1.1
 %%CreationDate: (3/7/1994) ()
 %%Copyright: ((C) 1987-1994 Adobe Systems Incorporated All Rights Reserved)
 currentpacking true setpacking
@@ -181,7 +181,7 @@ userdict /Adobe_IllustratorA_AI3_vars 58 dict dup begin put
 /_doClip 0 def
 /cf	currentflat def
 /_tm matrix def
-/_renderStart [/e0 /r0 /a0 /o0 /e1 /r1 /a1 /i0] def 
+/_renderStart [/e0 /r0 /a0 /o0 /e1 /r1 /a1 /i0] def
 /_renderEnd [null null null null /i1 /i1 /i1 /i1] def
 /_render -1 def
 /_rise 0 def
@@ -268,9 +268,9 @@ exch 5 -1 roll 3 index mul add
 /swj
 {
 dup 4 1 roll
-dup length exch stringwidth 
+dup length exch stringwidth
 exch 5 -1 roll 3 index mul add
-4 1 roll 3 1 roll mul add 
+4 1 roll 3 1 roll mul add
 6 2 roll /_cnt 0 ddef
 {1 index eq {/_cnt _cnt 1 add ddef} if} forall pop
 exch _cnt mul exch _cnt mul 2 index add 4 1 roll 2 index add 4 1 roll pop pop
@@ -279,7 +279,7 @@ exch _cnt mul exch _cnt mul 2 index add 4 1 roll 2 index add 4 1 roll pop pop
 {
 4 1 roll
 	{
-	2 npop 
+	2 npop
 	(0) exch 2 copy 0 exch put pop
 	gsave
 	false charpath currentpoint
@@ -295,12 +295,12 @@ exch _cnt mul exch _cnt mul 2 index add 4 1 roll 2 index add 4 1 roll pop pop
 {
 4 1 roll
 	{
-	2 npop 
-	(0) exch 2 copy 0 exch put 
+	2 npop
+	(0) exch 2 copy 0 exch put
 	gsave
-	_sp eq 
+	_sp eq
 		{
-		exch 6 index 6 index 6 index 5 -1 roll widthshow  
+		exch 6 index 6 index 6 index 5 -1 roll widthshow
 		currentpoint
 		}
 		{
@@ -326,11 +326,11 @@ exch _cnt mul exch _cnt mul 2 index add 4 1 roll 2 index add 4 1 roll pop pop
 /jsp
 {
 	{
-	2 npop 
-	(0) exch 2 copy 0 exch put 
-	_sp eq 
+	2 npop
+	(0) exch 2 copy 0 exch put
+	_sp eq
 		{
-		exch 5 index 5 index 5 index 5 -1 roll widthshow  
+		exch 5 index 5 index 5 index 5 -1 roll widthshow
 		}
 		{
 		false charpath
@@ -445,11 +445,11 @@ closepath
 } def
 /N
 {
-_pola 0 eq 
+_pola 0 eq
 	{
-	_doClip 1 eq {clip /_doClip 0 ddef} if 
+	_doClip 1 eq {clip /_doClip 0 ddef} if
 	newpath
-	} 
+	}
 	{
 	/CRender {N} ddef
 	}ifelse
@@ -458,17 +458,17 @@ _pola 0 eq
 {N} def
 /F
 {
-_pola 0 eq 
+_pola 0 eq
 	{
-	_doClip 1 eq 
+	_doClip 1 eq
 		{
-		gsave _pf grestore clip newpath /_lp /none ddef _fc 
+		gsave _pf grestore clip newpath /_lp /none ddef _fc
 		/_doClip 0 ddef
 		}
 		{
 		_pf
 		}ifelse
-	} 
+	}
 	{
 	/CRender {F} ddef
 	}ifelse
@@ -480,17 +480,17 @@ F
 } def
 /S
 {
-_pola 0 eq 
+_pola 0 eq
 	{
-	_doClip 1 eq 
+	_doClip 1 eq
 		{
-		gsave _ps grestore clip newpath /_lp /none ddef _sc 
+		gsave _ps grestore clip newpath /_lp /none ddef _sc
 		/_doClip 0 ddef
 		}
 		{
 		_ps
 		}ifelse
-	} 
+	}
 	{
 	/CRender {S} ddef
 	}ifelse
@@ -502,14 +502,14 @@ S
 } def
 /B
 {
-_pola 0 eq 
+_pola 0 eq
 	{
 	_doClip 1 eq
-	gsave F grestore 
+	gsave F grestore
 		{
 		gsave S grestore clip newpath /_lp /none ddef _sc
 		/_doClip 0 ddef
-		} 
+		}
 		{
 		S
 		}ifelse
@@ -529,10 +529,10 @@ B
 } def
 /*
 {
-count 0 ne 
+count 0 ne
 	{
 	dup type (stringtype) eq {pop} if
-	} if 
+	} if
 _pola 0 eq {newpath} if
 } def
 /u
@@ -553,7 +553,7 @@ _pola 1 add /_pola exch ddef
 } def
 /*U
 {
-_pola 1 sub /_pola exch ddef 
+_pola 1 sub /_pola exch ddef
 _pola 0 eq {CRender} if
 } def
 /D
@@ -601,7 +601,7 @@ _i restore
 {
 /_gf exch ddef
 /_fc
-{ 
+{
 _lp /fill ne
 	{
 	_of setoverprint
@@ -719,7 +719,7 @@ jss
 findcmykcustomcolor
 /_if exch ddef
 /_fc
-{ 
+{
 _lp /fill ne
 	{
 	_of setoverprint
diff --git a/doc/slides/job-initiation/slides.tex b/doc/slides/job-initiation/slides.tex
index 5b03a84b80cdee45f776842dd09736a27a7bd153..84d8646155c34934237408eb86bef66c81747b02 100644
--- a/doc/slides/job-initiation/slides.tex
+++ b/doc/slides/job-initiation/slides.tex
@@ -96,9 +96,9 @@
   \begin{center}
   Overall Design Goals\\
   \begin{itemize}
-     \item Scalable 
+     \item Scalable
      \item Fault Tolerant
-     \item Simplified 
+     \item Simplified
      \item Administrator Friendly
      \item Secure
   \end{itemize}
@@ -159,15 +159,15 @@
   \slideheading{\slurmd\ Initialization}
   \begin{center}
   \begin{itemize}
-  \item On startup, \slurmd\ will generate a private key  and 
+  \item On startup, \slurmd\ will generate a private key  and
         attempt to contact slurm controller to join the cluster.
-  \item If slurm controller is not up at time of \slurmd\ intialization, 
-        \slurmd\ will sleep and listen on a well-defined mongo-port for status 
-	requests from controller, or other entity. 
+  \item If slurm controller is not up at time of \slurmd\ initialization,
+        \slurmd\ will sleep and listen on a well-defined mongo-port for status
+	requests from controller, or other entity.
   \item On first contact with controller, \slurmd\ will exchange secret key
         by setting flag {\tt REFRESH\_KEY} in the corresponding message.
   \item After private key has been exchanged, \slurmd\ will wait for
-        further work 
+        further work
   \end{itemize}
   \end{center}
 \end{slide}
@@ -177,8 +177,8 @@
   \begin{center}
   \begin{itemize}
   \item For an interactive job, \srun\ will build an allocate request
-        to send to the controller, based upon user options. 
-  \item the {\tt MSG\_ALLOCATE\_REQ} message will contain the 
+        to send to the controller, based upon user options.
+  \item the {\tt MSG\_ALLOCATE\_REQ} message will contain the
         following information:
   \begin{itemize}
     \item immediate or block (flag)
@@ -189,7 +189,7 @@
   \end{itemize}
     optional:
   \begin{itemize}
-    \item requested time limit 
+    \item requested time limit
     \item constraint and requested feature list
   \end{itemize}
   \end{itemize}
@@ -208,7 +208,7 @@
     \item return and error code
     \item node list
     \item credential list (1 per node)
-    \item cpus per node 
+    \item cpus per node
   \end{itemize}
   \item if return code indicates an error, \srun\ will print
         text representation of error code on stderr and exit
@@ -259,13 +259,13 @@
   \slideheading{Interactive Job Initiation: cont'd ... }
   \begin{center}
   \begin{itemize}
-    \item One job run request message is built per node - \srun\ 
+    \item One job run request message is built per node - \srun\
           sends these messages to all nodes using a {\tt mongo\_sendto:}\\
-	  \begin{verbatim} md = mongo_sendto <host>:<mongo_port>, &addr 
+	  \begin{verbatim} md = mongo_sendto <host>:<mongo_port>, &addr
 	  \end{verbatim}
     \item \srun\ then waits for reply messages from all \slurmd 's
           with {\tt mongo\_recvfrom} or {\tt mongo\_select}
-    \item After a timeout, \srun\ sends status update to \slurmctld\ 
+    \item After a timeout, \srun\ sends status update to \slurmctld\
   \end{itemize}
   \end{center}
 \end{slide}
@@ -274,16 +274,16 @@
   \slideheading{Interactive Job Initiation: \slurmd}
   \begin{center}
   \begin{itemize}
-    \item \slurmd\ receives a message from \srun\ and spawns a 
+    \item \slurmd\ receives a message from \srun\ and spawns a
           request thread ({\tt req\_thr})
-    \item {\tt req\_thr} reads message header, sees that the message is 
+    \item {\tt req\_thr} reads message header, sees that the message is
           of type MSG\_JOB\_RUN\_REQUEST and executes the {\tt job\_thr()}
 	  function.
-    \item {\tt job\_thr} unpacks message body, then:  
-    \begin{itemize} 
+    \item {\tt job\_thr} unpacks message body, then:
+    \begin{itemize}
       \item if credential is non-null, decrypt credential to authorize job
       \item null credential: fall through to PAM
-      \item if job and user are authorized, instantiate a task thread for 
+      \item if job and user are authorized, instantiate a task thread for
             each task requested and send MSG\_JOB\_RUN\_REPLY.
     \end{itemize}
  \end{itemize}
@@ -308,7 +308,7 @@
     \item {\tt job\_thr} continues to listen for job control information
           over original connection (signals, cancel, etc.)
     \item {\tt job\_thr} on node 0 of job will monitor connection to
-          \srun\ and will initiate a new \srun\ on local node if 
+          \srun\ and will initiate a new \srun\ on local node if
 	  necessary.
   \end{itemize}
   \end{center}
@@ -335,7 +335,7 @@
   \begin{center}
   \begin{itemize}
     \item a batch submission consists of a script that will be invoked
-          on only one node in the allocation 
+          on only one node in the allocation
     \item \srun\ sends MSG\_BATCH\_SUBMIT\_REQ to \slurmctld , which contains
     \begin{itemize}
       \item user info
@@ -384,13 +384,13 @@
     \item \srun\ must query \slurmctld , however, for two reasons:
     \begin{itemize}
       \item \srun\ doesn't know number of cpus for each node
-      \item safer for \srun\ to obtain switch resources from controller 
+      \item safer for \srun\ to obtain switch resources from controller
     \end{itemize}
     \item Therefore, \srun\ will first query \slurmctld\ for details of
           its allocation
     \item \srun\ will then process command line options relative to this
           allocation. If a subset of nodes is specified, \srun\ will need
-	  to requery the controller for switch resources. (We may be 
+	  to requery the controller for switch resources. (We may be
 	  able to get away without doing this $2^{nd}$ query)
   \end{itemize}
   \end{center}
@@ -418,10 +418,10 @@
 %    \item User specifies job name or jobid on command line
 %    \item \srun\ connects to \slurmctld\ and sends a MSG\_JOB\_ATTACH\_REQ:
 %    \begin{itemize}
-%      \item job id 
+%      \item job id
 %      \item job name
 %    \end{itemize}
-%    \item MSG\_JOB\_ATTACH\_REPLY contains similar information as 
+%    \item MSG\_JOB\_ATTACH\_REPLY contains similar information as
 %          MSG\_ALLOCATE\_REPLY
 %    \item \srun\ now has credentials to contact \slurmds\ in job
 %  \end{itemize}
diff --git a/doc/slides/qsnet/Makefile b/doc/slides/qsnet/Makefile
index c4b9d96e61e04702ac245a42cc76ce7f9e3869be..48f710ee52d74464af4c5535ea610c57a7edd164 100644
--- a/doc/slides/qsnet/Makefile
+++ b/doc/slides/qsnet/Makefile
@@ -18,5 +18,5 @@ slides.ps: slides.dvi
 	tgif -print -eps $<
 
 %.pdf: %.dvi
-	dvipdf $< $@ 
+	dvipdf $< $@
 
diff --git a/doc/slides/qsnet/penguin.eps b/doc/slides/qsnet/penguin.eps
index 46819fc95ef19371934999910391bd01a47c0eee..48b94b89a2fc183b89fb4a375d1c90e189d2c4ce 100644
--- a/doc/slides/qsnet/penguin.eps
+++ b/doc/slides/qsnet/penguin.eps
@@ -17,7 +17,7 @@
 %%BeginProlog
 %%BeginResource: procset Adobe_packedarray 2.0 0
 %%Title: (Packed Array Operators)
-%%Version: 2.0 
+%%Version: 2.0
 %%CreationDate: (8/2/90) ()
 %%Copyright: ((C) 1987-1990 Adobe Systems Incorporated All Rights Reserved)
 userdict /Adobe_packedarray 5 dict dup begin put
@@ -60,7 +60,7 @@ currentdict readonly pop end
 Adobe_packedarray /initialize get exec
 %%BeginResource: procset Adobe_cshow 1.1 0
 %%Title: (cshow Operator)
-%%Version: 1.1 
+%%Version: 1.1
 %%CreationDate: (1/23/89) ()
 %%Copyright: ((C) 1987-1990 Adobe Systems Incorporated All Rights Reserved)
 currentpacking true setpacking
@@ -106,7 +106,7 @@ setpacking
 %%EndResource
 %%BeginResource: procset Adobe_customcolor 1.0 0
 %%Title: (Custom Color Operators)
-%%Version: 1.0 
+%%Version: 1.0
 %%CreationDate: (5/9/88) ()
 %%Copyright: ((C) 1987-1990 Adobe Systems Incorporated All Rights Reserved)
 currentpacking true setpacking
@@ -162,7 +162,7 @@ setpacking
 %%EndResource
 %%BeginResource: procset Adobe_IllustratorA_AI3 1.1 3
 %%Title: (Adobe Illustrator (R) Version 3.0 Abbreviated Prolog)
-%%Version: 1.1 
+%%Version: 1.1
 %%CreationDate: (3/7/1994) ()
 %%Copyright: ((C) 1987-1994 Adobe Systems Incorporated All Rights Reserved)
 currentpacking true setpacking
@@ -181,7 +181,7 @@ userdict /Adobe_IllustratorA_AI3_vars 58 dict dup begin put
 /_doClip 0 def
 /cf	currentflat def
 /_tm matrix def
-/_renderStart [/e0 /r0 /a0 /o0 /e1 /r1 /a1 /i0] def 
+/_renderStart [/e0 /r0 /a0 /o0 /e1 /r1 /a1 /i0] def
 /_renderEnd [null null null null /i1 /i1 /i1 /i1] def
 /_render -1 def
 /_rise 0 def
@@ -268,9 +268,9 @@ exch 5 -1 roll 3 index mul add
 /swj
 {
 dup 4 1 roll
-dup length exch stringwidth 
+dup length exch stringwidth
 exch 5 -1 roll 3 index mul add
-4 1 roll 3 1 roll mul add 
+4 1 roll 3 1 roll mul add
 6 2 roll /_cnt 0 ddef
 {1 index eq {/_cnt _cnt 1 add ddef} if} forall pop
 exch _cnt mul exch _cnt mul 2 index add 4 1 roll 2 index add 4 1 roll pop pop
@@ -279,7 +279,7 @@ exch _cnt mul exch _cnt mul 2 index add 4 1 roll 2 index add 4 1 roll pop pop
 {
 4 1 roll
 	{
-	2 npop 
+	2 npop
 	(0) exch 2 copy 0 exch put pop
 	gsave
 	false charpath currentpoint
@@ -295,12 +295,12 @@ exch _cnt mul exch _cnt mul 2 index add 4 1 roll 2 index add 4 1 roll pop pop
 {
 4 1 roll
 	{
-	2 npop 
-	(0) exch 2 copy 0 exch put 
+	2 npop
+	(0) exch 2 copy 0 exch put
 	gsave
-	_sp eq 
+	_sp eq
 		{
-		exch 6 index 6 index 6 index 5 -1 roll widthshow  
+		exch 6 index 6 index 6 index 5 -1 roll widthshow
 		currentpoint
 		}
 		{
@@ -326,11 +326,11 @@ exch _cnt mul exch _cnt mul 2 index add 4 1 roll 2 index add 4 1 roll pop pop
 /jsp
 {
 	{
-	2 npop 
-	(0) exch 2 copy 0 exch put 
-	_sp eq 
+	2 npop
+	(0) exch 2 copy 0 exch put
+	_sp eq
 		{
-		exch 5 index 5 index 5 index 5 -1 roll widthshow  
+		exch 5 index 5 index 5 index 5 -1 roll widthshow
 		}
 		{
 		false charpath
@@ -445,11 +445,11 @@ closepath
 } def
 /N
 {
-_pola 0 eq 
+_pola 0 eq
 	{
-	_doClip 1 eq {clip /_doClip 0 ddef} if 
+	_doClip 1 eq {clip /_doClip 0 ddef} if
 	newpath
-	} 
+	}
 	{
 	/CRender {N} ddef
 	}ifelse
@@ -458,17 +458,17 @@ _pola 0 eq
 {N} def
 /F
 {
-_pola 0 eq 
+_pola 0 eq
 	{
-	_doClip 1 eq 
+	_doClip 1 eq
 		{
-		gsave _pf grestore clip newpath /_lp /none ddef _fc 
+		gsave _pf grestore clip newpath /_lp /none ddef _fc
 		/_doClip 0 ddef
 		}
 		{
 		_pf
 		}ifelse
-	} 
+	}
 	{
 	/CRender {F} ddef
 	}ifelse
@@ -480,17 +480,17 @@ F
 } def
 /S
 {
-_pola 0 eq 
+_pola 0 eq
 	{
-	_doClip 1 eq 
+	_doClip 1 eq
 		{
-		gsave _ps grestore clip newpath /_lp /none ddef _sc 
+		gsave _ps grestore clip newpath /_lp /none ddef _sc
 		/_doClip 0 ddef
 		}
 		{
 		_ps
 		}ifelse
-	} 
+	}
 	{
 	/CRender {S} ddef
 	}ifelse
@@ -502,14 +502,14 @@ S
 } def
 /B
 {
-_pola 0 eq 
+_pola 0 eq
 	{
 	_doClip 1 eq
-	gsave F grestore 
+	gsave F grestore
 		{
 		gsave S grestore clip newpath /_lp /none ddef _sc
 		/_doClip 0 ddef
-		} 
+		}
 		{
 		S
 		}ifelse
@@ -529,10 +529,10 @@ B
 } def
 /*
 {
-count 0 ne 
+count 0 ne
 	{
 	dup type (stringtype) eq {pop} if
-	} if 
+	} if
 _pola 0 eq {newpath} if
 } def
 /u
@@ -553,7 +553,7 @@ _pola 1 add /_pola exch ddef
 } def
 /*U
 {
-_pola 1 sub /_pola exch ddef 
+_pola 1 sub /_pola exch ddef
 _pola 0 eq {CRender} if
 } def
 /D
@@ -601,7 +601,7 @@ _i restore
 {
 /_gf exch ddef
 /_fc
-{ 
+{
 _lp /fill ne
 	{
 	_of setoverprint
@@ -719,7 +719,7 @@ jss
 findcmykcustomcolor
 /_if exch ddef
 /_fc
-{ 
+{
 _lp /fill ne
 	{
 	_of setoverprint
diff --git a/doc/slides/qsnet/slides.tex b/doc/slides/qsnet/slides.tex
index fab9e22122fb1baa1467b69694030bffbd07f8b7..43eef770d54fbad30cdbc5ae4a2c40b05200156c 100644
--- a/doc/slides/qsnet/slides.tex
+++ b/doc/slides/qsnet/slides.tex
@@ -22,9 +22,9 @@
 \renewcommand{\footrulewidth}{0.2mm}
 \fancyhead[C]{\Large\textbf{QsNet Support}}
 %\fancyfoot[L]{\tiny\thedate}
-\fancyfoot[L]{\includegraphics[scale=0.075]{penguin.eps}\\\tiny LINUX} 
+\fancyfoot[L]{\includegraphics[scale=0.075]{penguin.eps}\\\tiny LINUX}
 %\fancyfoot[R]{\small LLNL}
-\fancyfoot[R]{\includegraphics[scale=0.2]{llnl.ps}\\\tiny LLNL} 
+\fancyfoot[R]{\includegraphics[scale=0.2]{llnl.ps}\\\tiny LLNL}
 \fancyfoot[C]{\tiny Page \theslide}
 
 % Create room for headers and footers
@@ -116,10 +116,10 @@
 \begin{slide}
   \slideheading{Client Side Functions}
   \small
-  Create all the interconnect information necessary to start a parallel 
+  Create all the interconnect information necessary to start a parallel
   program including Elan capability, program description number, etc..
   \begin{verbatim}
-  int qsw_setup_jobinfo(qsw_jobinfo_t job, int nprocs, 
+  int qsw_setup_jobinfo(qsw_jobinfo_t job, int nprocs,
                         bitstr_t *nodeset, int cyclic_alloc);
   \end{verbatim}
   Allocate/free a {\tt qsw\_jobinfo\_t} to represent job information.
@@ -149,7 +149,7 @@
   int qsw_prog_init(qsw_jobinfo_t jobinfo, uid_t uid);
   int qsw_prog_fini(qsw_jobinfo_t jobinfo);
   \end{verbatim}
-  3: (N instances) Attach to a hardware context with {\tt qsw\_attach}.  
+  3: (N instances) Attach to a hardware context with {\tt qsw\_attach}.
   Set up environment.  Fork.  Wait.  Exit.
   \begin{verbatim}
   int qsw_attach(qsw_jobinfo_t jobinfo, int procnum);
@@ -167,7 +167,7 @@
   \item Pack/Unpack routines translate between host/network byte order.
   Macros used to permit assertions about size of incoming data types before
   type promotion.
-  \item bitstr\_t used to represent ``nodeset''.  Bit positions are elan 
+  \item bitstr\_t used to represent ``nodeset''.  Bit positions are elan
   addresses.  There is no provision for converting between Elan address and
   hostname.
   \item Elan include files are not required outside of qsw.c.
@@ -198,6 +198,6 @@
   unpack32(&ls->ls_hwcontext, &data, &len);
   \end{verbatim}
 \end{slide}
- 
+
 
 \end{document}
diff --git a/doc/slides/users/Makefile b/doc/slides/users/Makefile
index ebb8bcf9881651ab5fdffaab4bbcf31a0a033e40..2ebcd21a35ef4475dd23a1b467de52f1e822d10a 100644
--- a/doc/slides/users/Makefile
+++ b/doc/slides/users/Makefile
@@ -4,7 +4,7 @@ all: slides
 clean:
 	rm -f slides.dvi slides.aux slides.log slides.ps slides.pdf slides.out
 
-slides: slides.tex 
+slides: slides.tex
 	(TEXINPUTS=.:./tex:./figures:../../common::; export TEXINPUTS; \
 	 latex slides; latex slides; \
 	 dvips -K -Ppdf -G0 slides.dvi -o slides.ps; \
diff --git a/doc/slides/users/figures/arch.eps b/doc/slides/users/figures/arch.eps
index 4b4f1dea9e582deef295b70b90f94c01cee22ba7..3e4f04b3b9ea0d004bd2cb9bb21ce46a5adcdb2b 100644
--- a/doc/slides/users/figures/arch.eps
+++ b/doc/slides/users/figures/arch.eps
@@ -440,7 +440,7 @@ start_ol
 1024 1642 1281 1722 1554 1722 curveto
 1779 1722 1956 1681 2106 1591 curveto
 2138 1550 lineto
-end_ol grestore 
+end_ol grestore
 gsave 2.939330 13.825000 translate 0.035278 -0.035278 scale
 start_ol
 2769 1898 moveto
@@ -466,7 +466,7 @@ start_ol
 2593 1229 2671 1265 2722 1324 curveto
 2759 1364 2769 1410 2769 1527 curveto
 2769 1898 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.354197 13.825000 translate 0.035278 -0.035278 scale
 start_ol
 1984 0 moveto
@@ -497,7 +497,7 @@ start_ol
 704 866 1033 1024 1394 1024 curveto
 1575 1024 1772 1003 1984 966 curveto
 1984 702 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.769063 13.825000 translate 0.035278 -0.035278 scale
 start_ol
 1024 2240 moveto
@@ -532,7 +532,7 @@ start_ol
 2157 2174 1948 2240 1720 2240 curveto
 1441 2240 1268 2170 1024 1954 curveto
 1024 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.183930 13.825000 translate 0.035278 -0.035278 scale
 start_ol
 2769 1898 moveto
@@ -558,7 +558,7 @@ start_ol
 2593 1229 2671 1265 2722 1324 curveto
 2759 1364 2769 1410 2769 1527 curveto
 2769 1898 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.598797 13.825000 translate 0.035278 -0.035278 scale
 start_ol
 2851 896 moveto
@@ -577,7 +577,7 @@ start_ol
 829 1571 1135 1722 1514 1722 curveto
 1892 1722 2193 1575 2276 1344 curveto
 752 1344 lineto
-end_ol grestore 
+end_ol grestore
 gsave 5.013663 13.825000 translate 0.035278 -0.035278 scale
 start_ol
 1792 3200 moveto
@@ -600,7 +600,7 @@ start_ol
 2606 508 2564 518 2424 518 curveto
 1792 518 lineto
 1792 3200 lineto
-end_ol grestore 
+end_ol grestore
 0.100000 slw
 [] 0 sd
 [] 0 sd
@@ -645,7 +645,7 @@ start_ol
 1024 1642 1281 1722 1554 1722 curveto
 1779 1722 1956 1681 2106 1591 curveto
 2138 1550 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.553163 16.325000 translate 0.035278 -0.035278 scale
 start_ol
 1408 2240 moveto
@@ -676,7 +676,7 @@ start_ol
 2621 2179 2455 2240 2274 2240 curveto
 2020 2240 1823 2155 1408 1857 curveto
 1408 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.968030 16.325000 translate 0.035278 -0.035278 scale
 start_ol
 2624 2240 moveto
@@ -706,7 +706,7 @@ start_ol
 2894 111 2940 215 2940 326 curveto
 2940 449 2826 518 2624 518 curveto
 2624 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.382897 16.325000 translate 0.035278 -0.035278 scale
 start_ol
 1024 2240 moveto
@@ -741,7 +741,7 @@ start_ol
 2157 2174 1948 2240 1720 2240 curveto
 1441 2240 1268 2170 1024 1954 curveto
 1024 2240 lineto
-end_ol grestore 
+end_ol grestore
 0.100000 slw
 [] 0 sd
 [] 0 sd
@@ -786,7 +786,7 @@ start_ol
 1024 1642 1281 1722 1554 1722 curveto
 1779 1722 1956 1681 2106 1591 curveto
 2138 1550 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.142530 11.075000 translate 0.035278 -0.035278 scale
 start_ol
 2240 -576 moveto
@@ -822,7 +822,7 @@ start_ol
 802 820 704 991 704 1149 curveto
 704 1307 802 1478 951 1584 curveto
 1085 1674 1256 1722 1467 1722 curveto
-end_ol grestore 
+end_ol grestore
 gsave 3.557397 11.075000 translate 0.035278 -0.035278 scale
 start_ol
 2624 2240 moveto
@@ -852,7 +852,7 @@ start_ol
 2894 111 2940 215 2940 326 curveto
 2940 449 2826 518 2624 518 curveto
 2624 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.972263 11.075000 translate 0.035278 -0.035278 scale
 start_ol
 2851 896 moveto
@@ -871,7 +871,7 @@ start_ol
 829 1571 1135 1722 1514 1722 curveto
 1892 1722 2193 1575 2276 1344 curveto
 752 1344 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.387130 11.075000 translate 0.035278 -0.035278 scale
 start_ol
 2624 2240 moveto
@@ -901,7 +901,7 @@ start_ol
 2894 111 2940 215 2940 326 curveto
 2940 449 2826 518 2624 518 curveto
 2624 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.801997 11.075000 translate 0.035278 -0.035278 scale
 start_ol
 2851 896 moveto
@@ -920,7 +920,7 @@ start_ol
 829 1571 1135 1722 1514 1722 curveto
 1892 1722 2193 1575 2276 1344 curveto
 752 1344 lineto
-end_ol grestore 
+end_ol grestore
 0.100000 slw
 [] 0 sd
 [] 0 sd
@@ -965,7 +965,7 @@ start_ol
 1024 1642 1281 1722 1554 1722 curveto
 1779 1722 1956 1681 2106 1591 curveto
 2138 1550 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.349963 8.325000 translate 0.035278 -0.035278 scale
 start_ol
 1792 2240 moveto
@@ -993,7 +993,7 @@ start_ol
 1088 2656 lineto
 1728 2656 lineto
 1728 3200 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.764830 8.325000 translate 0.035278 -0.035278 scale
 start_ol
 1024 2240 moveto
@@ -1028,7 +1028,7 @@ start_ol
 2157 2174 1948 2240 1720 2240 curveto
 1441 2240 1268 2170 1024 1954 curveto
 1024 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.179697 8.325000 translate 0.035278 -0.035278 scale
 start_ol
 1536 1722 moveto
@@ -1066,7 +1066,7 @@ start_ol
 2444 508 2397 518 2257 518 curveto
 1536 518 lineto
 1536 1722 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.594563 8.325000 translate 0.035278 -0.035278 scale
 start_ol
 1552 2240 moveto
@@ -1079,7 +1079,7 @@ start_ol
 2368 781 2005 518 1536 518 curveto
 1067 518 704 781 704 1120 curveto
 704 1464 1067 1722 1547 1722 curveto
-end_ol grestore 
+end_ol grestore
 0.462745 0.615686 0.462745 srgb
 n 12.150000 9.000000 m 12.150000 12.650000 l 20.350000 12.650000 l 20.350000 9.000000 l f
 n 12.150000 9.400000 m 12.150000 9.400000 0.400000 0.400000 180.000000 270.000000 ellipse f
@@ -1132,7 +1132,7 @@ start_ol
 1024 1642 1281 1722 1554 1722 curveto
 1779 1722 1956 1681 2106 1591 curveto
 2138 1550 lineto
-end_ol grestore 
+end_ol grestore
 gsave 14.793733 10.575000 translate 0.035278 -0.035278 scale
 start_ol
 1792 3200 moveto
@@ -1155,7 +1155,7 @@ start_ol
 2606 508 2564 518 2424 518 curveto
 1792 518 lineto
 1792 3200 lineto
-end_ol grestore 
+end_ol grestore
 gsave 15.208600 10.575000 translate 0.035278 -0.035278 scale
 start_ol
 2624 2240 moveto
@@ -1185,7 +1185,7 @@ start_ol
 2894 111 2940 215 2940 326 curveto
 2940 449 2826 518 2624 518 curveto
 2624 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 15.623467 10.575000 translate 0.035278 -0.035278 scale
 start_ol
 1408 2240 moveto
@@ -1216,7 +1216,7 @@ start_ol
 2621 2179 2455 2240 2274 2240 curveto
 2020 2240 1823 2155 1408 1857 curveto
 1408 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 16.038333 10.575000 translate 0.035278 -0.035278 scale
 start_ol
 704 2240 moveto
@@ -1253,7 +1253,7 @@ start_ol
 1514 2157 1344 2240 1143 2240 curveto
 988 2240 885 2199 704 2071 curveto
 704 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 16.453200 10.575000 translate 0.035278 -0.035278 scale
 start_ol
 2769 1898 moveto
@@ -1279,7 +1279,7 @@ start_ol
 2593 1229 2671 1265 2722 1324 curveto
 2759 1364 2769 1410 2769 1527 curveto
 2769 1898 lineto
-end_ol grestore 
+end_ol grestore
 gsave 16.868067 10.575000 translate 0.035278 -0.035278 scale
 start_ol
 1088 1722 moveto
@@ -1311,7 +1311,7 @@ start_ol
 1337 518 1192 551 1129 613 curveto
 1098 646 1088 689 1088 789 curveto
 1088 1722 lineto
-end_ol grestore 
+end_ol grestore
 gsave 17.282933 10.575000 translate 0.035278 -0.035278 scale
 start_ol
 1792 3200 moveto
@@ -1334,7 +1334,7 @@ start_ol
 2606 508 2564 518 2424 518 curveto
 1792 518 lineto
 1792 3200 lineto
-end_ol grestore 
+end_ol grestore
 gsave 17.697800 10.575000 translate 0.035278 -0.035278 scale
 start_ol
 2752 3137 moveto
@@ -1368,7 +1368,7 @@ start_ol
 776 818 704 968 704 1122 curveto
 704 1272 776 1427 905 1540 curveto
 1049 1667 1219 1722 1467 1722 curveto
-end_ol grestore 
+end_ol grestore
 gsave 14.472000 11.475000 translate 0.035278 -0.035278 scale
 start_ol
 2241 3276 moveto
@@ -1382,7 +1382,7 @@ start_ol
 1920 1768 2081 2265 2444 2846 curveto
 2496 2929 2506 2960 2506 3022 curveto
 2506 3162 2392 3276 2241 3276 curveto
-end_ol grestore 
+end_ol grestore
 gsave 14.886867 11.475000 translate 0.035278 -0.035278 scale
 start_ol
 896 351 moveto
@@ -1417,7 +1417,7 @@ start_ol
 1211 576 896 815 896 1154 curveto
 896 1307 994 1483 1144 1584 curveto
 1279 1679 1449 1722 1667 1722 curveto
-end_ol grestore 
+end_ol grestore
 gsave 15.301733 11.475000 translate 0.035278 -0.035278 scale
 start_ol
 1408 2240 moveto
@@ -1448,7 +1448,7 @@ start_ol
 2621 2179 2455 2240 2274 2240 curveto
 2020 2240 1823 2155 1408 1857 curveto
 1408 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 15.716600 11.475000 translate 0.035278 -0.035278 scale
 start_ol
 1792 2240 moveto
@@ -1476,7 +1476,7 @@ start_ol
 1088 2656 lineto
 1728 2656 lineto
 1728 3200 lineto
-end_ol grestore 
+end_ol grestore
 gsave 16.131467 11.475000 translate 0.035278 -0.035278 scale
 start_ol
 704 2240 moveto
@@ -1513,7 +1513,7 @@ start_ol
 1514 2157 1344 2240 1143 2240 curveto
 988 2240 885 2199 704 2071 curveto
 704 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 16.546333 11.475000 translate 0.035278 -0.035278 scale
 start_ol
 1984 0 moveto
@@ -1544,7 +1544,7 @@ start_ol
 704 866 1033 1024 1394 1024 curveto
 1575 1024 1772 1003 1984 966 curveto
 1984 702 lineto
-end_ol grestore 
+end_ol grestore
 gsave 16.961200 11.475000 translate 0.035278 -0.035278 scale
 start_ol
 1408 2240 moveto
@@ -1575,7 +1575,7 @@ start_ol
 2621 2179 2455 2240 2274 2240 curveto
 2020 2240 1823 2155 1408 1857 curveto
 1408 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 17.376067 11.475000 translate 0.035278 -0.035278 scale
 start_ol
 1255 -15 moveto
@@ -1609,7 +1609,7 @@ start_ol
 197 2214 109 2120 109 1980 curveto
 109 1834 202 1741 363 1720 curveto
 1255 -15 lineto
-end_ol grestore 
+end_ol grestore
 gsave 17.790933 11.475000 translate 0.035278 -0.035278 scale
 start_ol
 895 3276 moveto
@@ -1623,7 +1623,7 @@ start_ol
 1569 5 1792 669 1792 1223 curveto
 1792 1794 1605 2390 1237 2976 curveto
 1087 3219 1019 3276 895 3276 curveto
-end_ol grestore 
+end_ol grestore
 0.870588 0.870588 0.870588 srgb
 n 16.150000 4.000000 m 16.150000 7.250000 l 22.350000 7.250000 l 22.350000 4.000000 l f
 n 16.150000 4.400000 m 16.150000 4.400000 0.400000 0.400000 180.000000 270.000000 ellipse f
@@ -1676,7 +1676,7 @@ start_ol
 1024 1642 1281 1722 1554 1722 curveto
 1779 1722 1956 1681 2106 1591 curveto
 2138 1550 lineto
-end_ol grestore 
+end_ol grestore
 gsave 17.793733 5.375000 translate 0.035278 -0.035278 scale
 start_ol
 1792 3200 moveto
@@ -1699,7 +1699,7 @@ start_ol
 2606 508 2564 518 2424 518 curveto
 1792 518 lineto
 1792 3200 lineto
-end_ol grestore 
+end_ol grestore
 gsave 18.208600 5.375000 translate 0.035278 -0.035278 scale
 start_ol
 2624 2240 moveto
@@ -1729,7 +1729,7 @@ start_ol
 2894 111 2940 215 2940 326 curveto
 2940 449 2826 518 2624 518 curveto
 2624 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 18.623467 5.375000 translate 0.035278 -0.035278 scale
 start_ol
 1408 2240 moveto
@@ -1760,7 +1760,7 @@ start_ol
 2621 2179 2455 2240 2274 2240 curveto
 2020 2240 1823 2155 1408 1857 curveto
 1408 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 19.038333 5.375000 translate 0.035278 -0.035278 scale
 start_ol
 704 2240 moveto
@@ -1797,7 +1797,7 @@ start_ol
 1514 2157 1344 2240 1143 2240 curveto
 988 2240 885 2199 704 2071 curveto
 704 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 19.453200 5.375000 translate 0.035278 -0.035278 scale
 start_ol
 2769 1898 moveto
@@ -1823,7 +1823,7 @@ start_ol
 2593 1229 2671 1265 2722 1324 curveto
 2759 1364 2769 1410 2769 1527 curveto
 2769 1898 lineto
-end_ol grestore 
+end_ol grestore
 gsave 19.868067 5.375000 translate 0.035278 -0.035278 scale
 start_ol
 1088 1722 moveto
@@ -1855,7 +1855,7 @@ start_ol
 1337 518 1192 551 1129 613 curveto
 1098 646 1088 689 1088 789 curveto
 1088 1722 lineto
-end_ol grestore 
+end_ol grestore
 gsave 20.282933 5.375000 translate 0.035278 -0.035278 scale
 start_ol
 1792 3200 moveto
@@ -1878,7 +1878,7 @@ start_ol
 2606 508 2564 518 2424 518 curveto
 1792 518 lineto
 1792 3200 lineto
-end_ol grestore 
+end_ol grestore
 gsave 20.697800 5.375000 translate 0.035278 -0.035278 scale
 start_ol
 2752 3137 moveto
@@ -1912,7 +1912,7 @@ start_ol
 776 818 704 968 704 1122 curveto
 704 1272 776 1427 905 1540 curveto
 1049 1667 1219 1722 1467 1722 curveto
-end_ol grestore 
+end_ol grestore
 gsave 17.679433 6.275000 translate 0.035278 -0.035278 scale
 start_ol
 2241 3276 moveto
@@ -1926,7 +1926,7 @@ start_ol
 1920 1768 2081 2265 2444 2846 curveto
 2496 2929 2506 2960 2506 3022 curveto
 2506 3162 2392 3276 2241 3276 curveto
-end_ol grestore 
+end_ol grestore
 gsave 18.094300 6.275000 translate 0.035278 -0.035278 scale
 start_ol
 896 3137 moveto
@@ -1959,7 +1959,7 @@ start_ol
 1211 518 896 768 896 1127 curveto
 896 1277 968 1427 1098 1540 curveto
 1243 1667 1413 1722 1667 1722 curveto
-end_ol grestore 
+end_ol grestore
 gsave 18.509167 6.275000 translate 0.035278 -0.035278 scale
 start_ol
 1984 0 moveto
@@ -1990,7 +1990,7 @@ start_ol
 704 866 1033 1024 1394 1024 curveto
 1575 1024 1772 1003 1984 966 curveto
 1984 702 lineto
-end_ol grestore 
+end_ol grestore
 gsave 18.924033 6.275000 translate 0.035278 -0.035278 scale
 start_ol
 2769 1898 moveto
@@ -2016,7 +2016,7 @@ start_ol
 2593 1229 2671 1265 2722 1324 curveto
 2759 1364 2769 1410 2769 1527 curveto
 2769 1898 lineto
-end_ol grestore 
+end_ol grestore
 gsave 19.338900 6.275000 translate 0.035278 -0.035278 scale
 start_ol
 1088 843 moveto
@@ -2056,7 +2056,7 @@ start_ol
 128 88 244 0 476 0 curveto
 1088 0 lineto
 1088 843 lineto
-end_ol grestore 
+end_ol grestore
 gsave 19.753767 6.275000 translate 0.035278 -0.035278 scale
 start_ol
 2624 2240 moveto
@@ -2086,7 +2086,7 @@ start_ol
 2894 111 2940 215 2940 326 curveto
 2940 449 2826 518 2624 518 curveto
 2624 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 20.168633 6.275000 translate 0.035278 -0.035278 scale
 start_ol
 896 351 moveto
@@ -2121,7 +2121,7 @@ start_ol
 1211 576 896 815 896 1154 curveto
 896 1307 994 1483 1144 1584 curveto
 1279 1679 1449 1722 1667 1722 curveto
-end_ol grestore 
+end_ol grestore
 gsave 20.583500 6.275000 translate 0.035278 -0.035278 scale
 start_ol
 895 3276 moveto
@@ -2135,7 +2135,7 @@ start_ol
 1569 5 1792 669 1792 1223 curveto
 1792 1794 1605 2390 1237 2976 curveto
 1087 3219 1019 3276 895 3276 curveto
-end_ol grestore 
+end_ol grestore
 0.100000 slw
 [] 0 sd
 [] 0 sd
@@ -2180,7 +2180,7 @@ start_ol
 1024 1642 1281 1722 1554 1722 curveto
 1779 1722 1956 1681 2106 1591 curveto
 2138 1550 lineto
-end_ol grestore 
+end_ol grestore
 gsave 2.781897 5.575000 translate 0.035278 -0.035278 scale
 start_ol
 2769 1898 moveto
@@ -2206,7 +2206,7 @@ start_ol
 2593 1229 2671 1265 2722 1324 curveto
 2759 1364 2769 1410 2769 1527 curveto
 2769 1898 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.196763 5.575000 translate 0.035278 -0.035278 scale
 start_ol
 1552 2240 moveto
@@ -2219,7 +2219,7 @@ start_ol
 2368 781 2005 518 1536 518 curveto
 1067 518 704 781 704 1120 curveto
 704 1464 1067 1722 1547 1722 curveto
-end_ol grestore 
+end_ol grestore
 gsave 3.611630 5.575000 translate 0.035278 -0.035278 scale
 start_ol
 1024 2240 moveto
@@ -2254,7 +2254,7 @@ start_ol
 2157 2174 1948 2240 1720 2240 curveto
 1441 2240 1268 2170 1024 1954 curveto
 1024 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.026497 5.575000 translate 0.035278 -0.035278 scale
 start_ol
 1088 1722 moveto
@@ -2286,7 +2286,7 @@ start_ol
 1337 518 1192 551 1129 613 curveto
 1098 646 1088 689 1088 789 curveto
 1088 1722 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.441363 5.575000 translate 0.035278 -0.035278 scale
 start_ol
 1408 2240 moveto
@@ -2317,7 +2317,7 @@ start_ol
 2621 2179 2455 2240 2274 2240 curveto
 2020 2240 1823 2155 1408 1857 curveto
 1408 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.856230 5.575000 translate 0.035278 -0.035278 scale
 start_ol
 1552 2240 moveto
@@ -2330,7 +2330,7 @@ start_ol
 2368 781 2005 518 1536 518 curveto
 1067 518 704 781 704 1120 curveto
 704 1464 1067 1722 1547 1722 curveto
-end_ol grestore 
+end_ol grestore
 gsave 5.271097 5.575000 translate 0.035278 -0.035278 scale
 start_ol
 1792 3200 moveto
@@ -2353,7 +2353,7 @@ start_ol
 2606 508 2564 518 2424 518 curveto
 1792 518 lineto
 1792 3200 lineto
-end_ol grestore 
+end_ol grestore
 0.462745 0.619608 0.462745 srgb
 n 9.250000 18.000000 m 9.250000 20.000000 l 12.750000 20.000000 l 12.750000 18.000000 l f
 0.100000 slw
@@ -2394,7 +2394,7 @@ start_ol
 1024 1642 1281 1722 1554 1722 curveto
 1779 1722 1956 1681 2106 1591 curveto
 2138 1550 lineto
-end_ol grestore 
+end_ol grestore
 gsave 10.166033 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 1792 3200 moveto
@@ -2417,7 +2417,7 @@ start_ol
 2606 508 2564 518 2424 518 curveto
 1792 518 lineto
 1792 3200 lineto
-end_ol grestore 
+end_ol grestore
 gsave 10.580900 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 2624 2240 moveto
@@ -2447,7 +2447,7 @@ start_ol
 2894 111 2940 215 2940 326 curveto
 2940 449 2826 518 2624 518 curveto
 2624 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 10.995767 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 1408 2240 moveto
@@ -2478,7 +2478,7 @@ start_ol
 2621 2179 2455 2240 2274 2240 curveto
 2020 2240 1823 2155 1408 1857 curveto
 1408 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.410633 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 704 2240 moveto
@@ -2515,7 +2515,7 @@ start_ol
 1514 2157 1344 2240 1143 2240 curveto
 988 2240 885 2199 704 2071 curveto
 704 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.825500 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 2752 3137 moveto
@@ -2549,7 +2549,7 @@ start_ol
 776 818 704 968 704 1122 curveto
 704 1272 776 1427 905 1540 curveto
 1049 1667 1219 1722 1467 1722 curveto
-end_ol grestore 
+end_ol grestore
 0.462745 0.619608 0.462745 srgb
 n 19.500000 18.000000 m 19.500000 20.000000 l 23.000000 20.000000 l 23.000000 18.000000 l f
 0.100000 slw
@@ -2590,7 +2590,7 @@ start_ol
 1024 1642 1281 1722 1554 1722 curveto
 1779 1722 1956 1681 2106 1591 curveto
 2138 1550 lineto
-end_ol grestore 
+end_ol grestore
 gsave 20.416033 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 1792 3200 moveto
@@ -2613,7 +2613,7 @@ start_ol
 2606 508 2564 518 2424 518 curveto
 1792 518 lineto
 1792 3200 lineto
-end_ol grestore 
+end_ol grestore
 gsave 20.830900 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 2624 2240 moveto
@@ -2643,7 +2643,7 @@ start_ol
 2894 111 2940 215 2940 326 curveto
 2940 449 2826 518 2624 518 curveto
 2624 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 21.245767 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 1408 2240 moveto
@@ -2674,7 +2674,7 @@ start_ol
 2621 2179 2455 2240 2274 2240 curveto
 2020 2240 1823 2155 1408 1857 curveto
 1408 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 21.660633 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 704 2240 moveto
@@ -2711,7 +2711,7 @@ start_ol
 1514 2157 1344 2240 1143 2240 curveto
 988 2240 885 2199 704 2071 curveto
 704 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 22.075500 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 2752 3137 moveto
@@ -2745,7 +2745,7 @@ start_ol
 776 818 704 968 704 1122 curveto
 704 1272 776 1427 905 1540 curveto
 1049 1667 1219 1722 1467 1722 curveto
-end_ol grestore 
+end_ol grestore
 0.462745 0.619608 0.462745 srgb
 n 13.500000 18.000000 m 13.500000 20.000000 l 17.000000 20.000000 l 17.000000 18.000000 l f
 0.100000 slw
@@ -2786,7 +2786,7 @@ start_ol
 1024 1642 1281 1722 1554 1722 curveto
 1779 1722 1956 1681 2106 1591 curveto
 2138 1550 lineto
-end_ol grestore 
+end_ol grestore
 gsave 14.416033 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 1792 3200 moveto
@@ -2809,7 +2809,7 @@ start_ol
 2606 508 2564 518 2424 518 curveto
 1792 518 lineto
 1792 3200 lineto
-end_ol grestore 
+end_ol grestore
 gsave 14.830900 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 2624 2240 moveto
@@ -2839,7 +2839,7 @@ start_ol
 2894 111 2940 215 2940 326 curveto
 2940 449 2826 518 2624 518 curveto
 2624 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 15.245767 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 1408 2240 moveto
@@ -2870,7 +2870,7 @@ start_ol
 2621 2179 2455 2240 2274 2240 curveto
 2020 2240 1823 2155 1408 1857 curveto
 1408 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 15.660633 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 704 2240 moveto
@@ -2907,7 +2907,7 @@ start_ol
 1514 2157 1344 2240 1143 2240 curveto
 988 2240 885 2199 704 2071 curveto
 704 2240 lineto
-end_ol grestore 
+end_ol grestore
 gsave 16.075500 19.200000 translate 0.035278 -0.035278 scale
 start_ol
 2752 3137 moveto
@@ -2941,7 +2941,7 @@ start_ol
 776 818 704 968 704 1122 curveto
 704 1272 776 1427 905 1540 curveto
 1049 1667 1219 1722 1467 1722 curveto
-end_ol grestore 
+end_ol grestore
 0.200000 slw
 [1.000000] 0 sd
 [0.200000] 0 sd
@@ -2962,7 +2962,7 @@ start_ol
 3083 0 3712 528 3712 1348 curveto
 3712 4224 lineto
 3200 4224 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.550333 2.500000 translate 0.035278 -0.035278 scale
 start_ol
 2624 2128 moveto
@@ -2983,7 +2983,7 @@ start_ol
 768 2400 1038 2564 1465 2564 curveto
 1886 2564 2112 2413 2112 2128 curveto
 2624 2128 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.931333 2.500000 translate 0.035278 -0.035278 scale
 start_ol
 2955 1344 moveto
@@ -3002,7 +3002,7 @@ start_ol
 2079 2564 2442 2209 2442 1759 curveto
 2442 1749 2442 1739 2436 1728 curveto
 743 1728 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.354667 2.500000 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3016,7 +3016,7 @@ start_ol
 1357 3008 1115 2849 832 2463 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.000000 3.500000 translate 0.035278 -0.035278 scale
 start_ol
 3769 2899 moveto
@@ -3034,7 +3034,7 @@ start_ol
 2427 3752 2744 3654 2917 3500 curveto
 3072 3364 3159 3194 3222 2899 curveto
 3769 2899 lineto
-end_ol grestore 
+end_ol grestore
 gsave 3.550333 3.500000 translate 0.035278 -0.035278 scale
 start_ol
 1562 3008 moveto
@@ -3047,7 +3047,7 @@ start_ol
 2432 849 2098 444 1568 444 curveto
 1032 444 704 844 704 1504 curveto
 704 2159 1032 2564 1568 2564 curveto
-end_ol grestore 
+end_ol grestore
 gsave 3.973667 3.500000 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3070,7 +3070,7 @@ start_ol
 1375 3008 1100 2859 832 2499 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.608667 3.500000 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3093,7 +3093,7 @@ start_ol
 1375 3008 1100 2859 832 2499 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 5.243667 3.500000 translate 0.035278 -0.035278 scale
 start_ol
 3115 414 moveto
@@ -3122,7 +3122,7 @@ start_ol
 768 1144 977 1289 1481 1357 curveto
 1980 1420 2081 1440 2240 1508 curveto
 2240 1019 lineto
-end_ol grestore 
+end_ol grestore
 gsave 5.667000 3.500000 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3138,7 +3138,7 @@ start_ol
 1392 3008 1100 2837 832 2421 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 6.090333 3.500000 translate 0.035278 -0.035278 scale
 start_ol
 2880 4224 moveto
@@ -3156,7 +3156,7 @@ start_ol
 2368 865 2033 449 1524 449 curveto
 993 449 640 870 640 1504 curveto
 640 2138 993 2559 1518 2559 curveto
-end_ol grestore 
+end_ol grestore
 gsave 6.513667 3.500000 translate 0.035278 -0.035278 scale
 start_ol
 2624 2128 moveto
@@ -3177,7 +3177,7 @@ start_ol
 768 2400 1038 2564 1465 2564 curveto
 1886 2564 2112 2413 2112 2128 curveto
 2624 2128 lineto
-end_ol grestore 
+end_ol grestore
 0.100000 slw
 [] 0 sd
 [] 0 sd
@@ -3328,7 +3328,7 @@ start_ol
 2427 3752 2744 3654 2917 3500 curveto
 3072 3364 3159 3194 3222 2899 curveto
 3769 2899 lineto
-end_ol grestore 
+end_ol grestore
 gsave 21.880133 8.843570 translate 0.035278 -0.035278 scale
 start_ol
 1562 3008 moveto
@@ -3341,7 +3341,7 @@ start_ol
 2432 849 2098 444 1568 444 curveto
 1032 444 704 844 704 1504 curveto
 704 2159 1032 2564 1568 2564 curveto
-end_ol grestore 
+end_ol grestore
 gsave 22.303467 8.843570 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3357,7 +3357,7 @@ start_ol
 1392 3008 1100 2837 832 2421 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 22.726800 8.843570 translate 0.035278 -0.035278 scale
 start_ol
 1519 3008 moveto
@@ -3377,7 +3377,7 @@ start_ol
 1024 2616 lineto
 1519 2616 lineto
 1519 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 22.938467 8.843570 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3391,7 +3391,7 @@ start_ol
 1357 3008 1115 2849 832 2463 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 23.192467 8.843570 translate 0.035278 -0.035278 scale
 start_ol
 1562 3008 moveto
@@ -3404,7 +3404,7 @@ start_ol
 2432 849 2098 444 1568 444 curveto
 1032 444 704 844 704 1504 curveto
 704 2159 1032 2564 1568 2564 curveto
-end_ol grestore 
+end_ol grestore
 gsave 23.615800 8.843570 translate 0.035278 -0.035278 scale
 start_ol
 896 4224 moveto
@@ -3412,7 +3412,7 @@ start_ol
 384 0 lineto
 896 0 lineto
 896 4224 lineto
-end_ol grestore 
+end_ol grestore
 gsave 23.785133 8.843570 translate 0.035278 -0.035278 scale
 start_ol
 896 4224 moveto
@@ -3420,7 +3420,7 @@ start_ol
 384 0 lineto
 896 0 lineto
 896 4224 lineto
-end_ol grestore 
+end_ol grestore
 gsave 23.954467 8.843570 translate 0.035278 -0.035278 scale
 start_ol
 2955 1344 moveto
@@ -3439,7 +3439,7 @@ start_ol
 2079 2564 2442 2209 2442 1759 curveto
 2442 1749 2442 1739 2436 1728 curveto
 743 1728 lineto
-end_ol grestore 
+end_ol grestore
 gsave 24.377800 8.843570 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3453,7 +3453,7 @@ start_ol
 1357 3008 1115 2849 832 2463 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 21.417600 9.593570 translate 0.035278 -0.035278 scale
 start_ol
 2880 4224 moveto
@@ -3471,7 +3471,7 @@ start_ol
 2368 865 2033 449 1524 449 curveto
 993 449 640 870 640 1504 curveto
 640 2138 993 2559 1518 2559 curveto
-end_ol grestore 
+end_ol grestore
 gsave 21.840933 9.593570 translate 0.035278 -0.035278 scale
 start_ol
 3115 414 moveto
@@ -3500,7 +3500,7 @@ start_ol
 768 1144 977 1289 1481 1357 curveto
 1980 1420 2081 1440 2240 1508 curveto
 2240 1019 lineto
-end_ol grestore 
+end_ol grestore
 gsave 22.264267 9.593570 translate 0.035278 -0.035278 scale
 start_ol
 2955 1344 moveto
@@ -3519,7 +3519,7 @@ start_ol
 2079 2564 2442 2209 2442 1759 curveto
 2442 1749 2442 1739 2436 1728 curveto
 743 1728 lineto
-end_ol grestore 
+end_ol grestore
 gsave 22.687600 9.593570 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3542,7 +3542,7 @@ start_ol
 1375 3008 1100 2859 832 2499 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 23.322600 9.593570 translate 0.035278 -0.035278 scale
 start_ol
 1562 3008 moveto
@@ -3555,7 +3555,7 @@ start_ol
 2432 849 2098 444 1568 444 curveto
 1032 444 704 844 704 1504 curveto
 704 2159 1032 2564 1568 2564 curveto
-end_ol grestore 
+end_ol grestore
 gsave 23.745933 9.593570 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3571,7 +3571,7 @@ start_ol
 1392 3008 1100 2837 832 2421 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 24.169267 9.593570 translate 0.035278 -0.035278 scale
 start_ol
 2624 2128 moveto
@@ -3592,7 +3592,7 @@ start_ol
 768 2400 1038 2564 1465 2564 curveto
 1886 2564 2112 2413 2112 2128 curveto
 2624 2128 lineto
-end_ol grestore 
+end_ol grestore
 gsave 23.320533 18.817300 translate 0.035278 -0.035278 scale
 start_ol
 3769 2899 moveto
@@ -3610,7 +3610,7 @@ start_ol
 2427 3752 2744 3654 2917 3500 curveto
 3072 3364 3159 3194 3222 2899 curveto
 3769 2899 lineto
-end_ol grestore 
+end_ol grestore
 gsave 23.870867 18.817300 translate 0.035278 -0.035278 scale
 start_ol
 1562 3008 moveto
@@ -3623,7 +3623,7 @@ start_ol
 2432 849 2098 444 1568 444 curveto
 1032 444 704 844 704 1504 curveto
 704 2159 1032 2564 1568 2564 curveto
-end_ol grestore 
+end_ol grestore
 gsave 24.294200 18.817300 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3646,7 +3646,7 @@ start_ol
 1375 3008 1100 2859 832 2499 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 24.929200 18.817300 translate 0.035278 -0.035278 scale
 start_ol
 320 -1270 moveto
@@ -3664,7 +3664,7 @@ start_ol
 2496 870 2155 449 1647 449 curveto
 1156 449 832 865 832 1504 curveto
 832 2143 1156 2559 1647 2559 curveto
-end_ol grestore 
+end_ol grestore
 gsave 25.352533 18.817300 translate 0.035278 -0.035278 scale
 start_ol
 2816 0 moveto
@@ -3680,7 +3680,7 @@ start_ol
 1754 0 2029 154 2304 546 curveto
 2304 0 lineto
 2816 0 lineto
-end_ol grestore 
+end_ol grestore
 gsave 25.775867 18.817300 translate 0.035278 -0.035278 scale
 start_ol
 1519 3008 moveto
@@ -3700,7 +3700,7 @@ start_ol
 1024 2616 lineto
 1519 2616 lineto
 1519 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 25.987533 18.817300 translate 0.035278 -0.035278 scale
 start_ol
 2955 1344 moveto
@@ -3719,10 +3719,10 @@ start_ol
 2079 2564 2442 2209 2442 1759 curveto
 2442 1749 2442 1739 2436 1728 curveto
 743 1728 lineto
-end_ol grestore 
+end_ol grestore
 gsave 26.410867 18.817300 translate 0.035278 -0.035278 scale
 start_ol
-end_ol grestore 
+end_ol grestore
 gsave 26.622533 18.817300 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3738,7 +3738,7 @@ start_ol
 1392 3008 1100 2837 832 2421 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 27.045867 18.817300 translate 0.035278 -0.035278 scale
 start_ol
 1562 3008 moveto
@@ -3751,7 +3751,7 @@ start_ol
 2432 849 2098 444 1568 444 curveto
 1032 444 704 844 704 1504 curveto
 704 2159 1032 2564 1568 2564 curveto
-end_ol grestore 
+end_ol grestore
 gsave 27.469200 18.817300 translate 0.035278 -0.035278 scale
 start_ol
 2880 4224 moveto
@@ -3769,7 +3769,7 @@ start_ol
 2368 865 2033 449 1524 449 curveto
 993 449 640 870 640 1504 curveto
 640 2138 993 2559 1518 2559 curveto
-end_ol grestore 
+end_ol grestore
 gsave 27.892533 18.817300 translate 0.035278 -0.035278 scale
 start_ol
 2955 1344 moveto
@@ -3788,7 +3788,7 @@ start_ol
 2079 2564 2442 2209 2442 1759 curveto
 2442 1749 2442 1739 2436 1728 curveto
 743 1728 lineto
-end_ol grestore 
+end_ol grestore
 gsave 24.242400 19.514800 translate 0.035278 -0.035278 scale
 start_ol
 2880 4224 moveto
@@ -3806,7 +3806,7 @@ start_ol
 2368 865 2033 449 1524 449 curveto
 993 449 640 870 640 1504 curveto
 640 2138 993 2559 1518 2559 curveto
-end_ol grestore 
+end_ol grestore
 gsave 24.665733 19.514800 translate 0.035278 -0.035278 scale
 start_ol
 3115 414 moveto
@@ -3835,7 +3835,7 @@ start_ol
 768 1144 977 1289 1481 1357 curveto
 1980 1420 2081 1440 2240 1508 curveto
 2240 1019 lineto
-end_ol grestore 
+end_ol grestore
 gsave 25.089067 19.514800 translate 0.035278 -0.035278 scale
 start_ol
 2955 1344 moveto
@@ -3854,7 +3854,7 @@ start_ol
 2079 2564 2442 2209 2442 1759 curveto
 2442 1749 2442 1739 2436 1728 curveto
 743 1728 lineto
-end_ol grestore 
+end_ol grestore
 gsave 25.512400 19.514800 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3877,7 +3877,7 @@ start_ol
 1375 3008 1100 2859 832 2499 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 26.147400 19.514800 translate 0.035278 -0.035278 scale
 start_ol
 1562 3008 moveto
@@ -3890,7 +3890,7 @@ start_ol
 2432 849 2098 444 1568 444 curveto
 1032 444 704 844 704 1504 curveto
 704 2159 1032 2564 1568 2564 curveto
-end_ol grestore 
+end_ol grestore
 gsave 26.570733 19.514800 translate 0.035278 -0.035278 scale
 start_ol
 384 3008 moveto
@@ -3906,7 +3906,7 @@ start_ol
 1392 3008 1100 2837 832 2421 curveto
 832 3008 lineto
 384 3008 lineto
-end_ol grestore 
+end_ol grestore
 gsave 26.994067 19.514800 translate 0.035278 -0.035278 scale
 start_ol
 2624 2128 moveto
@@ -3927,5 +3927,5 @@ start_ol
 768 2400 1038 2564 1465 2564 curveto
 1886 2564 2112 2413 2112 2128 curveto
 2624 2128 lineto
-end_ol grestore 
+end_ol grestore
 showpage
diff --git a/doc/slides/users/figures/entities0.eps b/doc/slides/users/figures/entities0.eps
index bcb24d0107f561e919cb68494ae29e8e1d97324b..3869b041a20ec249b946fc1ec91672e7a5bdf689 100644
--- a/doc/slides/users/figures/entities0.eps
+++ b/doc/slides/users/figures/entities0.eps
@@ -145,7 +145,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.300033 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -158,7 +158,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.579433 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -177,7 +177,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.858833 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -197,7 +197,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 3.375000 3.975000 m 3.375000 4.275000 l 5.750000 4.275000 l 5.750000 3.975000 l f
 0.050000 slw
@@ -229,7 +229,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 7.300033 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -242,7 +242,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.579433 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -261,7 +261,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.858833 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -281,7 +281,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 6.375000 3.975000 m 6.375000 4.275000 l 8.750000 4.275000 l 8.750000 3.975000 l f
 0.050000 slw
@@ -313,7 +313,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.300033 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -326,7 +326,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.579433 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -345,7 +345,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.858833 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -365,7 +365,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 3.375000 7.975000 m 3.375000 8.275000 l 5.750000 8.275000 l 5.750000 7.975000 l f
 0.050000 slw
@@ -397,7 +397,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 7.300033 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -410,7 +410,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.579433 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -429,7 +429,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.858833 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -449,7 +449,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 6.375000 7.975000 m 6.375000 8.275000 l 8.750000 8.275000 l 8.750000 7.975000 l f
 0.050000 slw
@@ -481,7 +481,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.300033 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -494,7 +494,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.579433 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -513,7 +513,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.858833 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -533,7 +533,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 3.375000 9.975000 m 3.375000 10.275000 l 5.750000 10.275000 l 5.750000 9.975000 l f
 0.050000 slw
@@ -565,7 +565,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 7.300033 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -578,7 +578,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.579433 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -597,7 +597,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.858833 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -617,7 +617,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 6.375000 9.975000 m 6.375000 10.275000 l 8.750000 10.275000 l 8.750000 9.975000 l f
 0.050000 slw
@@ -649,7 +649,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.300033 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -662,7 +662,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.579433 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -681,7 +681,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.858833 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -701,7 +701,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 3.375000 5.975000 m 3.375000 6.275000 l 5.750000 6.275000 l 5.750000 5.975000 l f
 0.050000 slw
@@ -733,7 +733,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 7.300033 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -746,7 +746,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.579433 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -765,7 +765,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.858833 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -785,7 +785,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 6.375000 5.975000 m 6.375000 6.275000 l 8.750000 6.275000 l 8.750000 5.975000 l f
 0.050000 slw
@@ -817,7 +817,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.300033 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -830,7 +830,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.579433 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -849,7 +849,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.858833 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -869,7 +869,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 10.375000 3.975000 m 10.375000 4.275000 l 12.750000 4.275000 l 12.750000 3.975000 l f
 0.050000 slw
@@ -901,7 +901,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.300033 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -914,7 +914,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.579433 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -933,7 +933,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.858833 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -953,7 +953,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 10.375000 5.975000 m 10.375000 6.275000 l 12.750000 6.275000 l 12.750000 5.975000 l f
 0.050000 slw
@@ -985,7 +985,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.300033 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -998,7 +998,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.579433 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1017,7 +1017,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.858833 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1037,7 +1037,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 10.375000 7.975000 m 10.375000 8.275000 l 12.750000 8.275000 l 12.750000 7.975000 l f
 0.050000 slw
@@ -1069,7 +1069,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.300033 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1082,7 +1082,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.579433 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1101,7 +1101,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.858833 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1121,7 +1121,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 10.375000 9.975000 m 10.375000 10.275000 l 12.750000 10.275000 l 12.750000 9.975000 l f
 0.050000 slw
diff --git a/doc/slides/users/figures/entities1.eps b/doc/slides/users/figures/entities1.eps
index 1d835679d1ac0fd83e34e9353d9f84f0d6b73aec..3da76d1ac39d8e50f2524011dc5a0bb473bdb899 100644
--- a/doc/slides/users/figures/entities1.eps
+++ b/doc/slides/users/figures/entities1.eps
@@ -154,7 +154,7 @@ start_ol
 1773 2440 1920 2285 1920 1954 curveto
 1920 1627 1773 1472 1454 1472 curveto
 896 1472 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.335600 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 2113 98 moveto
@@ -182,7 +182,7 @@ start_ol
 1186 997 lineto
 1325 1022 1348 1030 1408 1059 curveto
 1408 920 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.631933 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 256 2176 moveto
@@ -196,7 +196,7 @@ start_ol
 1179 2176 953 2009 832 1719 curveto
 832 2176 lineto
 256 2176 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.843600 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 1210 2112 moveto
@@ -216,7 +216,7 @@ start_ol
 896 1728 lineto
 1210 1728 lineto
 1210 2112 lineto
-end_ol grestore 
+end_ol grestore
 gsave 5.021400 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 832 2181 moveto
@@ -229,7 +229,7 @@ start_ol
 256 2440 lineto
 832 2440 lineto
 832 2944 lineto
-end_ol grestore 
+end_ol grestore
 gsave 5.173800 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 1210 2112 moveto
@@ -249,7 +249,7 @@ start_ol
 896 1728 lineto
 1210 1728 lineto
 1210 2112 lineto
-end_ol grestore 
+end_ol grestore
 gsave 5.351600 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 832 2181 moveto
@@ -262,7 +262,7 @@ start_ol
 256 2440 lineto
 832 2440 lineto
 832 2944 lineto
-end_ol grestore 
+end_ol grestore
 gsave 5.504000 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 1212 2176 moveto
@@ -275,7 +275,7 @@ start_ol
 1728 712 1518 456 1216 456 curveto
 910 456 704 712 704 1088 curveto
 704 1464 910 1720 1216 1720 curveto
-end_ol grestore 
+end_ol grestore
 gsave 5.834200 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 256 2176 moveto
@@ -291,10 +291,10 @@ start_ol
 1197 2176 996 2070 832 1836 curveto
 832 2176 lineto
 256 2176 lineto
-end_ol grestore 
+end_ol grestore
 gsave 6.164400 11.750000 translate 0.035278 -0.035278 scale
 start_ol
-end_ol grestore 
+end_ol grestore
 gsave 6.316800 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 960 1972 moveto
@@ -305,7 +305,7 @@ start_ol
 1064 2520 769 2347 275 2347 curveto
 275 1972 lineto
 960 1972 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.191900 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 896 1024 moveto
@@ -322,7 +322,7 @@ start_ol
 1773 2440 1920 2285 1920 1954 curveto
 1920 1627 1773 1472 1454 1472 curveto
 896 1472 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.547500 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 2113 98 moveto
@@ -350,7 +350,7 @@ start_ol
 1186 997 lineto
 1325 1022 1348 1030 1408 1059 curveto
 1408 920 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.843833 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 256 2176 moveto
@@ -364,7 +364,7 @@ start_ol
 1179 2176 953 2009 832 1719 curveto
 832 2176 lineto
 256 2176 lineto
-end_ol grestore 
+end_ol grestore
 gsave 12.055500 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 1210 2112 moveto
@@ -384,7 +384,7 @@ start_ol
 896 1728 lineto
 1210 1728 lineto
 1210 2112 lineto
-end_ol grestore 
+end_ol grestore
 gsave 12.233300 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 832 2181 moveto
@@ -397,7 +397,7 @@ start_ol
 256 2440 lineto
 832 2440 lineto
 832 2944 lineto
-end_ol grestore 
+end_ol grestore
 gsave 12.385700 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 1210 2112 moveto
@@ -417,7 +417,7 @@ start_ol
 896 1728 lineto
 1210 1728 lineto
 1210 2112 lineto
-end_ol grestore 
+end_ol grestore
 gsave 12.563500 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 832 2181 moveto
@@ -430,7 +430,7 @@ start_ol
 256 2440 lineto
 832 2440 lineto
 832 2944 lineto
-end_ol grestore 
+end_ol grestore
 gsave 12.715900 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 1212 2176 moveto
@@ -443,7 +443,7 @@ start_ol
 1728 712 1518 456 1216 456 curveto
 910 456 704 712 704 1088 curveto
 704 1464 910 1720 1216 1720 curveto
-end_ol grestore 
+end_ol grestore
 gsave 13.046100 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 256 2176 moveto
@@ -459,10 +459,10 @@ start_ol
 1197 2176 996 2070 832 1836 curveto
 832 2176 lineto
 256 2176 lineto
-end_ol grestore 
+end_ol grestore
 gsave 13.376300 11.750000 translate 0.035278 -0.035278 scale
 start_ol
-end_ol grestore 
+end_ol grestore
 gsave 13.528700 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 2112 504 moveto
@@ -480,7 +480,7 @@ start_ol
 282 749 128 506 128 0 curveto
 2112 0 lineto
 2112 504 lineto
-end_ol grestore 
+end_ol grestore
 1.000000 1.000000 1.000000 srgb
 n 3.500000 3.000000 m 3.500000 4.175000 l 5.625000 4.175000 l 5.625000 3.000000 l f
 0.100000 slw
@@ -504,7 +504,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.300033 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -517,7 +517,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.579433 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -536,7 +536,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.858833 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -556,7 +556,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 3.375000 3.975000 m 3.375000 4.275000 l 5.750000 4.275000 l 5.750000 3.975000 l f
 0.050000 slw
@@ -588,7 +588,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 7.300033 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -601,7 +601,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.579433 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -620,7 +620,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.858833 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -640,7 +640,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 6.375000 3.975000 m 6.375000 4.275000 l 8.750000 4.275000 l 8.750000 3.975000 l f
 0.050000 slw
@@ -672,7 +672,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.300033 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -685,7 +685,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.579433 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -704,7 +704,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.858833 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -724,7 +724,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 3.375000 7.975000 m 3.375000 8.275000 l 5.750000 8.275000 l 5.750000 7.975000 l f
 0.050000 slw
@@ -756,7 +756,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 7.300033 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -769,7 +769,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.579433 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -788,7 +788,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.858833 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -808,7 +808,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 6.375000 7.975000 m 6.375000 8.275000 l 8.750000 8.275000 l 8.750000 7.975000 l f
 0.050000 slw
@@ -840,7 +840,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.300033 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -853,7 +853,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.579433 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -872,7 +872,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.858833 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -892,7 +892,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 3.375000 9.975000 m 3.375000 10.275000 l 5.750000 10.275000 l 5.750000 9.975000 l f
 0.050000 slw
@@ -924,7 +924,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 7.300033 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -937,7 +937,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.579433 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -956,7 +956,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.858833 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -976,7 +976,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 6.375000 9.975000 m 6.375000 10.275000 l 8.750000 10.275000 l 8.750000 9.975000 l f
 0.050000 slw
@@ -1008,7 +1008,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.300033 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1021,7 +1021,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.579433 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1040,7 +1040,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.858833 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1060,7 +1060,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 3.375000 5.975000 m 3.375000 6.275000 l 5.750000 6.275000 l 5.750000 5.975000 l f
 0.050000 slw
@@ -1092,7 +1092,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 7.300033 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1105,7 +1105,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.579433 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1124,7 +1124,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.858833 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1144,7 +1144,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 6.375000 5.975000 m 6.375000 6.275000 l 8.750000 6.275000 l 8.750000 5.975000 l f
 0.050000 slw
@@ -1176,7 +1176,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.300033 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1189,7 +1189,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.579433 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1208,7 +1208,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.858833 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1228,7 +1228,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 10.375000 3.975000 m 10.375000 4.275000 l 12.750000 4.275000 l 12.750000 3.975000 l f
 0.050000 slw
@@ -1260,7 +1260,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.300033 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1273,7 +1273,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.579433 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1292,7 +1292,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.858833 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1312,7 +1312,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 10.375000 5.975000 m 10.375000 6.275000 l 12.750000 6.275000 l 12.750000 5.975000 l f
 0.050000 slw
@@ -1344,7 +1344,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.300033 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1357,7 +1357,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.579433 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1376,7 +1376,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.858833 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1396,7 +1396,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 10.375000 7.975000 m 10.375000 8.275000 l 12.750000 8.275000 l 12.750000 7.975000 l f
 0.050000 slw
@@ -1428,7 +1428,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.300033 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1441,7 +1441,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.579433 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1460,7 +1460,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.858833 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1480,7 +1480,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 10.375000 9.975000 m 10.375000 10.275000 l 12.750000 10.275000 l 12.750000 9.975000 l f
 0.050000 slw
diff --git a/doc/slides/users/figures/entities2.eps b/doc/slides/users/figures/entities2.eps
index 4b0041a785431c7d772fd3c07c262996066da213..4ac92d995acaa5723d79b68a0a4500785fade728 100644
--- a/doc/slides/users/figures/entities2.eps
+++ b/doc/slides/users/figures/entities2.eps
@@ -154,7 +154,7 @@ start_ol
 1773 2440 1920 2285 1920 1954 curveto
 1920 1627 1773 1472 1454 1472 curveto
 896 1472 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.335600 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 2113 98 moveto
@@ -182,7 +182,7 @@ start_ol
 1186 997 lineto
 1325 1022 1348 1030 1408 1059 curveto
 1408 920 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.631933 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 256 2176 moveto
@@ -196,7 +196,7 @@ start_ol
 1179 2176 953 2009 832 1719 curveto
 832 2176 lineto
 256 2176 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.843600 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 1210 2112 moveto
@@ -216,7 +216,7 @@ start_ol
 896 1728 lineto
 1210 1728 lineto
 1210 2112 lineto
-end_ol grestore 
+end_ol grestore
 gsave 5.021400 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 832 2181 moveto
@@ -229,7 +229,7 @@ start_ol
 256 2440 lineto
 832 2440 lineto
 832 2944 lineto
-end_ol grestore 
+end_ol grestore
 gsave 5.173800 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 1210 2112 moveto
@@ -249,7 +249,7 @@ start_ol
 896 1728 lineto
 1210 1728 lineto
 1210 2112 lineto
-end_ol grestore 
+end_ol grestore
 gsave 5.351600 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 832 2181 moveto
@@ -262,7 +262,7 @@ start_ol
 256 2440 lineto
 832 2440 lineto
 832 2944 lineto
-end_ol grestore 
+end_ol grestore
 gsave 5.504000 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 1212 2176 moveto
@@ -275,7 +275,7 @@ start_ol
 1728 712 1518 456 1216 456 curveto
 910 456 704 712 704 1088 curveto
 704 1464 910 1720 1216 1720 curveto
-end_ol grestore 
+end_ol grestore
 gsave 5.834200 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 256 2176 moveto
@@ -291,10 +291,10 @@ start_ol
 1197 2176 996 2070 832 1836 curveto
 832 2176 lineto
 256 2176 lineto
-end_ol grestore 
+end_ol grestore
 gsave 6.164400 11.750000 translate 0.035278 -0.035278 scale
 start_ol
-end_ol grestore 
+end_ol grestore
 gsave 6.316800 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 960 1972 moveto
@@ -305,7 +305,7 @@ start_ol
 1064 2520 769 2347 275 2347 curveto
 275 1972 lineto
 960 1972 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.191900 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 896 1024 moveto
@@ -322,7 +322,7 @@ start_ol
 1773 2440 1920 2285 1920 1954 curveto
 1920 1627 1773 1472 1454 1472 curveto
 896 1472 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.547500 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 2113 98 moveto
@@ -350,7 +350,7 @@ start_ol
 1186 997 lineto
 1325 1022 1348 1030 1408 1059 curveto
 1408 920 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.843833 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 256 2176 moveto
@@ -364,7 +364,7 @@ start_ol
 1179 2176 953 2009 832 1719 curveto
 832 2176 lineto
 256 2176 lineto
-end_ol grestore 
+end_ol grestore
 gsave 12.055500 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 1210 2112 moveto
@@ -384,7 +384,7 @@ start_ol
 896 1728 lineto
 1210 1728 lineto
 1210 2112 lineto
-end_ol grestore 
+end_ol grestore
 gsave 12.233300 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 832 2181 moveto
@@ -397,7 +397,7 @@ start_ol
 256 2440 lineto
 832 2440 lineto
 832 2944 lineto
-end_ol grestore 
+end_ol grestore
 gsave 12.385700 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 1210 2112 moveto
@@ -417,7 +417,7 @@ start_ol
 896 1728 lineto
 1210 1728 lineto
 1210 2112 lineto
-end_ol grestore 
+end_ol grestore
 gsave 12.563500 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 832 2181 moveto
@@ -430,7 +430,7 @@ start_ol
 256 2440 lineto
 832 2440 lineto
 832 2944 lineto
-end_ol grestore 
+end_ol grestore
 gsave 12.715900 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 1212 2176 moveto
@@ -443,7 +443,7 @@ start_ol
 1728 712 1518 456 1216 456 curveto
 910 456 704 712 704 1088 curveto
 704 1464 910 1720 1216 1720 curveto
-end_ol grestore 
+end_ol grestore
 gsave 13.046100 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 256 2176 moveto
@@ -459,10 +459,10 @@ start_ol
 1197 2176 996 2070 832 1836 curveto
 832 2176 lineto
 256 2176 lineto
-end_ol grestore 
+end_ol grestore
 gsave 13.376300 11.750000 translate 0.035278 -0.035278 scale
 start_ol
-end_ol grestore 
+end_ol grestore
 gsave 13.528700 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 2112 504 moveto
@@ -480,7 +480,7 @@ start_ol
 282 749 128 506 128 0 curveto
 2112 0 lineto
 2112 504 lineto
-end_ol grestore 
+end_ol grestore
 0.010000 slw
 [] 0 sd
 [] 0 sd
@@ -514,7 +514,7 @@ start_ol
 1370 0 1664 256 1664 707 curveto
 1664 2496 lineto
 1152 2496 lineto
-end_ol grestore 
+end_ol grestore
 gsave 2.106067 6.125000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -527,7 +527,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 2.385467 6.125000 translate 0.035278 -0.035278 scale
 start_ol
 192 2496 moveto
@@ -546,7 +546,7 @@ start_ol
 1472 621 1313 404 1088 404 curveto
 857 404 704 615 704 934 curveto
 704 1238 860 1452 1088 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 13.102067 9.500000 translate 0.035278 -0.035278 scale
 start_ol
 1152 2496 moveto
@@ -561,7 +561,7 @@ start_ol
 1370 0 1664 256 1664 707 curveto
 1664 2496 lineto
 1152 2496 lineto
-end_ol grestore 
+end_ol grestore
 gsave 13.356067 9.500000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -574,7 +574,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 13.635467 9.500000 translate 0.035278 -0.035278 scale
 start_ol
 192 2496 moveto
@@ -593,7 +593,7 @@ start_ol
 1472 621 1313 404 1088 404 curveto
 857 404 704 615 704 934 curveto
 704 1238 860 1452 1088 1452 curveto
-end_ol grestore 
+end_ol grestore
 1.000000 1.000000 1.000000 srgb
 n 3.500000 3.000000 m 3.500000 4.175000 l 5.625000 4.175000 l 5.625000 3.000000 l f
 0.100000 slw
@@ -617,7 +617,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.300033 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -630,7 +630,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.579433 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -649,7 +649,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.858833 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -669,7 +669,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 3.375000 3.975000 m 3.375000 4.275000 l 5.750000 4.275000 l 5.750000 3.975000 l f
 0.050000 slw
@@ -701,7 +701,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 7.300033 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -714,7 +714,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.579433 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -733,7 +733,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.858833 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -753,7 +753,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 6.375000 3.975000 m 6.375000 4.275000 l 8.750000 4.275000 l 8.750000 3.975000 l f
 0.050000 slw
@@ -785,7 +785,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.300033 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -798,7 +798,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.579433 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -817,7 +817,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.858833 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -837,7 +837,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 3.375000 7.975000 m 3.375000 8.275000 l 5.750000 8.275000 l 5.750000 7.975000 l f
 0.050000 slw
@@ -869,7 +869,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 7.300033 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -882,7 +882,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.579433 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -901,7 +901,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.858833 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -921,7 +921,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 6.375000 7.975000 m 6.375000 8.275000 l 8.750000 8.275000 l 8.750000 7.975000 l f
 0.050000 slw
@@ -953,7 +953,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.300033 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -966,7 +966,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.579433 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -985,7 +985,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.858833 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1005,7 +1005,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 3.375000 9.975000 m 3.375000 10.275000 l 5.750000 10.275000 l 5.750000 9.975000 l f
 0.050000 slw
@@ -1037,7 +1037,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 7.300033 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1050,7 +1050,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.579433 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1069,7 +1069,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.858833 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1089,7 +1089,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 6.375000 9.975000 m 6.375000 10.275000 l 8.750000 10.275000 l 8.750000 9.975000 l f
 0.050000 slw
@@ -1121,7 +1121,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.300033 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1134,7 +1134,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.579433 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1153,7 +1153,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.858833 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1173,7 +1173,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 3.375000 5.975000 m 3.375000 6.275000 l 5.750000 6.275000 l 5.750000 5.975000 l f
 0.050000 slw
@@ -1205,7 +1205,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 7.300033 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1218,7 +1218,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.579433 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1237,7 +1237,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.858833 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1257,7 +1257,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 6.375000 5.975000 m 6.375000 6.275000 l 8.750000 6.275000 l 8.750000 5.975000 l f
 0.050000 slw
@@ -1289,7 +1289,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.300033 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1302,7 +1302,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.579433 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1321,7 +1321,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.858833 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1341,7 +1341,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 10.375000 3.975000 m 10.375000 4.275000 l 12.750000 4.275000 l 12.750000 3.975000 l f
 0.050000 slw
@@ -1373,7 +1373,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.300033 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1386,7 +1386,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.579433 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1405,7 +1405,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.858833 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1425,7 +1425,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 10.375000 5.975000 m 10.375000 6.275000 l 12.750000 6.275000 l 12.750000 5.975000 l f
 0.050000 slw
@@ -1457,7 +1457,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.300033 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1470,7 +1470,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.579433 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1489,7 +1489,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.858833 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1509,7 +1509,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 10.375000 7.975000 m 10.375000 8.275000 l 12.750000 8.275000 l 12.750000 7.975000 l f
 0.050000 slw
@@ -1541,7 +1541,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.300033 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1554,7 +1554,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.579433 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1573,7 +1573,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.858833 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1593,7 +1593,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 10.375000 9.975000 m 10.375000 10.275000 l 12.750000 10.275000 l 12.750000 9.975000 l f
 0.050000 slw
diff --git a/doc/slides/users/figures/entities3.eps b/doc/slides/users/figures/entities3.eps
index 6cba9947edd2c2d8d6224a291bb06dc014b17744..0ccaf5e18a96ea5ae7197884ea8a1251b8df3c18 100644
--- a/doc/slides/users/figures/entities3.eps
+++ b/doc/slides/users/figures/entities3.eps
@@ -154,7 +154,7 @@ start_ol
 1773 2440 1920 2285 1920 1954 curveto
 1920 1627 1773 1472 1454 1472 curveto
 896 1472 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.335600 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 2113 98 moveto
@@ -182,7 +182,7 @@ start_ol
 1186 997 lineto
 1325 1022 1348 1030 1408 1059 curveto
 1408 920 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.631933 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 256 2176 moveto
@@ -196,7 +196,7 @@ start_ol
 1179 2176 953 2009 832 1719 curveto
 832 2176 lineto
 256 2176 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.843600 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 1210 2112 moveto
@@ -216,7 +216,7 @@ start_ol
 896 1728 lineto
 1210 1728 lineto
 1210 2112 lineto
-end_ol grestore 
+end_ol grestore
 gsave 5.021400 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 832 2181 moveto
@@ -229,7 +229,7 @@ start_ol
 256 2440 lineto
 832 2440 lineto
 832 2944 lineto
-end_ol grestore 
+end_ol grestore
 gsave 5.173800 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 1210 2112 moveto
@@ -249,7 +249,7 @@ start_ol
 896 1728 lineto
 1210 1728 lineto
 1210 2112 lineto
-end_ol grestore 
+end_ol grestore
 gsave 5.351600 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 832 2181 moveto
@@ -262,7 +262,7 @@ start_ol
 256 2440 lineto
 832 2440 lineto
 832 2944 lineto
-end_ol grestore 
+end_ol grestore
 gsave 5.504000 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 1212 2176 moveto
@@ -275,7 +275,7 @@ start_ol
 1728 712 1518 456 1216 456 curveto
 910 456 704 712 704 1088 curveto
 704 1464 910 1720 1216 1720 curveto
-end_ol grestore 
+end_ol grestore
 gsave 5.834200 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 256 2176 moveto
@@ -291,10 +291,10 @@ start_ol
 1197 2176 996 2070 832 1836 curveto
 832 2176 lineto
 256 2176 lineto
-end_ol grestore 
+end_ol grestore
 gsave 6.164400 11.750000 translate 0.035278 -0.035278 scale
 start_ol
-end_ol grestore 
+end_ol grestore
 gsave 6.316800 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 960 1972 moveto
@@ -305,7 +305,7 @@ start_ol
 1064 2520 769 2347 275 2347 curveto
 275 1972 lineto
 960 1972 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.191900 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 896 1024 moveto
@@ -322,7 +322,7 @@ start_ol
 1773 2440 1920 2285 1920 1954 curveto
 1920 1627 1773 1472 1454 1472 curveto
 896 1472 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.547500 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 2113 98 moveto
@@ -350,7 +350,7 @@ start_ol
 1186 997 lineto
 1325 1022 1348 1030 1408 1059 curveto
 1408 920 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.843833 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 256 2176 moveto
@@ -364,7 +364,7 @@ start_ol
 1179 2176 953 2009 832 1719 curveto
 832 2176 lineto
 256 2176 lineto
-end_ol grestore 
+end_ol grestore
 gsave 12.055500 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 1210 2112 moveto
@@ -384,7 +384,7 @@ start_ol
 896 1728 lineto
 1210 1728 lineto
 1210 2112 lineto
-end_ol grestore 
+end_ol grestore
 gsave 12.233300 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 832 2181 moveto
@@ -397,7 +397,7 @@ start_ol
 256 2440 lineto
 832 2440 lineto
 832 2944 lineto
-end_ol grestore 
+end_ol grestore
 gsave 12.385700 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 1210 2112 moveto
@@ -417,7 +417,7 @@ start_ol
 896 1728 lineto
 1210 1728 lineto
 1210 2112 lineto
-end_ol grestore 
+end_ol grestore
 gsave 12.563500 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 832 2181 moveto
@@ -430,7 +430,7 @@ start_ol
 256 2440 lineto
 832 2440 lineto
 832 2944 lineto
-end_ol grestore 
+end_ol grestore
 gsave 12.715900 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 1212 2176 moveto
@@ -443,7 +443,7 @@ start_ol
 1728 712 1518 456 1216 456 curveto
 910 456 704 712 704 1088 curveto
 704 1464 910 1720 1216 1720 curveto
-end_ol grestore 
+end_ol grestore
 gsave 13.046100 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 256 2176 moveto
@@ -459,10 +459,10 @@ start_ol
 1197 2176 996 2070 832 1836 curveto
 832 2176 lineto
 256 2176 lineto
-end_ol grestore 
+end_ol grestore
 gsave 13.376300 11.750000 translate 0.035278 -0.035278 scale
 start_ol
-end_ol grestore 
+end_ol grestore
 gsave 13.528700 11.750000 translate 0.035278 -0.035278 scale
 start_ol
 2112 504 moveto
@@ -480,7 +480,7 @@ start_ol
 282 749 128 506 128 0 curveto
 2112 0 lineto
 2112 504 lineto
-end_ol grestore 
+end_ol grestore
 0.010000 slw
 [] 0 sd
 [] 0 sd
@@ -514,7 +514,7 @@ start_ol
 1370 0 1664 256 1664 707 curveto
 1664 2496 lineto
 1152 2496 lineto
-end_ol grestore 
+end_ol grestore
 gsave 2.106067 6.125000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -527,7 +527,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 2.385467 6.125000 translate 0.035278 -0.035278 scale
 start_ol
 192 2496 moveto
@@ -546,7 +546,7 @@ start_ol
 1472 621 1313 404 1088 404 curveto
 857 404 704 615 704 934 curveto
 704 1238 860 1452 1088 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 13.102067 9.500000 translate 0.035278 -0.035278 scale
 start_ol
 1152 2496 moveto
@@ -561,7 +561,7 @@ start_ol
 1370 0 1664 256 1664 707 curveto
 1664 2496 lineto
 1152 2496 lineto
-end_ol grestore 
+end_ol grestore
 gsave 13.356067 9.500000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -574,7 +574,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 13.635467 9.500000 translate 0.035278 -0.035278 scale
 start_ol
 192 2496 moveto
@@ -593,7 +593,7 @@ start_ol
 1472 621 1313 404 1088 404 curveto
 857 404 704 615 704 934 curveto
 704 1238 860 1452 1088 1452 curveto
-end_ol grestore 
+end_ol grestore
 0.010000 slw
 [] 0 sd
 [] 0 sd
@@ -618,7 +618,7 @@ start_ol
 1370 0 1664 256 1664 707 curveto
 1664 2496 lineto
 1152 2496 lineto
-end_ol grestore 
+end_ol grestore
 gsave 2.356067 3.250000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -631,7 +631,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 2.635467 3.250000 translate 0.035278 -0.035278 scale
 start_ol
 192 2496 moveto
@@ -650,7 +650,7 @@ start_ol
 1472 621 1313 404 1088 404 curveto
 857 404 704 615 704 934 curveto
 704 1238 860 1452 1088 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 2.013167 3.850000 translate 0.035278 -0.035278 scale
 start_ol
 2176 1719 moveto
@@ -671,7 +671,7 @@ start_ol
 640 1968 823 2081 1111 2081 curveto
 1466 2081 1664 1954 1664 1719 curveto
 2176 1719 lineto
-end_ol grestore 
+end_ol grestore
 gsave 2.317967 3.850000 translate 0.035278 -0.035278 scale
 start_ol
 1038 1856 moveto
@@ -691,7 +691,7 @@ start_ol
 768 1536 lineto
 1038 1536 lineto
 1038 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 2.470367 3.850000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -711,7 +711,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 gsave 2.724367 3.850000 translate 0.035278 -0.035278 scale
 start_ol
 704 1856 moveto
@@ -730,7 +730,7 @@ start_ol
 1472 617 1313 404 1088 404 curveto
 860 404 704 614 704 926 curveto
 704 1239 860 1452 1088 1452 curveto
-end_ol grestore 
+end_ol grestore
 0.010000 slw
 [] 0 sd
 [] 0 sd
@@ -755,7 +755,7 @@ start_ol
 1370 0 1664 256 1664 707 curveto
 1664 2496 lineto
 1152 2496 lineto
-end_ol grestore 
+end_ol grestore
 gsave 13.356067 4.250000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -768,7 +768,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 13.635467 4.250000 translate 0.035278 -0.035278 scale
 start_ol
 192 2496 moveto
@@ -787,7 +787,7 @@ start_ol
 1472 621 1313 404 1088 404 curveto
 857 404 704 615 704 934 curveto
 704 1238 860 1452 1088 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 13.013167 4.850000 translate 0.035278 -0.035278 scale
 start_ol
 2176 1719 moveto
@@ -808,7 +808,7 @@ start_ol
 640 1968 823 2081 1111 2081 curveto
 1466 2081 1664 1954 1664 1719 curveto
 2176 1719 lineto
-end_ol grestore 
+end_ol grestore
 gsave 13.317967 4.850000 translate 0.035278 -0.035278 scale
 start_ol
 1038 1856 moveto
@@ -828,7 +828,7 @@ start_ol
 768 1536 lineto
 1038 1536 lineto
 1038 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 13.470367 4.850000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -848,7 +848,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 gsave 13.724367 4.850000 translate 0.035278 -0.035278 scale
 start_ol
 704 1856 moveto
@@ -867,7 +867,7 @@ start_ol
 1472 617 1313 404 1088 404 curveto
 860 404 704 614 704 926 curveto
 704 1239 860 1452 1088 1452 curveto
-end_ol grestore 
+end_ol grestore
 1.000000 1.000000 1.000000 srgb
 n 3.500000 3.000000 m 3.500000 4.175000 l 5.625000 4.175000 l 5.625000 3.000000 l f
 0.100000 slw
@@ -891,7 +891,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.300033 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -904,7 +904,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.579433 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -923,7 +923,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.858833 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -943,7 +943,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 3.375000 3.975000 m 3.375000 4.275000 l 5.750000 4.275000 l 5.750000 3.975000 l f
 0.050000 slw
@@ -975,7 +975,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 7.300033 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -988,7 +988,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.579433 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1007,7 +1007,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.858833 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1027,7 +1027,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 6.375000 3.975000 m 6.375000 4.275000 l 8.750000 4.275000 l 8.750000 3.975000 l f
 0.050000 slw
@@ -1059,7 +1059,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.300033 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1072,7 +1072,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.579433 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1091,7 +1091,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.858833 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1111,7 +1111,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 3.375000 7.975000 m 3.375000 8.275000 l 5.750000 8.275000 l 5.750000 7.975000 l f
 0.050000 slw
@@ -1143,7 +1143,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 7.300033 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1156,7 +1156,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.579433 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1175,7 +1175,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.858833 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1195,7 +1195,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 6.375000 7.975000 m 6.375000 8.275000 l 8.750000 8.275000 l 8.750000 7.975000 l f
 0.050000 slw
@@ -1227,7 +1227,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.300033 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1240,7 +1240,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.579433 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1259,7 +1259,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.858833 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1279,7 +1279,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 3.375000 9.975000 m 3.375000 10.275000 l 5.750000 10.275000 l 5.750000 9.975000 l f
 0.050000 slw
@@ -1311,7 +1311,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 7.300033 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1324,7 +1324,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.579433 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1343,7 +1343,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.858833 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1363,7 +1363,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 6.375000 9.975000 m 6.375000 10.275000 l 8.750000 10.275000 l 8.750000 9.975000 l f
 0.050000 slw
@@ -1395,7 +1395,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 4.300033 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1408,7 +1408,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.579433 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1427,7 +1427,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 4.858833 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1447,7 +1447,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 3.375000 5.975000 m 3.375000 6.275000 l 5.750000 6.275000 l 5.750000 5.975000 l f
 0.050000 slw
@@ -1479,7 +1479,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 7.300033 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1492,7 +1492,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.579433 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1511,7 +1511,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 7.858833 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1531,7 +1531,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 6.375000 5.975000 m 6.375000 6.275000 l 8.750000 6.275000 l 8.750000 5.975000 l f
 0.050000 slw
@@ -1563,7 +1563,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.300033 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1576,7 +1576,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.579433 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1595,7 +1595,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.858833 3.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1615,7 +1615,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 10.375000 3.975000 m 10.375000 4.275000 l 12.750000 4.275000 l 12.750000 3.975000 l f
 0.050000 slw
@@ -1647,7 +1647,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.300033 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1660,7 +1660,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.579433 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1679,7 +1679,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.858833 5.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1699,7 +1699,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 10.375000 5.975000 m 10.375000 6.275000 l 12.750000 6.275000 l 12.750000 5.975000 l f
 0.050000 slw
@@ -1731,7 +1731,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.300033 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1744,7 +1744,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.579433 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1763,7 +1763,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.858833 7.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1783,7 +1783,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 10.375000 7.975000 m 10.375000 8.275000 l 12.750000 8.275000 l 12.750000 7.975000 l f
 0.050000 slw
@@ -1815,7 +1815,7 @@ start_ol
 1020 1856 846 1766 704 1565 curveto
 704 1856 lineto
 192 1856 lineto
-end_ol grestore 
+end_ol grestore
 gsave 11.300033 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1053 1856 moveto
@@ -1828,7 +1828,7 @@ start_ol
 1472 608 1302 391 1056 391 curveto
 807 391 640 608 640 928 curveto
 640 1248 807 1465 1056 1465 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.579433 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1408 0 moveto
@@ -1847,7 +1847,7 @@ start_ol
 1408 615 1252 404 1024 404 curveto
 796 404 640 618 640 925 curveto
 640 1235 796 1452 1024 1452 curveto
-end_ol grestore 
+end_ol grestore
 gsave 11.858833 9.750000 translate 0.035278 -0.035278 scale
 start_ol
 1811 768 moveto
@@ -1867,7 +1867,7 @@ start_ol
 1054 1486 1165 1431 1234 1340 curveto
 1279 1276 1299 1208 1310 1088 curveto
 567 1088 lineto
-end_ol grestore 
+end_ol grestore
 0.800000 0.800000 0.800000 srgb
 n 10.375000 9.975000 m 10.375000 10.275000 l 12.750000 10.275000 l 12.750000 9.975000 l f
 0.050000 slw
diff --git a/doc/slides/users/figures/linux_llnl.png b/doc/slides/users/figures/linux_llnl.png
index fb3d74b01eb0680be54091e541623399dd0f6601..2c1018b385eb83721165c12f96e3636a26ef6bbe 100644
Binary files a/doc/slides/users/figures/linux_llnl.png and b/doc/slides/users/figures/linux_llnl.png differ
diff --git a/doc/slides/users/figures/linux_llnl2.png b/doc/slides/users/figures/linux_llnl2.png
index 2a84084f2621392feef93246e8b952977e0fa7e8..6feb715bb73a0944877641e0b273386222d86489 100644
Binary files a/doc/slides/users/figures/linux_llnl2.png and b/doc/slides/users/figures/linux_llnl2.png differ
diff --git a/doc/slides/users/figures/penguin.eps b/doc/slides/users/figures/penguin.eps
index 46819fc95ef19371934999910391bd01a47c0eee..48b94b89a2fc183b89fb4a375d1c90e189d2c4ce 100644
--- a/doc/slides/users/figures/penguin.eps
+++ b/doc/slides/users/figures/penguin.eps
@@ -17,7 +17,7 @@
 %%BeginProlog
 %%BeginResource: procset Adobe_packedarray 2.0 0
 %%Title: (Packed Array Operators)
-%%Version: 2.0 
+%%Version: 2.0
 %%CreationDate: (8/2/90) ()
 %%Copyright: ((C) 1987-1990 Adobe Systems Incorporated All Rights Reserved)
 userdict /Adobe_packedarray 5 dict dup begin put
@@ -60,7 +60,7 @@ currentdict readonly pop end
 Adobe_packedarray /initialize get exec
 %%BeginResource: procset Adobe_cshow 1.1 0
 %%Title: (cshow Operator)
-%%Version: 1.1 
+%%Version: 1.1
 %%CreationDate: (1/23/89) ()
 %%Copyright: ((C) 1987-1990 Adobe Systems Incorporated All Rights Reserved)
 currentpacking true setpacking
@@ -106,7 +106,7 @@ setpacking
 %%EndResource
 %%BeginResource: procset Adobe_customcolor 1.0 0
 %%Title: (Custom Color Operators)
-%%Version: 1.0 
+%%Version: 1.0
 %%CreationDate: (5/9/88) ()
 %%Copyright: ((C) 1987-1990 Adobe Systems Incorporated All Rights Reserved)
 currentpacking true setpacking
@@ -162,7 +162,7 @@ setpacking
 %%EndResource
 %%BeginResource: procset Adobe_IllustratorA_AI3 1.1 3
 %%Title: (Adobe Illustrator (R) Version 3.0 Abbreviated Prolog)
-%%Version: 1.1 
+%%Version: 1.1
 %%CreationDate: (3/7/1994) ()
 %%Copyright: ((C) 1987-1994 Adobe Systems Incorporated All Rights Reserved)
 currentpacking true setpacking
@@ -181,7 +181,7 @@ userdict /Adobe_IllustratorA_AI3_vars 58 dict dup begin put
 /_doClip 0 def
 /cf	currentflat def
 /_tm matrix def
-/_renderStart [/e0 /r0 /a0 /o0 /e1 /r1 /a1 /i0] def 
+/_renderStart [/e0 /r0 /a0 /o0 /e1 /r1 /a1 /i0] def
 /_renderEnd [null null null null /i1 /i1 /i1 /i1] def
 /_render -1 def
 /_rise 0 def
@@ -268,9 +268,9 @@ exch 5 -1 roll 3 index mul add
 /swj
 {
 dup 4 1 roll
-dup length exch stringwidth 
+dup length exch stringwidth
 exch 5 -1 roll 3 index mul add
-4 1 roll 3 1 roll mul add 
+4 1 roll 3 1 roll mul add
 6 2 roll /_cnt 0 ddef
 {1 index eq {/_cnt _cnt 1 add ddef} if} forall pop
 exch _cnt mul exch _cnt mul 2 index add 4 1 roll 2 index add 4 1 roll pop pop
@@ -279,7 +279,7 @@ exch _cnt mul exch _cnt mul 2 index add 4 1 roll 2 index add 4 1 roll pop pop
 {
 4 1 roll
 	{
-	2 npop 
+	2 npop
 	(0) exch 2 copy 0 exch put pop
 	gsave
 	false charpath currentpoint
@@ -295,12 +295,12 @@ exch _cnt mul exch _cnt mul 2 index add 4 1 roll 2 index add 4 1 roll pop pop
 {
 4 1 roll
 	{
-	2 npop 
-	(0) exch 2 copy 0 exch put 
+	2 npop
+	(0) exch 2 copy 0 exch put
 	gsave
-	_sp eq 
+	_sp eq
 		{
-		exch 6 index 6 index 6 index 5 -1 roll widthshow  
+		exch 6 index 6 index 6 index 5 -1 roll widthshow
 		currentpoint
 		}
 		{
@@ -326,11 +326,11 @@ exch _cnt mul exch _cnt mul 2 index add 4 1 roll 2 index add 4 1 roll pop pop
 /jsp
 {
 	{
-	2 npop 
-	(0) exch 2 copy 0 exch put 
-	_sp eq 
+	2 npop
+	(0) exch 2 copy 0 exch put
+	_sp eq
 		{
-		exch 5 index 5 index 5 index 5 -1 roll widthshow  
+		exch 5 index 5 index 5 index 5 -1 roll widthshow
 		}
 		{
 		false charpath
@@ -445,11 +445,11 @@ closepath
 } def
 /N
 {
-_pola 0 eq 
+_pola 0 eq
 	{
-	_doClip 1 eq {clip /_doClip 0 ddef} if 
+	_doClip 1 eq {clip /_doClip 0 ddef} if
 	newpath
-	} 
+	}
 	{
 	/CRender {N} ddef
 	}ifelse
@@ -458,17 +458,17 @@ _pola 0 eq
 {N} def
 /F
 {
-_pola 0 eq 
+_pola 0 eq
 	{
-	_doClip 1 eq 
+	_doClip 1 eq
 		{
-		gsave _pf grestore clip newpath /_lp /none ddef _fc 
+		gsave _pf grestore clip newpath /_lp /none ddef _fc
 		/_doClip 0 ddef
 		}
 		{
 		_pf
 		}ifelse
-	} 
+	}
 	{
 	/CRender {F} ddef
 	}ifelse
@@ -480,17 +480,17 @@ F
 } def
 /S
 {
-_pola 0 eq 
+_pola 0 eq
 	{
-	_doClip 1 eq 
+	_doClip 1 eq
 		{
-		gsave _ps grestore clip newpath /_lp /none ddef _sc 
+		gsave _ps grestore clip newpath /_lp /none ddef _sc
 		/_doClip 0 ddef
 		}
 		{
 		_ps
 		}ifelse
-	} 
+	}
 	{
 	/CRender {S} ddef
 	}ifelse
@@ -502,14 +502,14 @@ S
 } def
 /B
 {
-_pola 0 eq 
+_pola 0 eq
 	{
 	_doClip 1 eq
-	gsave F grestore 
+	gsave F grestore
 		{
 		gsave S grestore clip newpath /_lp /none ddef _sc
 		/_doClip 0 ddef
-		} 
+		}
 		{
 		S
 		}ifelse
@@ -529,10 +529,10 @@ B
 } def
 /*
 {
-count 0 ne 
+count 0 ne
 	{
 	dup type (stringtype) eq {pop} if
-	} if 
+	} if
 _pola 0 eq {newpath} if
 } def
 /u
@@ -553,7 +553,7 @@ _pola 1 add /_pola exch ddef
 } def
 /*U
 {
-_pola 1 sub /_pola exch ddef 
+_pola 1 sub /_pola exch ddef
 _pola 0 eq {CRender} if
 } def
 /D
@@ -601,7 +601,7 @@ _i restore
 {
 /_gf exch ddef
 /_fc
-{ 
+{
 _lp /fill ne
 	{
 	_of setoverprint
@@ -719,7 +719,7 @@ jss
 findcmykcustomcolor
 /_if exch ddef
 /_fc
-{ 
+{
 _lp /fill ne
 	{
 	_of setoverprint
diff --git a/doc/slides/users/figures/slides.ps b/doc/slides/users/figures/slides.ps
index 19110208332d854652f5daa506bd42e01021de29..fab602b503e40caa95cefdcf6a11e3a3498d0a77 100644
--- a/doc/slides/users/figures/slides.ps
+++ b/doc/slides/users/figures/slides.ps
@@ -510,15 +510,15 @@ end
 %
 % This is the encoding vector for Type1 and TrueType fonts to be used
 % with TeX.  This file is part of the PSNFSS bundle, version 9
-% 
+%
 % Authors: S. Rahtz, P. MacKay, Alan Jeffrey, B. Horn, K. Berry, W. Schmidt
 %
 % Idea is to have all the characters normally included in Type 1 fonts
 % available for typesetting. This is effectively the characters in Adobe
 % Standard Encoding + ISO Latin 1 + extra characters from Lucida + Euro.
-% 
+%
 % Character code assignments were made as follows:
-% 
+%
 % (1) the Windows ANSI characters are almost all in their Windows ANSI
 % positions, because some Windows users cannot easily reencode the
 % fonts, and it makes no difference on other systems. The only Windows
@@ -526,40 +526,40 @@ end
 % typesetting -- rubout (127 decimal), nobreakspace (160), softhyphen
 % (173). quotesingle and grave are moved just because it's such an
 % irritation not having them in TeX positions.
-% 
+%
 % (2) Remaining characters are assigned arbitrarily to the lower part
 % of the range, avoiding 0, 10 and 13 in case we meet dumb software.
-% 
+%
 % (3) Y&Y Lucida Bright includes some extra text characters; in the
 % hopes that other PostScript fonts, perhaps created for public
 % consumption, will include them, they are included starting at 0x12.
-% 
+%
 % (4) Remaining positions left undefined are for use in (hopefully)
 % upward-compatible revisions, if someday more characters are generally
 % available.
-% 
+%
 % (5) hyphen appears twice for compatibility with both ASCII and Windows.
 %
 % (6) /Euro is assigned to 128, as in Windows ANSI
-% 
+%
 /TeXBase1Encoding [
 % 0x00 (encoded characters from Adobe Standard not in Windows 3.1)
   /.notdef /dotaccent /fi /fl
   /fraction /hungarumlaut /Lslash /lslash
   /ogonek /ring /.notdef
-  /breve /minus /.notdef 
+  /breve /minus /.notdef
 % These are the only two remaining unencoded characters, so may as
 % well include them.
-  /Zcaron /zcaron 
+  /Zcaron /zcaron
 % 0x10
- /caron /dotlessi 
+ /caron /dotlessi
 % (unusual TeX characters available in, e.g., Lucida Bright)
- /dotlessj /ff /ffi /ffl 
+ /dotlessj /ff /ffi /ffl
  /.notdef /.notdef /.notdef /.notdef
  /.notdef /.notdef /.notdef /.notdef
  % very contentious; it's so painful not having quoteleft and quoteright
  % at 96 and 145 that we move the things normally found there down to here.
- /grave /quotesingle 
+ /grave /quotesingle
 % 0x20 (ASCII begins)
  /space /exclam /quotedbl /numbersign
  /dollar /percent /ampersand /quoteright
@@ -713,7 +713,7 @@ sub sub /pdf@voff exch def } def /H.R { currentpoint HyperBorder sub
 pop vsize 72 sub exch DvipsToPDF sub /pdf@voff exch def } def systemdict
 /pdfmark known not {userdict /pdfmark systemdict /cleartomark get put}
 if
- 
+
 @fedspecial end TeXDict begin
 40258431 52099146 2074 600 600 (slides.dvi) @start /Fa
 134[124 124 2[138 69 124 83 138 138 138 138 207 55 1[55
@@ -761,7 +761,7 @@ a 5073 5340 a @beginspecial @setspecial
  tx@Dict begin STP newpath 0.8 SLW 0. setgray  0.25 true -25.60728
 -25.60728 348.5461 253.94081 .5 Frame  gsave 0. setgray fill grestore
 gsave 0.8 SLW 0. setgray 0 setlinecap stroke  grestore end
- 
+
 @endspecial
 4828 2192 a @beginspecial 14 @llx 14 @lly 513 @urx 49
 @ury 3401 @rwi 99 @rhi @setspecial
@@ -3833,7 +3833,7 @@ a @beginspecial @setspecial
  tx@Dict begin STP newpath 0.8 SLW 0. setgray  0.25 true -25.60728
 -25.60728 348.5461 253.94081 .5 Frame  gsave 0. setgray fill grestore
 gsave 0.8 SLW 0. setgray 0 setlinecap stroke  grestore end
- 
+
 @endspecial 4828 2192 a
 @beginspecial 14 @llx 14 @lly 513 @urx 49 @ury 3401 @rwi
 99 @rhi @setspecial
diff --git a/doc/slides/users/slides.tex b/doc/slides/users/slides.tex
index ad987272eee8bf30ae05bbbcf8d8a03ed869978c..6ab45117d836fc4413c7c9f03461999d1a9c04c4 100644
--- a/doc/slides/users/slides.tex
+++ b/doc/slides/users/slides.tex
@@ -1,15 +1,15 @@
 %
-% Slides for SLURM talk at User's meeting 
+% Slides for SLURM talk at User's meeting
 %                on
 %            2003-04-08
 %
 % This presentation was built using a modified version of the
-% ``prosper'' latex class and `alienglow' style 
+% ``prosper'' latex class and `alienglow' style
 % (See ./tex/prosper.cls and ./tex/PPRlinuxglow.sty)
 %
 % For the "interactive" PDF slides, uncomment the pdf option below
 % to enable overlays. For printed slides, uncomment the ps option.
-% 
+%
 \documentclass[%
 letterpaper,
 pdf,
@@ -66,7 +66,7 @@ alienglow
 
 %
 % Prepare a 5 overlay slide using the handy "itemstep" environment
-% 
+%
 \overlays{5}{%
 \begin{slide}{What is SLURM?}
 \begin{itemstep}
@@ -74,7 +74,7 @@ alienglow
   \begin{itemstep}
     \item Allocates access to resources (nodes)
     \item Distributes work to allocated resources.
-    \item Manages conflicting requests for resources by maintaining a 
+    \item Manages conflicting requests for resources by maintaining a
           simple queue of pending work.
   \end{itemstep}
   \item Open Source replacement for RMS
@@ -85,7 +85,7 @@ alienglow
 \overlays{3}{%
 \begin{slide}{What SLURM is Not}
 \begin{itemstep}
-  \item A sophisticated job scheduler. 
+  \item A sophisticated job scheduler.
   \item A meta-batch system.
   \item A comprehensive cluster administration or monitoring package.
 \end{itemstep}
@@ -237,10 +237,10 @@ grondo@dev0 ~ >
 \begin{slide}{SLURM and RMS Differences}
 \raggedright
 \begin{itemize}
-\item SLURM infrastructure is much simpler 
+\item SLURM infrastructure is much simpler
   \begin{itemize}
   \item SLURM uses a simple configuration file not database
-  \item Initially, no back-end database \\ 
+  \item Initially, no back-end database \\
         (future versions of SLURM may have this)
   \end{itemize}
 \item SLURM Open Source and locally developed to fit our needs
diff --git a/doc/slides/users/tex/PPRalienglow.sty b/doc/slides/users/tex/PPRalienglow.sty
index ae7c9bac2011a5c3fc2a155f57d7ca0b38b97a3c..bdcc0fa36bd3b44730f9ae34dfb8c86f7bbfd2e6 100644
--- a/doc/slides/users/tex/PPRalienglow.sty
+++ b/doc/slides/users/tex/PPRalienglow.sty
@@ -7,7 +7,7 @@
 %% of this license or (at your option) any later version.
 %% The latest version of this license is in
 %%   http://www.latex-project.org/lppl.txt
-%% and version 1.2 or later is part of all distributions of LaTeX 
+%% and version 1.2 or later is part of all distributions of LaTeX
 %% version 1999/12/01 or later.
 %%
 %% This program consists of the files listed in manifest.txt
@@ -63,7 +63,7 @@
 
 \endinput
 
-%%% Local Variables: 
+%%% Local Variables:
 %%% mode: latex
 %%% TeX-master: t
-%%% End: 
+%%% End:
diff --git a/doc/slides/users/tex/prosper.cls b/doc/slides/users/tex/prosper.cls
index 60fde7aa9d0a6068acf1c8a4ca563b6ac216d3e1..b0fc0923eee3abc1f4eac65aac92d1c8728b8c29 100644
--- a/doc/slides/users/tex/prosper.cls
+++ b/doc/slides/users/tex/prosper.cls
@@ -7,7 +7,7 @@
 %% of this license or (at your option) any later version.
 %% The latest version of this license is in
 %%   http://www.latex-project.org/lppl.txt
-%% and version 1.2 or later is part of all distributions of LaTeX 
+%% and version 1.2 or later is part of all distributions of LaTeX
 %% version 1999/12/01 or later.
 %%
 %% This program consists of the files listed in manifest.txt
@@ -107,9 +107,9 @@
 % Add a bookmark for each master slide
 \def\@addBookmarkOnSlide#1#2{%
   \ifnum#1=0
-  \def\Hy@temp{#2}% 
+  \def\Hy@temp{#2}%
     \pdfmark{pdfmark=/OUT,
-      Raw={/Page \thetrueSlideCounter 
+      Raw={/Page \thetrueSlideCounter
         /View [ /XYZ null null null ]
         /Title (\expandafter\strip@prefix\meaning\Hy@temp)}
     }%
@@ -119,9 +119,9 @@
   \ifcollapsedBookmarks
     \@tempoLimit=-\@tempoLimit
   \fi
-  \def\Hy@temp{#2}% 
+  \def\Hy@temp{#2}%
     \pdfmark{pdfmark=/OUT,
-      Raw={/Count \number\@tempoLimit /Page \thetrueSlideCounter 
+      Raw={/Count \number\@tempoLimit /Page \thetrueSlideCounter
         /View [ /XYZ null null null ]
         /Title (\expandafter\strip@prefix\meaning\Hy@temp)}
     }%
@@ -189,13 +189,13 @@
     no font specified for text in B\string&W}{Use macro \string\FontText}}
 \def\@colorFoot{\black}
 \newcommand{\FontTitle}[2]{%
-  \gdef\@fontTitleColor{#1}% 
+  \gdef\@fontTitleColor{#1}%
   \gdef\@fontTitleBW{#2}}
 \newcommand{\FontSubtitle}[2]{%
   \gdef\@fontSubtitleColor{#1}%
   \gdef\@fontSubtitleBW{#2}}
 \newcommand{\FontText}[2]{%
-  \gdef\@fontTextColor{#1}% 
+  \gdef\@fontTextColor{#1}%
   \gdef\@fontTextBW{#2}}
 \newcommand{\ColorFoot}[1]{\gdef\@colorFoot{#1}}
 
@@ -252,12 +252,12 @@
        \slidetitle{#2}%
     \fi
     \begin{posit@Box}%
-      \begin{minipage}{\slideWidth}%    
+      \begin{minipage}{\slideWidth}%
         \begin{raggedright}%
         \@DefMyItem
         \ifinColor\@fontTextColor\else\@fontTextBW\fi
         }{%
-      \par\end{raggedright}% 
+      \par\end{raggedright}%
     \end{minipage}
   \end{posit@Box}\end{slide@seminar}}
 % Slides in overlays
@@ -290,7 +290,7 @@
             \@DefMyItem
             \ifinColor\@fontTextColor\else\@fontTextBW\fi
             {\overlay{1}}%
-  \fi    
+  \fi
   }{%
   \ifDVItoPS
         \par\end{raggedright}%
@@ -321,7 +321,7 @@
 \def\@prosperItemiii{{\scriptsize\green%
       \raisebox{2pt}{\ensuremath{\bullet}}}}
 \newcommand{\myitem}[2]{%
-  \ifnum#1=1 
+  \ifnum#1=1
     \gdef\@prosperItemi{#2}%
   \else
     \ifnum#1=2
@@ -393,7 +393,7 @@
 \let\Itemize=\itemize
 \let\endItemize=\enditemize
 \let\orig@item=\item
-\renewenvironment{itemize}{\global\let\cur@item=\item 
+\renewenvironment{itemize}{\global\let\cur@item=\item
   \global\let\item=\orig@item%
   \begin{Itemize}\begin{raggedright}}{%
       \end{raggedright}\end{Itemize}\global\let\item=\cur@item}
@@ -450,7 +450,7 @@
 \renewcommand{\maketitle}{%
   %% Tests whether the 'french' style from B. Gaulle is loaded
   \expandafter\ifx\csname frenchTeXmods\endcsname\relax%
-  \else 
+  \else
   \PackageWarning{prosper}{This page intentionnally left blank to overcome an incompatibility bug between B. Gaulle 'french' package and the seminar class.}
   \begin{center}
     {\black\small
@@ -504,7 +504,7 @@
 
 
 % fromSlide, onlySlide, untilSlide
-% Masks or not the parameter. 
+% Masks or not the parameter.
 % The stared version does not advance the position pointer and does not
 % interpret its argument (to be used for graphics and commands containing
 % PStricks links)
@@ -517,7 +517,7 @@
 % [2001/07/16] PMN: The grouping and \ignorespaces around the argument
 %   is necessary for downwards compatibility.
 %     \ignorespaces is as found in \overlays from seminar and
-%     the grouping changes the spacing behavour in TeX.  
+%     the grouping changes the spacing behavour in TeX.
 
 \long\def\fromSlide{%
   \@ifstar\fromSlide@E\fromSlide@NE}
@@ -655,7 +655,7 @@
 \newif\ifInOverlays
 \InOverlaysfalse
 \long\def\overlays#1#2{%
-  \bgroup  
+  \bgroup
   \aftergroup\@cleanupOverlay
   \global\InOverlaystrue
   \setcounter{limitOverlays}{#1}%
@@ -664,8 +664,8 @@
     #2
   \else
     \begin{Overlays}%
-    \bgroup  
-    \loop 
+    \bgroup
+    \loop
       \@everyOverlay
       #2
     \ifnum\value{overlaysCount}<\value{limitOverlays}%
@@ -762,7 +762,7 @@
 }
 
 \def\@prepareResetCounters{%
-  \ifnum\value{nbAffected}>0 
+  \ifnum\value{nbAffected}>0
      \setcounter{savecountAffected}{1}
      \loop
        \csname @sac\the\c@savecountAffected\endcsname
@@ -795,7 +795,7 @@
   \prosper@next}
 % #1 appears on the slide only on PS mode
 \newcommand{\onlyInPS}{%
-  \ifDVItoPS 
+  \ifDVItoPS
     \let\prosper@next\@iden
    \else
     \let\prosper@next\@gobble
@@ -844,7 +844,7 @@
 
 \endinput
 
-%%% Local Variables: 
+%%% Local Variables:
 %%% mode: latex
 %%% TeX-master: t
-%%% End: 
+%%% End:
diff --git a/doc/survey/Makefile b/doc/survey/Makefile
index 6be471975767c728dce135ceef926d927173098d..3777b8f6843ca2f1890d06c6162e822622d46263 100644
--- a/doc/survey/Makefile
+++ b/doc/survey/Makefile
@@ -11,8 +11,8 @@
 REPORT = report
 
 TEX = $(REPORT).tex
-FIGS = 
-PLOTS = 
+FIGS =
+PLOTS =
 BIB = project.bib
 
 %.eps: %.gnuplot %.data
@@ -24,7 +24,7 @@ BIB = project.bib
 %.ps: %.dvi
 	dvips -t letter -o $(@F) $(<F)
 %.pdf: %.dvi
-	dvipdf $< $@ 
+	dvipdf $< $@
 
 all: $(REPORT).ps
 
@@ -38,5 +38,5 @@ $(REPORT).dvi: $(TEX) $(FIGS) $(PLOTS) $(BIB)
 view: $(REPORT).ps
 	ghostview $(REPORT) &
 
-clean: 
+clean:
 	rm -f *~ *.dvi *.log *.aux report.ps *.blg *.bbl #*.eps #*.gif
diff --git a/doc/survey/project.bib b/doc/survey/project.bib
index 8e6e8ab7b8a05749fbad063044468547a2936be3..d110140ad7e152d30dcf17843ee4913128a31506 100644
--- a/doc/survey/project.bib
+++ b/doc/survey/project.bib
@@ -1,6 +1,6 @@
 @TECHREPORT
 {
-    Res2000, 
+    Res2000,
     AUTHOR = "Brett Bode et al",
     TITLE  = "Resource management in a parallel World",
     INSTITUTION = "SciDAC",
@@ -9,35 +9,35 @@
 
 @TECHREPORT
 {
-    Uselton00, 
+    Uselton00,
     AUTHOR= "Andrew C. Uselton",
     TITLE = "The Raw Disk I/O Performance of Compaq StorageWorks RAID arrays
-under Tru64 UNIX", 
+under Tru64 UNIX",
     INSTITUTION = "Lawrence Livermore National Laboratory",
     YEAR        = 2000,
     NUMBER      = "UCRL-ID-141831"
 }
 @TECHREPORT
 {
-    Uselton01a, 
+    Uselton01a,
     AUTHOR= "Andrew C. Uselton",
-    TITLE = "The Performance of PFS, the Compaq Sierra Product's Parallel File System", 
+    TITLE = "The Performance of PFS, the Compaq Sierra Product's Parallel File System",
     INSTITUTION = "Lawrence Livermore National Laboratory",
     YEAR        = 2001,
     NUMBER      = "UCRL-ID-144018"
 }
 @TECHREPORT
 {
-    Garlick01a, 
+    Garlick01a,
     AUTHOR= "Jim Garlick and Andrew C. Uselton",
-    TITLE = "Towards a Linux-based, High-performance Computing Resource", 
+    TITLE = "Towards a Linux-based, High-performance Computing Resource",
     INSTITUTION = "Lawrence Livermore National Laboratory",
     YEAR        = 2001,
     NUMBER      = "UCRL-ID-000000"
 }
 @TECHREPORT
 {
-    Garlick01b, 
+    Garlick01b,
     AUTHOR= "Jim Garlick",
     TITLE = "Building a High Performance Raw Disk Subsystem for Alpha/Linux",
     INSTITUTION = "Lawrence Livermore National Laboratory",
@@ -71,7 +71,7 @@ under Tru64 UNIX",
 }
 @TECHREPORT
 {
-    Maletinsky00, 
+    Maletinsky00,
     AUTHOR= "M. Maletinsky",
     TITLE = "Implementing Read Ahead in Petal/Frangipani",
     INSTITUTION = "SCS",
@@ -80,7 +80,7 @@ under Tru64 UNIX",
 }
 @TECHREPORT
 {
-    Baettig00, 
+    Baettig00,
     AUTHOR= "R. Baettig",
     TITLE = "Petal Communication and RPC's",
     INSTITUTION = "SCS",
@@ -113,9 +113,9 @@ under Tru64 UNIX",
 }
 @INPROCEEDINGS
 {
-    Minnich00, 
+    Minnich00,
     AUTHOR= "Ron Minnich and James Hendricks and Dale Webster",
-    TITLE = "The Linux BIOS ", 
+    TITLE = "The Linux BIOS ",
     ORGANIZATION = "Los Alamos National Laboratory",
     BOOKTITLE = "The Fourth Annual Linux Showcase and Conference, Atlanta, GA",
     YEAR        = 2000,
@@ -126,7 +126,7 @@ under Tru64 UNIX",
     MPICH,
     author = "G. William and E. Lusk",
     title = "User's Guide for mpich, a Portable Implementation of MPI",
-    text = "Gropp, William and Ewing Lusk. User's Guide for mpich, 
+    text = "Gropp, William and Ewing Lusk. User's Guide for mpich,
             a Portable Implementation of MPI.",
     url = "http:// www.mcs.anl.gov/mpi/mpiuserguide/paper.html"
 }
@@ -177,7 +177,7 @@ under Tru64 UNIX",
 }
 @TECHREPORT
 {
-    Ruwart00, 
+    Ruwart00,
     AUTHOR= "Tom Ruwart and Alex Elder",
     TITLE = "SAN/CXFS Test Report to LLNL",
     INSTITUTION = "University of Minnesota, Laboratory for Computational Science and Engineering",
diff --git a/doc/survey/report.tex b/doc/survey/report.tex
index 8c526456aa3b3f6bff63568e14abf22a5b1611f8..d46aa6e24a433a83406bf0ce5c9a4b7a71699040 100644
--- a/doc/survey/report.tex
+++ b/doc/survey/report.tex
@@ -3,7 +3,7 @@
 \usepackage{epsfig}
 \usepackage{draftcopy}
 \author{Moe Jette, Chris Dunlap, Jim Garlick, Mark Grondona\\
-        \{jette,cdunlap,garlick,grondona\}@llnl.gov} 
+        \{jette,cdunlap,garlick,grondona\}@llnl.gov}
 \title{Survey of Batch/Resource Management-Related System Software}
 
 
@@ -13,16 +13,16 @@
 
 \begin{abstract}
 Simple Linux Utility for Resource Management (SLURM) is an open source,
-fault-tolerant, and highly scalable cluster management and job 
-scheduling system for Linux clusters of 
+fault-tolerant, and highly scalable cluster management and job
+scheduling system for Linux clusters of
 thousands of nodes.  Components include machine status, partition
-management, job management, and scheduling modules.  The design also 
+management, job management, and scheduling modules.  The design also
 includes a scalable, general-purpose communication infrastructure.
 Development will take place in four phases:  Phase I results in a solid
-infrastructure;  Phase II produces a functional but limited interactive 
-job initiation capability without use of the interconnect/switch; 
-Phase III provides switch support and documentation; Phase IV provides 
-job statusing, fault-tolerance, and job queueing and control through  
+infrastructure;  Phase II produces a functional but limited interactive
+job initiation capability without use of the interconnect/switch;
+Phase III provides switch support and documentation; Phase IV provides
+job statusing, fault-tolerance, and job queueing and control through
 Livermore's Distributed Production Control System (DPCS), a metabatch and
 resource management system.
 \end{abstract}
@@ -32,36 +32,36 @@ resource management system.
 \section{PBS (Portable Batch System)}
 
 The Portable Batch System (PBS)\footnote{http://www.openpbs.org/}
-is a flexible batch queuing and 
-workload management system originally developed by Veridian Systems 
-for NASA.  It operates on networked, multi-platform UNIX environments, 
-including heterogeneous clusters of workstations, supercomputers, and 
-massively parallel systems. PBS was developed as a replacement for 
+is a flexible batch queuing and
+workload management system originally developed by Veridian Systems
+for NASA.  It operates on networked, multi-platform UNIX environments,
+including heterogeneous clusters of workstations, supercomputers, and
+massively parallel systems. PBS was developed as a replacement for
 NQS (Network Queuing System) by many of the same people.
 
-PBS supports sophisticated scheduling logic (via the Maui 
-Scheduler\footnote{http://superclustergroup.org/maui}). 
-PBS spawn's daemons on each 
-machine to shepherd the job's tasks (similar to LoadLeveler 
-and Condor). It provides an interface for administrators to easily 
-interface their own scheduling modules (a nice feature).  PBS can support 
-long delays in file staging (in and out) with retry.  Host 
-authentication is provided by checking port numbers (low ports numbers are only 
-accessible to user root).  Credential service is used for user authentication. 
-It has the job prolog and epilog feature, which is useful.  PBS Supports 
-high priority queue for smaller "interactive" jobs.  Signal to daemons 
-causes current log file (e.g. accounting) to be closed, renamed with 
+PBS supports sophisticated scheduling logic (via the Maui
+Scheduler\footnote{http://superclustergroup.org/maui}).
+PBS spawn's daemons on each
+machine to shepherd the job's tasks (similar to LoadLeveler
+and Condor). It provides an interface for administrators to easily
+interface their own scheduling modules (a nice feature).  PBS can support
+long delays in file staging (in and out) with retry.  Host
+authentication is provided by checking port numbers (low ports numbers are only
+accessible to user root).  Credential service is used for user authentication.
+It has the job prolog and epilog feature, which is useful.  PBS Supports
+high priority queue for smaller "interactive" jobs.  Signal to daemons
+causes current log file (e.g. accounting) to be closed, renamed with
 time-stamp, and a new log file created.
 
-Specific complaints about PBS from members of the OSCAR group (Jeremy Enos, 
+Specific complaints about PBS from members of the OSCAR group (Jeremy Enos,
 Jeff Squyres, Tim Mattson):
 \begin{itemize}
-\item Sensitivity to hostname configuration on the server; improper 
-      configuration results in hard to diagnose failure modes.  Once 
+\item Sensitivity to hostname configuration on the server; improper
+      configuration results in hard to diagnose failure modes.  Once
       configuration is correct, this issue disappears.
-\item When a compute node in the system dies, everything slows down.  
+\item When a compute node in the system dies, everything slows down.
       PBS is single-threaded and continues to try to contact down nodes,
-      while other activities like scheduling jobs, answering qsub/qstat 
+      while other activities like scheduling jobs, answering qsub/qstat
       requests, etc., have to wait for a complete timeout cycle before being
       processed.
 \item Default scheduler is just FIFO, but Maui can be plugged in so this
@@ -69,17 +69,17 @@ Jeff Squyres, Tim Mattson):
 \item Weak mechanism for starting/cleaning up parallel jobs (pbsdsh).
       When a job is killed, pbsdsh kills the processes it started, but
       if the process doesn't die on the first shot it may continue on.
-\item PBS server continues to mark specific nodes offline, even though they 
+\item PBS server continues to mark specific nodes offline, even though they
       are healthy.  Restarting the server fixes this.
-\item Lingering jobs.  Jobs assigned to nodes, and then bounced back to the 
-      queue for any reason, maintain their assignment to those nodes, even 
-      if another job had already started on them.  This is a poor clean up 
+\item Lingering jobs.  Jobs assigned to nodes, and then bounced back to the
+      queue for any reason, maintain their assignment to those nodes, even
+      if another job had already started on them.  This is a poor clean up
       issue.
 \item When the PBS server process is restarted, it puts running jobs at risk.
-\item Poor diagnostic messages.  This problem can be as serious as ANY other 
-      problem.  This problem makes small, simple problems turn into huge 
-      turmoil occasionally.  For example, the variety of symptoms that arise 
-      from improper hostname configuration.  All the symptoms that result are 
+\item Poor diagnostic messages.  This problem can be as serious as ANY other
+      problem.  This problem makes small, simple problems turn into huge
+      turmoil occasionally.  For example, the variety of symptoms that arise
+      from improper hostname configuration.  All the symptoms that result are
       very misleading to the real problem.
 \item Rumored to have problems when the number of jobs in the queues gets
       large.
@@ -93,7 +93,7 @@ The one strength mentioned is PBS's portability and broad user base.
 
 PBS is owned by Veridian and is released as three separate products with
 different licenses: {\em PBS Pro} is a commercial product sold by Veridian;
-{\em OpenPBS} is an pseudo open source version of PBS that requires 
+{\em OpenPBS} is an pseudo open source version of PBS that requires
 registration; and
 {\em PBS} is a GPL-like, true open source version of PBS.
 
@@ -106,99 +106,99 @@ of some frustration.
 \section{Maui}
 
 Maui Scheduler\footnote{http://supercluster.org/maui}
-is an advance reservation HPC batch scheduler for use with SP, 
-O2K, and UNIX/Linux clusters. It is widely used to extend the 
+is an advance reservation HPC batch scheduler for use with SP,
+O2K, and UNIX/Linux clusters. It is widely used to extend the
 functionality of PBS and LoadLeveler
 
 \section{DPCS}
 
 The Distributed Production Control System (DPCS)\footnote{
 http://www.llnl.gov/icc/lc/dpcs/dpcs\_overview.html}
-is a resource manager developed by Lawrence Livermore National Laboratory (LLNL). 
-DPCS is (or will soon be) open source, although its use is presently 
-confined to LLNL. The development of DPCS began in 1990 and it has 
-evolved into a highly scalable and fault-tolerant meta-scheduler 
-operating on top of LoadLeveler, RMS, and NQS. DPCS provides: 
+is a resource manager developed by Lawrence Livermore National Laboratory (LLNL).
+DPCS is (or will soon be) open source, although its use is presently
+confined to LLNL. The development of DPCS began in 1990 and it has
+evolved into a highly scalable and fault-tolerant meta-scheduler
+operating on top of LoadLeveler, RMS, and NQS. DPCS provides:
 \begin{itemize}
-\item Basic data collection and reporting mechanisms for project-level, 
+\item Basic data collection and reporting mechanisms for project-level,
       near real-time accounting.
-\item Resource allocation to customers with established limits per 
-      customers' organizational budgets. 
-\item Proactive delivery of services to organizations that are relatively 
+\item Resource allocation to customers with established limits per
+      customers' organizational budgets.
+\item Proactive delivery of services to organizations that are relatively
       underserviced using a fair-share resource allocation scheme.
-\item Automated, highly flexible system with feedback for proactive delivery 
+\item Automated, highly flexible system with feedback for proactive delivery
       of resources.
 \item Even distribution of the workload across available computers.
 \item Flexible prioritization of production workload, including "run on demand."
 \item Dynamic reconfiguration and re-tuning.
-\item Graceful degradation in service to prevent overuse of a computer where 
+\item Graceful degradation in service to prevent overuse of a computer where
       not authorized.
 \end{itemize}
 
-While DPCS does have some attractive characteristics, it supports only a 
-limited number of computer systems: IBM RS/6000 and SP, Linux with RMS, 
+While DPCS does have some attractive characteristics, it supports only a
+limited number of computer systems: IBM RS/6000 and SP, Linux with RMS,
 Sun Solaris, and Compaq Alpha. DPCS also lacks commercial support.
 
 \section{LoadLeveler}
 
 LoadLeveler\footnote{
 http://www-1.ibm.com/servers/eserver/pseries/library/sp\_books/loadleveler.html}
-is a proprietary batch system and parallel job manager by 
-IBM. LoadLeveler supports few non-IBM systems. Very primitive 
-scheduling software exists and other software is required for reasonable 
-performance (e.g. Maui and DPCS). Many soft and hard limits are available. 
-A very flexible queue and job class structure is available operating in "matrix" fashion 
-(probably overly complex). Many configuration files exist with signals to 
-daemons used to update configuration (like LSF, good). All jobs must 
-be initiated through LoadLeveler (no real "interactive" jobs, just 
-high priority queue for smaller jobs). Job accounting is only available 
-on termination (very bad for long-running jobs). Good status 
-information on nodes and LoadLeveler daemons is available. LoadLeveler 
+is a proprietary batch system and parallel job manager by
+IBM. LoadLeveler supports few non-IBM systems. Very primitive
+scheduling software exists and other software is required for reasonable
+performance (e.g. Maui and DPCS). Many soft and hard limits are available.
+A very flexible queue and job class structure is available operating in "matrix" fashion
+(probably overly complex). Many configuration files exist with signals to
+daemons used to update configuration (like LSF, good). All jobs must
+be initiated through LoadLeveler (no real "interactive" jobs, just
+high priority queue for smaller jobs). Job accounting is only available
+on termination (very bad for long-running jobs). Good status
+information on nodes and LoadLeveler daemons is available. LoadLeveler
 allocates jobs either entire nodes or shared nodes ,depending upon configuration.
 
-A special version of MPI is required. LoadLeveler allocates 
-interconnect resources, spawns the user's processes, and manages the 
-job afterwards. Daemons also monitor the switch and node health using 
-a "heart-beat monitor." One fundamental problem is that when the 
-"Central Manager" restarts, it forgets about all nodes and jobs. They 
-appear in the database only after checking in via the heartbeat. It 
-needs to periodically write state to disk instead of doing 
-"cold-starts" after the daemon fails, which is rare. It has the job 
-prolog and epilog feature, which permits us to enable/disable logins 
+A special version of MPI is required. LoadLeveler allocates
+interconnect resources, spawns the user's processes, and manages the
+job afterwards. Daemons also monitor the switch and node health using
+a "heart-beat monitor." One fundamental problem is that when the
+"Central Manager" restarts, it forgets about all nodes and jobs. They
+appear in the database only after checking in via the heartbeat. It
+needs to periodically write state to disk instead of doing
+"cold-starts" after the daemon fails, which is rare. It has the job
+prolog and epilog feature, which permits us to enable/disable logins
 and remove stray processes.
 
-LoadLeveler evolved from Condor, or what was Condor a decade ago. 
-While I am less familiar with LSF and Condor than LoadLeveler, they 
-all appear very similar with LSF having the far more sophisticated 
-scheduler. We should carefully review their data structures and 
+LoadLeveler evolved from Condor, or what was Condor a decade ago.
+While I am less familiar with LSF and Condor than LoadLeveler, they
+all appear very similar with LSF having the far more sophisticated
+scheduler. We should carefully review their data structures and
 daemons before designing our own.
 
 \section{LSF (Load Sharing Facility)}
 
 LSF\footnote{http://www.platform.com/}
-is a proprietary batch system and parallel job manager by 
-Platform Computing. Widely deployed on a wide variety of computer 
-architectures. Sophisticated scheduling software including 
-fair-share, backfill, consumable resources, job preemption, many soft 
-and hard limits, etc. Very flexible queue structure (perhaps overly 
-complex). Limits are available on both a per process bs per-job  
-basis. Time limits include CPU time and wall-clock time. Many 
-configuration files with signals to daemons used to update 
-configuration (like LoadLeveler, good). All jobs must be initiated 
-through LSF to be accounted for and managed by LSF ("interactive" 
-jobs can be executed through a high priority queue for 
-smaller jobs). Job accounting only available in near real-time (important 
-for long-running jobs). Jobs initiated from same directory as 
-submitted from (not good for computer centers with diverse systems 
-under LSF control). Good status information on nodes and LSF daemons. 
-Allocates jobs either entire nodes or shared nodes depending upon 
+is a proprietary batch system and parallel job manager by
+Platform Computing. Widely deployed on a wide variety of computer
+architectures. Sophisticated scheduling software including
+fair-share, backfill, consumable resources, job preemption, many soft
+and hard limits, etc. Very flexible queue structure (perhaps overly
+complex). Limits are available on both a per process bs per-job
+basis. Time limits include CPU time and wall-clock time. Many
+configuration files with signals to daemons used to update
+configuration (like LoadLeveler, good). All jobs must be initiated
+through LSF to be accounted for and managed by LSF ("interactive"
+jobs can be executed through a high priority queue for
+smaller jobs). Job accounting only available in near real-time (important
+for long-running jobs). Jobs initiated from same directory as
+submitted from (not good for computer centers with diverse systems
+under LSF control). Good status information on nodes and LSF daemons.
+Allocates jobs either entire nodes or shared nodes depending upon
 configuration.
 
-A special version of MPI is required. LSF allocates interconnect 
-resources, spawns the user's processes, and manages the job 
-afterwards. While I am less familiar with LSF than LoadLeveler, they 
-appear very similar with LSF having the far more sophisticated 
-scheduler. We should carefully review their data structures and 
+A special version of MPI is required. LSF allocates interconnect
+resources, spawns the user's processes, and manages the job
+afterwards. While I am less familiar with LSF than LoadLeveler, they
+appear very similar with LSF having the far more sophisticated
+scheduler. We should carefully review their data structures and
 daemons before designing our own.
 
 
@@ -206,33 +206,33 @@ daemons before designing our own.
 
 
 Condor\footnote{http://www.cs.wisc.edu/condor/} is a
-batch system and parallel job manager 
-developed by the University of Wisconsin. 
-Condor was the basis for IBM's LoadLeveler and both share very similar 
-underlying infrastructure. Condor has a very sophisticated checkpoint/restart 
-service that does not rely upon kernel changes, but a variety of 
-library changes (which prevent it from being completely general). The 
-Condor checkpoint/restart service has been integrated into LSF, 
-Codine, and DPCS. Condor is designed to operate across a 
-heterogeneous environment, mostly to harness the compute resources of 
-workstations and PCs. It has an interesting "advertising" service. 
-Servers advertise their available resources and consumers advertise 
-their requirements for a broker to perform matches. The checkpoint 
-mechanism is used to relocate work on demand (when the "owner" of a 
+batch system and parallel job manager
+developed by the University of Wisconsin.
+Condor was the basis for IBM's LoadLeveler and both share very similar
+underlying infrastructure. Condor has a very sophisticated checkpoint/restart
+service that does not rely upon kernel changes, but a variety of
+library changes (which prevent it from being completely general). The
+Condor checkpoint/restart service has been integrated into LSF,
+Codine, and DPCS. Condor is designed to operate across a
+heterogeneous environment, mostly to harness the compute resources of
+workstations and PCs. It has an interesting "advertising" service.
+Servers advertise their available resources and consumers advertise
+their requirements for a broker to perform matches. The checkpoint
+mechanism is used to relocate work on demand (when the "owner" of a
 desktop machine wants to resume work).
 
 
 
 \section{Memory Channel (Compaq)}
 
-Memory Channel is a high-speed interconnect developed by 
-Digital/Compaq with related software for parallel job execution. 
-Special version of MPI required. The application spawns tasks on 
-other nodes. These tasks connect themselves to the high speed 
-interconnect. No system level tool to spawns the tasks, allocates 
-interconnect resources, or otherwise manages the parallel job (Note: 
-This is sometimes a problem when jobs fail, requiring system 
-administrators to release interconnect resources. There are also 
+Memory Channel is a high-speed interconnect developed by
+Digital/Compaq with related software for parallel job execution.
+Special version of MPI required. The application spawns tasks on
+other nodes. These tasks connect themselves to the high speed
+interconnect. No system level tool to spawns the tasks, allocates
+interconnect resources, or otherwise manages the parallel job (Note:
+This is sometimes a problem when jobs fail, requiring system
+administrators to release interconnect resources. There are also
 performance problems related to resource sharing).
 
 \section{Linux PAGG Process Aggregates}
@@ -249,7 +249,7 @@ could have been implemented as process aggregates.
 \section{BPROC}
 
 
-The Beowulf Distributed Process Space 
+The Beowulf Distributed Process Space
 (BProc\footnote{http://bproc.sourceforge.net/})
 is set of kernel
 modifications, utilities and libraries which allow a user to start
@@ -262,7 +262,7 @@ received using the usual wait() mechanisms.
 
 \section{xcat}
 
-Presumably IBM's suite of cluster management software 
+Presumably IBM's suite of cluster management software
 (xcat\footnote{http://publib-b.boulder.ibm.com/Redbooks.nsf/RedbookAbstracts/sg246041.html})
 includes a batch system.  Look into this.
 
@@ -272,16 +272,16 @@ CPLANT\footnote{http://www.cs.sandia.gov/cplant/} includes
 Parallel Job Launcher, Compute Node Daemon Process,
 Compute Node Allocator, Compute Node Status Tool.
 
-\section{NQS} 
+\section{NQS}
 
-NQS\footnote{http://umbc7.umbc.edu/nqs/nqsmain.html}, 
+NQS\footnote{http://umbc7.umbc.edu/nqs/nqsmain.html},
 the Network Queueing System, is a serial batch system.
 
 \section{LAM / MPI}
 
 LAM (Local Area Multicomputer)\footnote{http://www.lam-mpi.org/}
-is an MPI programming environment and development system for heterogeneous 
-computers on a network. 
+is an MPI programming environment and development system for heterogeneous
+computers on a network.
 With LAM, a dedicated cluster or an existing network
 computing infrastructure can act as one parallel computer solving
 one problem.  LAM features extensive debugging support in the
@@ -299,9 +299,9 @@ the Standard for message-passing libraries.
 
 Quadrics
 RMS\footnote{http://www.quadrics.com/downloads/documentation/}
-(Resource Management System) is a cluster management system for 
+(Resource Management System) is a cluster management system for
 Linux and Tru64 which supports the
-Elan3 interconnect.  
+Elan3 interconnect.
 
 \section{Sun Grid Engine}
 
@@ -310,11 +310,11 @@ SGE\footnote{http://www.sun.com/gridware/} is now proprietary.
 
 \section{SCIDAC}
 
-The Scientific Discovery through Advanced Computing (SciDAC) 
+The Scientific Discovery through Advanced Computing (SciDAC)
 project\footnote{http://www.scidac.org/ScalableSystems}
 has a Resource Management and Accounting working group
-and a white paper\cite{Res2000}. Deployment of a system with 
-the required fault-tolerance and scalability is scheduled 
+and a white paper\cite{Res2000}. Deployment of a system with
+the required fault-tolerance and scalability is scheduled
 for June 2006.
 
 \section{GNU Queue}
@@ -326,9 +326,9 @@ Clubmask\footnote{http://clubmask.sourceforge.net} is based on bproc.
 Separate queueing system?
 
 \section{SQMX}
-Part of the SCE Project\footnote{http://www.opensce.org/}, 
+Part of the SCE Project\footnote{http://www.opensce.org/},
 SQMX\footnote{http://www.beowulf.org/pipermail/beowulf-announce/2001-January/000086.html} is worth taking a look at.
-                                                                                
+
 \newpage
 \bibliographystyle{plain}
 \bibliography{project}
diff --git a/doc/txt/README b/doc/txt/README
index 8ac92faa6aaa17dfc4601995bce6dfe12602fbea..126a4d0f761bcdef8d987610067357264e93d103 100644
--- a/doc/txt/README
+++ b/doc/txt/README
@@ -1,3 +1,3 @@
 This directory contains an assortment of design documents for various components.
 Many of these documents represent preliminary design work and contain quite dated
-information. 
+information.
diff --git a/doc/txt/elan.runtime.requirements.txt b/doc/txt/elan.runtime.requirements.txt
index 7cfb2e85720d78d4c0c24cf51e3cc9be78f7a007..1ddb2783bbfc90cf0f00cff7fc5dec354bab9685 100644
--- a/doc/txt/elan.runtime.requirements.txt
+++ b/doc/txt/elan.runtime.requirements.txt
@@ -1,5 +1,5 @@
 Quadrics Elan Runtime Requirements
-December 31, 2001 
+December 31, 2001
 By Jim Garlick
 
 Abstract
@@ -8,19 +8,19 @@ SLURM runs parallel programs that utilize the Quadrics Elan3 interconnect.
 In order for the processes in a job to communicate, they must present an Elan
 capability to the Elan device driver.  The SLURM partition manager allocates
 capabilities, and the SLURM job manager sets up the user environment and
-makes appropriate calls into the kernel on each node to facilitate 
+makes appropriate calls into the kernel on each node to facilitate
 communication.
 
 Quadrics Terminology
 
-program		A parallel application consisting of set of processes run 
+program		A parallel application consisting of set of processes run
 		in parallel on one or more nodes.
 
 process		A UNIX process.
 
-capability	The ELAN_CAPABILITY data structure is defined in 
-		<elan3/elanvp.h>.  A capability is uniquely constructed for a 
-		program.  Each process must present the program's capability 
+capability	The ELAN_CAPABILITY data structure is defined in
+		<elan3/elanvp.h>.  A capability is uniquely constructed for a
+		program.  Each process must present the program's capability
 		to the Elan3 device driver via elan3_attach() in order to
 		communicate.  ELAN_CAPABILITY includes:
 		- 128-bit secret key
@@ -37,8 +37,8 @@ program description
 		An abstraction added to the kernel by Quadrics, similar to
 		a process group, but impossible for an application to detach
 		from.  Using the calls prototyped in <rms/rmscall.h>, it is
-		possible to signal programs, collect aggregate accounting 
-		information for programs, and assign Elan capabilities and 
+		possible to signal programs, collect aggregate accounting
+		information for programs, and assign Elan capabilities and
 		contexts to programs.
 
 
@@ -48,41 +48,41 @@ The Partition Manager (PM) allocates node resources to parallel jobs for
 SLURM.  In the presense of a Quadrics Elan3 interconnect, it also allocates
 program descriptions and Elan contexts.
 
-A program description is allocated for each parallel job.  Program 
-descriptions are managed by the PM as a monotonically increasing integer 
+A program description is allocated for each parallel job.  Program
+descriptions are managed by the PM as a monotonically increasing integer
 value greater than zero.
 
-A range of Elan context numbers is allocated to each parallel program.  The 
-number of contexts in the range is the number of processes per node that will 
+A range of Elan context numbers is allocated to each parallel program.  The
+number of contexts in the range is the number of processes per node that will
 be making Elan communications calls.  Elan contexts are managed by the PM as
 a monotonically increasing integer value in the range of
-ELAN_RMS_BASE_CONTEXT_NUM to ELAN_RMS_TOP_CONTEXT_NUM (inclusive), 
+ELAN_RMS_BASE_CONTEXT_NUM to ELAN_RMS_TOP_CONTEXT_NUM (inclusive),
 defined in <elan3/elanvp.h>.
 
 On selecting nodes for a job: only contiguous nodes can utilize the hardware
 broadcast feature so preference should be given to contiguous ranges of nodes.
-This is a hardware limitation.  Broadcast packets on the switch are 
-routed using a data structure that includes the tree depth and link range.      
-The tree depth targets all the leaf nodes "below" the node at the specified     
+This is a hardware limitation.  Broadcast packets on the switch are
+routed using a data structure that includes the tree depth and link range.
+The tree depth targets all the leaf nodes "below" the node at the specified
 depth, and the range trims links off each side of the range.  One can't mask
-out nodes in the middle, and a broadcast or flood DMA will fail if any of       
-the destinations fail.  The same limitation should apply to federated switches 
+out nodes in the middle, and a broadcast or flood DMA will fail if any of
+the destinations fail.  The same limitation should apply to federated switches
 as they are simply a degenerate fat tree with additional depth (not as fat
 at the top as the 128-way building blocks).
-                                                                                
-I'll put this in the elan design document in slurm-doc. 
+
+I'll put this in the elan design document in slurm-doc.
 
 
 SLURM Job Manager Support
 
-The Job Manager (JM) manages the setup and execution of parallel jobs for 
+The Job Manager (JM) manages the setup and execution of parallel jobs for
 SLURM.  When running a job that uses the Elan interconnect, the job manager
-must initialize and distribute Elan capabilities and manage program 
-descriptions.  Additional environment setup is necessary to support Quadrics 
+must initialize and distribute Elan capabilities and manage program
+descriptions.  Additional environment setup is necessary to support Quadrics
 MPI jobs.
 
 When JM initializes the ELAN_CAPABILITY data structure for a job, it should
-first call the elan3_nullcap() function to set all structure members to known 
+first call the elan3_nullcap() function to set all structure members to known
 values.  The following members are then initialized:
 
 cap.Type	Set to either ELAN_CAP_TYPE_BLOCK or ELAN_CAP_TYPE_CYCLIC
@@ -97,7 +97,7 @@ cap.UserKey	This is a 128-bit secret key, unique to the program.  A rogue
 		user who knows this key could "dummy up" a capability on
 		one of the nodes running the program and perform remote DMA's
 		into the address space of the program's processes.  The key
-		is generated in some non-deterministic way, such as an MD5 
+		is generated in some non-deterministic way, such as an MD5
 		algorithm seeded by /dev/random.  Values are assigned in
 		32-bit blocks by addressing cap.UserKey.Values[0-3].
 
@@ -121,26 +121,26 @@ cap.Bitmap	The bitmap includes a bit for each possible process in the
 		on LowNode; two and three the two tasks on LowNode+1, etc..
 		the BT_SET macro is used to set the bits in the bitmap.
 		If a node in the LowNode - HighNode range is not allocated to
-		the program, its bits are clear.  
+		the program, its bits are clear.
 
 The capability and the program description are passed to each node running
-the parallel program in such a way as to avoid exposing the UserKey to 
+the parallel program in such a way as to avoid exposing the UserKey to
 rogue users.  The portion of the job manger that runs on each node must
 then execute a sequence of calls to prepare to execute the processes for
 this parallel program.
 
-First, the job manager forks.  The parent waits for the child to terminate 
+First, the job manager forks.  The parent waits for the child to terminate
 and then calls rms_prgdestroy().  The parent could call rms_prgsignal() to
 signal all processes in the program (on the node) when a program is to be
 aborted.  The child calls rms_prgcreate() to create the program description,
 and rms_prgaddcap() to make the capability available to processes that are
-members of the program description.  The child then forks each process in turn 
+members of the program description.  The child then forks each process in turn
 and waits for all processes to terminate.
 
 Next, in each process, rms_setcap() is called with the program's capability
 index (assigned in rms_prgaddcap() and this process's context number index,
 relative to the LowContext - HighContext range in the capability.
-The MPI runtime will subsequently call rms_ncaps() and rms_getcap() to 
+The MPI runtime will subsequently call rms_ncaps() and rms_getcap() to
 retrieve the capability for presentation to elan3_attach().
 Each process also sets several environment variables that are referenced by
 the elan/MPI runtime:
@@ -150,29 +150,29 @@ RMS_NNODES	Set to the number of nodes assigned to the program.
 RMS_NPROCS	Set to the number of processes in the program.
 
 RMS_NODEID	Set to the node ID for this node, indexed relative to the
-		program , e.g. an eight node program would run on nodes 0 
-		through 7 regardless of the Elan ID of the nodes and whether 
-		or not they are contiguous. 
+		program, e.g. an eight node program would run on nodes 0
+		through 7 regardless of the Elan ID of the nodes and whether
+		or not they are contiguous.
 
-RMS_PROCID	Set to the process ID of this process, indexed relative 
+RMS_PROCID	Set to the process ID of this process, indexed relative
 		to the program, e.g. a 16 process program would consist of
 		processes 0 through 15.  (If running two tasks per node
-		under block allocation, processes 0 and 1 would be allocated 
+		under block allocation, processes 0 and 1 would be allocated
 		to node 0; if cyclic allocation, processes 0 and 8 would be
 		allocated to node 0).
 
 RMS_RANK	Set to the MPI rank for the process.  Same as RMS_PROCID.
 
 Finally, the process forks once more, and the parent waits for the child,
-while the child execs the MPI process.  
+while the child execs the MPI process.
 (XXX This fork was determined experimentally to be necessary, reason unknown).
 (XXX Are RMS_MACHINE, RMS_RESOURCEID, RMS_JOBID useful or necessary?)
 
 
 SLURM Switch Manager Support
 
-The Switch Manager (SM) monitors the state of any networking equipment and 
-should be consulted by the partition manager to determine Elan/Elite link 
+The Switch Manager (SM) monitors the state of any networking equipment and
+should be consulted by the partition manager to determine Elan/Elite link
 status before allocating a set of nodes to a parallel program.
 
 Elite switch status is available via a JTAG connection.  Inside the Elite
@@ -191,11 +191,11 @@ program.
 
 Support for multi-rail is omitted.  This should not be difficult to add, but
 until we have test hardware, I suggest we leave it.  Infrastructure should
-be designed so it is possible to send >1 capability per program to nodes 
+be designed so it is possible to send >1 capability per program to nodes
 running multirail.
 
 Pdsh 1.5+/qshell implements the above runtime for single rail as a test.
-as a test.  (XXX Need to test with more complex applications than "mping".  
+(XXX Need to test with more complex applications than "mping".
 See pdsh/qswutil.c for details).
 
 How does execution of TotalView impact this design?
diff --git a/doc/txt/heritage.txt b/doc/txt/heritage.txt
index 361bc5d01a22692febeae2980ba740d835365df9..51934ddc9a7a65fb0ea779552819144636b68476 100644
--- a/doc/txt/heritage.txt
+++ b/doc/txt/heritage.txt
@@ -1,4 +1,4 @@
-Record below the origins of code imported into SLURM from any source. 
+Record below the origins of code imported into SLURM from any source.
 This is critical to insure SLURM can be properly licensed. A sample
 entry follows.
 
@@ -16,37 +16,37 @@ Date:           2005
 Destination:    src/api/pmi.[ch]
 License:        Open Source, see listing for details
 Imported by:    Morris Jette
-Notes:		pmi.h is taken directly from the MPICH2 code base, 
+Notes:		pmi.h is taken directly from the MPICH2 code base,
                 pmi.c is an LLNL implementation of the functions defined
                 in pmi.h
 
-Origin:		LLNL Software Development Toolbox 	
+Origin:		LLNL Software Development Toolbox
 Date:    	2002
 Destination:	src/common/cbuf.[ch]
-License:	GPL 	
+License:	GPL
 Imported by:	Mark Grondona
-Notes:    	
+Notes:
 
 Origin: 	LLNL ConMan, http://www.llnl.gov/linux/conman
 Date:    	2001
 Destination:	src/common/fd.[ch]
 License: 	GPL
 Imported by:	Mark Grondona
-Notes:    	
+Notes:
 
 Origin: 	GNU libc
 Date:    	2001
 Destination:	src/common/getopt1.c, getopt.[ch]
 License: 	GPL
 Imported by:	Morris Jette
-Notes:    	
+Notes:
 
 Origin: 	LLNL Software Development Toolbox
 Date:    	2001
 Destination:	src/common/list.[ch]
 License: 	GPL
 Imported by:	Mark Grondona
-Notes:    	
+Notes:
 
 Origin:		?????????????????
 Date:		2001
@@ -78,7 +78,7 @@ Notes:
 
 Origin:		GNU taskset command source
 Date:		2005
-Destination:	src/plugins/task/affinity/schedutils.c	
+Destination:	src/plugins/task/affinity/schedutils.c
 License:	GPL
 Imported by:	Andy Riebs
 Notes:
@@ -88,7 +88,7 @@ Date:		1/13/2006
 Destination:	testsuite/expect/globals.example
 License:	None
 Imported by:	Morris Jette
-Notes:		Just the function dec2hex16 imported from on-line code 
+Notes:		Just the function dec2hex16 imported from on-line code
 		"cookbook."
 
 Origin:
diff --git a/doc/txt/slurmctld.locks b/doc/txt/slurmctld.locks
index 61eff454aea727bc9fc8d019cec06c3b88e5d99f..7764df25561e33d8b92863e7d071681d57a6bf2f 100644
--- a/doc/txt/slurmctld.locks
+++ b/doc/txt/slurmctld.locks
@@ -9,7 +9,7 @@ RPC code			Frequency	Function to service
 ============================================================================
 
 REQUEST_BUILD_INFO:		Rare		controller.c:fill_ctld_conf
-	Read	Config	
+	Read	Config
 
 
 REQUEST_NODE_INFO:		V Common	node_mgr.c:pack_all_node
@@ -26,7 +26,7 @@ REQUEST_PARTITION_INFO:		V Common	partition_mgr.c:pack_all_part
 
 REQUEST_JOB_STEP_CREATE:
 REQUEST_RESOURCE_ALLOCATION :	Common		job_mgr.c:job_allocate
-REQUEST_IMMEDIATE_RESOURCE_ALLOCATION : 	(Both use slurm_rpc_allocate_resources)	
+REQUEST_IMMEDIATE_RESOURCE_ALLOCATION : 	(Both use slurm_rpc_allocate_resources)
 	Read	Node, Partition
 	Write	Job
 
diff --git a/doc/txt/testing.txt b/doc/txt/testing.txt
index db3bc19d21f049664a798b3e99fb5dd117c5080d..95d964133ed1be91ed44a1e68a65d6262a51a14d 100644
--- a/doc/txt/testing.txt
+++ b/doc/txt/testing.txt
@@ -2,39 +2,39 @@ SLURM Testing Intrastructure
 ----------------------------
 
 The testing infrastructure for SLURM is based on Dejagnu, and consequently on
-expect/tcl.  
+expect/tcl.
 
 Directory Structure
 -------------------
-The entire testing infrastructure is located under the slurm/testsuite 
-directory.  
+The entire testing infrastructure is located under the slurm/testsuite
+directory.
 
-testsuite/config 
+testsuite/config
 	contains the default config files for dejagnu.
 
-testsuite/slurm_unit 
-	Has a directory structure that mirrors slurm/src, and 
-	contains the unit test cases for them.  For example, the directory 
-	slurm_unit/common will contain unit test cases for modules located in 
-	src/common.  
+testsuite/slurm_unit
+	Has a directory structure that mirrors slurm/src, and
+	contains the unit test cases for them.  For example, the directory
+	slurm_unit/common will contain unit test cases for modules located in
+	src/common.
 
 testsuite/slurm*
 	any directory starting with slurm may contain testcases to run.  Dejagnu
-	will find and execute an ".exp" files that are located in the slurm* 
+	will find and execute any ".exp" files that are located in the slurm*
 	directories, and any of it's subdirectories.
 
 
 Testing Scripts
 ---------------
-Drop in a .exp file into a testsuite/slurm[*] directory.  Dejagnu will 
+Drop in a .exp file into a testsuite/slurm[*] directory.  Dejagnu will
 automagically run it when either "make check" or "runtest" are executed.
 By using the functions pass, fail, untested, unresolved, and note to express
 the results of test cases, DejaGNU records these results and prints out
-summaries of all test cases. 
+summaries of all test cases.
 
 Unit Testing
 ------------
-To unit test using our infrastructure, 
+To unit test using our infrastructure,
 
 #include <testsuite/dejagnu.h>
 
@@ -44,12 +44,12 @@ untested( char*, ... )
 unresolved( char*, ... )
 note( char*, ... )
 
-totals() 
+totals()
 
 Note: All of the funtctions, except totals, behave like printf.
 
 There is a runall.exp that will be placed in every unit testing directory,
-that will run all programs that end with "test".  If you have special needs 
+that will run all programs that end with "test".  If you have special needs
 from the launching test script, you must write your own.  If you write your
 own .exp file to launch your scipts, please use names that do not end in
 "test" to avoid strange resluts.