From 34869062ae7cc26ef65c612aeec562ceeb9a1a8f Mon Sep 17 00:00:00 2001
From: Moe Jette <jette1@llnl.gov>
Date: Thu, 22 Jul 2010 19:37:43 +0000
Subject: [PATCH] remove references to vestigial job parameters (min
 sockets/cores/threads)

---
 doc/html/mc_support.shtml | 155 +++++++++++---------------------------
 1 file changed, 46 insertions(+), 109 deletions(-)

diff --git a/doc/html/mc_support.shtml b/doc/html/mc_support.shtml
index 5123b957463..3d6f0cd4ab6 100644
--- a/doc/html/mc_support.shtml
+++ b/doc/html/mc_support.shtml
@@ -90,21 +90,6 @@ by their application.  Table 1 summarizes the new multi-core flags.
 		| <u>[block|cyclic]:[block|cyclic]</u>
 </td></tr>
 <tr><td colspan=2>
-<b><a href="#srun_constraints">New Constraints</b>
-</td></tr>
-<tr>
-    <td> --minsockets=<i>MinS</i></td>
-    <td> Nodes must meet this minimum number of sockets
-</td></tr>
-<tr>
-    <td> --mincores=<i>MinC</i></td>
-    <td> Nodes must meet this minimum number of cores per socket
-</td></tr>
-<tr>
-    <td> --minthreads=<i>MinT</i></td>
-    <td> Nodes must meet this minimum number of threads per core
-</td></tr>
-<tr><td colspan=2>
 <b><a href="#srun_consres">Memory as a consumable resource</a></b>
 </td></tr>
 <tr>
@@ -290,29 +275,6 @@ to -m block:cyclic with --cpu_bind=thread.</p>
 
 <p>See also 'srun --help'</p>
 
-<a name="srun_constraints">
-<h3>New Constraints</h3></a>
-
-<p>To complement the existing SLURM job minimum constraints (CPUs,
-memory, temp disk), constraint flags have also been added to allow a
-user to request nodes with a minimum number of sockets, cores, or
-threads:</p>
-
-<PRE>
---mincpus=<i>n</i>             minimum number of logical processors (threads) per node
-<u>--minsockets=<i>n</i></u>          minimum number of sockets per node
-<u>--mincores=<i>n</i></u>            minimum number of cores per socket
-<u>--minthreads=<i>n</i></u>          minimum number of threads per core
---mem=<i>MB</i>                minimum amount of real memory
---tmp=<i>MB</i>                minimum amount of temporary disk
-</PRE>
-
-<p>These constraints are separate from the -N or -B allocation minimums.
-Using these constraints allows the user to exclude smaller nodes from
-the allocation request.</p>
-
-<p>See also 'srun --help' and 'man srun'</p>
-
 <a name="srun_consres">
 <h3>Memory as a Consumable Resource</h3></a>
 
@@ -321,9 +283,9 @@ needed by the job per node.  This flag is used to support the memory
 as a consumable resource allocation strategy.</p>
 
 <PRE>
---job-mem=<i>MB</i>            maximum amount of real memory per node
-                        required by the job.
-                        --mem >= --job-mem if --mem is specified.
+--job-mem=<i>MB</i>      maximum amount of real memory per node
+                  required by the job.
+                  --mem >= --job-mem if --mem is specified.
 </PRE>
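+
+<p>For example, a job could request at most 512 MB of real memory per
+node on each of two nodes as follows (the memory size and application
+name are illustrative only):</p>
+<PRE>
+% srun -N 2 --job-mem=512 ./my_app
+</PRE>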
 
 <p>This flag allows the scheduler to co-allocate jobs on specific nodes
@@ -334,15 +296,13 @@ of memory on the nodes.</p>
 <p>In order to use memory as a consumable resource, the select/cons_res
 plugin must be first enabled in slurm.conf:
 <PRE>
-SelectType=select/cons_res        # enable consumable resources
-SelectTypeParameters=CR_Memory    # memory as a consumable resource
+SelectType=select/cons_res      # enable consumable resources
+SelectTypeParameters=CR_Memory  # memory as a consumable resource
 </PRE>
 
 <p> Using memory as a consumable resource can also be combined with
-the CPU, Socket, or Core consumable resources via:
-<PRE>
-CR_CPU_Memory, CR_Socket_Memory, CR_Core_Memory
-</PRE>
+the CPU, Socket, or Core consumable resources by using a SelectTypeParameters
+value of CR_CPU_Memory, CR_Socket_Memory, or CR_Core_Memory.</p>
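+
+<p>For example, a minimal (illustrative) configuration treating both
+cores and memory as consumable resources would be:</p>
+<PRE>
+SelectType=select/cons_res
+SelectTypeParameters=CR_Core_Memory
+</PRE>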
 
 <p>See the "Resource Selection" section if generating slurm.conf
 via <a href="configurator.html">configurator.html</a>.
@@ -368,9 +328,9 @@ allocation as one step in a larger job script.
 This can now be specified via the following flags:</p>
 
 <PRE>
---ntasks-per-node=<i>n</i>     number of tasks to invoke on each node
---ntasks-per-socket=<i>n</i>   number of tasks to invoke on each socket
---ntasks-per-core=<i>n</i>     number of tasks to invoke on each core
+--ntasks-per-node=<i>n</i>    number of tasks to invoke on each node
+--ntasks-per-socket=<i>n</i>  number of tasks to invoke on each socket
+--ntasks-per-core=<i>n</i>    number of tasks to invoke on each core
 </PRE>
 
 <p>For example, given a cluster with nodes containing two sockets,
@@ -384,19 +344,19 @@ hydra12
 hydra12
 % srun -n 4 --ntasks-per-node=1 hostname
 hydra12
+hydra13
 hydra14
 hydra15
-hydra13
 % srun -n 4 --ntasks-per-node=2 hostname
 hydra12
+hydra12
 hydra13
 hydra13
-hydra12
 % srun -n 4 --ntasks-per-socket=1 hostname
 hydra12
+hydra12
 hydra13
 hydra13
-hydra12
 % srun -n 4 --ntasks-per-core=1 hostname
 hydra12
 hydra12
@@ -771,16 +731,10 @@ parts*    4    2:2:1    2        2        1
 the following identifiers are available:</p>
 
 <PRE>
-%H  Minimum number of sockets per node requested by the job.
-    This reports the value of the srun --minsockets option.
-%I  Minimum number of cores per socket requested by the job.
-    This reports the value of the srun --mincores option.
-%J  Minimum number of threads per core requested by the job.
-    This reports the value of the srun --minthreads option.
 %m  Minimum size of memory (in MB) requested by the job
-%X  Number of requested sockets per node
-%Y  Number of requested cores per socket
-%Z  Number of requested threads per core
+%H  Number of requested sockets per node
+%I  Number of requested cores per socket
+%J  Number of requested threads per core
 %z  Extended processor information: number of requested
     sockets, cores, threads (S:C:T) per node
 </PRE>
@@ -789,31 +743,20 @@ the following identifiers are available:</p>
 
 <UL>
 <UL>
-<DL>% srun -n 4 -B 2:2:1
-	--minsockets=4 --mincores=2 --minthreads=1 --mem=1024 sleep 100 &
+<DL>% srun -n 4 -B 2:2:1 --mem=1024 sleep 100 &
 </UL>
 </UL>
 
 <PRE>
-% squeue -o '%.5i %.2t %.4M %.5D %7X %7Y %7Z %7z %R'
-JOBID ST TIME NODES SOCKETS CORES   THREADS S:C:T   NODELIST(REASON)
-   17 PD 0:00     1 2       2       1       2:2:1   (Resources)
-   18 PD 0:00     1 2       2       1       2:2:1   (Resources)
-   19 PD 0:00     1 2       2       1       2:2:1   (Resources)
-   13  R 1:27     1 2       2       1       2:2:1   hydra12
-   14  R 1:26     1 2       2       1       2:2:1   hydra13
-   15  R 1:26     1 2       2       1       2:2:1   hydra14
-   16  R 1:26     1 2       2       1       2:2:1   hydra15
-
-% squeue -o '%.5i %.2t %.4M %.5D %9c %11H %9I %11J'
-JOBID ST TIME NODES MIN_PROCS MIN_SOCKETS MIN_CORES MIN_THREADS
-   17 PD 0:00     1 1         4           2         1
-   18 PD 0:00     1 1         4           2         1
-   19 PD 0:00     1 1         4           2         1
-   13  R 1:29     1 0         0           0         0
-   14  R 1:28     1 0         0           0         0
-   15  R 1:28     1 0         0           0         0
-   16  R 1:28     1 0         0           0         0
+% squeue -o '%.5i %.2t %.4M %.5D %7H %6I %7J %6z %R'
+JOBID ST TIME NODES SOCKETS CORES THREADS S:C:T NODELIST(REASON)
+   17 PD 0:00     1 2       2     1       2:2:1 (Resources)
+   18 PD 0:00     1 2       2     1       2:2:1 (Resources)
+   19 PD 0:00     1 2       2     1       2:2:1 (Resources)
+   13  R 1:27     1 2       2     1       2:2:1 hydra12
+   14  R 1:26     1 2       2     1       2:2:1 hydra13
+   15  R 1:26     1 2       2     1       2:2:1 hydra14
+   16  R 1:26     1 2       2     1       2:2:1 hydra15
 </PRE>
 
 <p>
@@ -865,32 +808,27 @@ the job starts running).
 
 <PRE>
 Requested Allocation:
-   ReqSockets=&lt;count&gt;   Set the job's count of required sockets
-   ReqCores=&lt;count&gt;     Set the job's count of required cores
-   ReqThreads=&lt;count&gt;   Set the job's count of required threads
-
-Constraints:
-   MinSockets=&lt;count&gt;   Set the job's minimum number of sockets per node
-   MinCores=&lt;count&gt;     Set the job's minimum number of cores per socket
-   MinThreads=&lt;count&gt;   Set the job's minimum number of threads per core
+  ReqSockets=&lt;count&gt;  Set the job's count of required sockets
+  ReqCores=&lt;count&gt;    Set the job's count of required cores
+  ReqThreads=&lt;count&gt;  Set the job's count of required threads
 </PRE>
 
 <p>For example:</p>
 
 <PRE>
-# scontrol update JobID=18 MinThreads=2
-# scontrol update JobID=18 MinCores=4
-# scontrol update JobID=18 MinSockets=8
-
-% squeue -o '%.5i %.2t %.4M %.5D %9c %11H %9I %11J'
-JOBID ST TIME NODES MIN_PROCS MIN_SOCKETS MIN_CORES MIN_THREADS
-   17 PD 0:00     1 1         4           2         1
-   18 PD 0:00     1 1         8           4         2
-   19 PD 0:00     1 1         4           2         1
-   13  R 1:35     1 0         0           0         0
-   14  R 1:34     1 0         0           0         0
-   15  R 1:34     1 0         0           0         0
-   16  R 1:34     1 0         0           0         0
+# scontrol update JobID=18 ReqThreads=2
+# scontrol update JobID=18 ReqCores=4
+# scontrol update JobID=18 ReqSockets=8
+
+% squeue -o '%.5i %.2t %.4M %.5D %9c %7H %6I %8J'
+JOBID ST TIME NODES MIN_PROCS SOCKETS CORES THREADS
+   17 PD 0:00     1 1         2       2     1
+   18 PD 0:00     1 1         8       4     2
+   19 PD 0:00     1 1         2       2     1
+   13  R 1:35     1 0         0       0     0
+   14  R 1:34     1 0         0       0     0
+   15  R 1:34     1 0         0       0     0
+   16  R 1:34     1 0         0       0     0
 </PRE>
 
 <p>The 'scontrol show job' command can be used to display
@@ -898,7 +836,7 @@ the number of allocated CPUs per node as well as the socket, cores,
 and threads specified in the request and constraints.
 
 <PRE>
-% srun -N 2 -B 2:1-1 sleep 100 &
+% srun -N 2 -B 2:1 sleep 100 &
 % scontrol show job 20
 JobId=20 UserId=(30352) GroupId=users(1051)
    Name=sleep
@@ -907,10 +845,9 @@ JobId=20 UserId=(30352) GroupId=users(1051)
    JobState=RUNNING StartTime=09/25-17:17:30 EndTime=NONE
    NodeList=hydra[12-14] NodeListIndices=0,2,-1
    <u>AllocCPUs=1,2,1</u>
-   ReqProcs=4 ReqNodes=2 <u>ReqS:C:T=2:1-1</u>
+   ReqProcs=4 ReqNodes=2 <u>ReqS:C:T=2:1:*</u>
    Shared=0 Contiguous=0 CPUs/task=0
-   MinProcs=0 <u>MinSockets=0 MinCores=0 MinThreads=0</u>
-   MinMemory=0 MinTmpDisk=0 Features=(null)
+   MinProcs=0 MinMemory=0 MinTmpDisk=0 Features=(null)
    Dependency=0 Account=(null) Reason=None Network=(null)
    ReqNodeList=(null) ReqNodeListIndices=-1
    ExcNodeList=(null) ExcNodeListIndices=-1
@@ -1044,7 +981,7 @@ using NodeName:
 </PRE>
 
 <!-------------------------------------------------------------------------->
-<p style="text-align:center;">Last modified 13 October 2009</p>
+<p style="text-align:center;">Last modified 22 July 2010</p>
 
 <!--#include virtual="footer.txt"-->
 
-- 
GitLab