diff --git a/doc/man/man1/slaunch.1 b/doc/man/man1/slaunch.1 index 9548d1564009eaff6083962ab9e6b760ea4ec2f1..788d95a5bd9d7b671947100a919caa66696b3d32 100644 --- a/doc/man/man1/slaunch.1 +++ b/doc/man/man1/slaunch.1 @@ -8,7 +8,15 @@ slaunch \- Launch a parallel application under a SLURM job allocation. slaunch [\fIoptions\fP] <\fIcommand\fP> [\fIcommand args\fR] .SH "DESCRIPTION" .LP -slaunch launches a parallel application (a \fBjob step\fR in SLURM parlance) on the nodes, or subset of nodes, in a \fBjob allocation\fR. A valid job allocation is a prerequisite of running slaunch. The ID of the job allocation may be passed to slaunch through either the \fB\-\-jobid\fR command line parameter or the \fBSLURM_JOBID\fR environment variable. The \fBsalloc\fR and \fBsbatch\fR commands may be used to request a job allocation, and each of those commands automatically set the \fBSLURM_JOBID\fR environment variable. +slaunch launches a parallel application (a \fBjob step\fR in SLURM parlance) +on the nodes, or subset of nodes, in a \fBjob allocation\fR. A valid job +allocation is a prerequisite of running slaunch. The ID of the job allocation +may be passed to slaunch through either the \fB\-\-jobid\fR command line +parameter or the \fBSLAUNCH_JOBID\fR environment variable. The \fBsalloc\fR +and \fBsbatch\fR commands may be used to request a job allocation, and each +of those commands automatically set the \fBSLURM_JOB_ID\fR environment variable, +which is also understood by slaunch. Users should not set SLURM_JOB_ID on their +own; use SLAUNCH_JOBID instead. .SH "OPTIONS" .LP .TP @@ -29,6 +37,85 @@ Specify the first node in the allocation on which this job step will be launched \fB\-c\fR, \fB\-\-cpus\-per\-task\fR[=]<\fIncpus\fR> Specify that each task requires \fIncpus\fR number of CPUs. Useful for applications in which each task will launch multiple threads and can therefore benefit from there being free processors on the node. 
+.TP +\fB\-J\fR, \fB\-\-name\fR[=]<\fIname\fR> +Set the name of the job step. By default, the job step's name will be the +name of the executable which slaunch is launching. + +.TP +\fB\-K\fR, \fB\-\-kill\-on\-bad\-exit\fR +Terminate the job step if any task exits with a non-zero exit code. By default +slaunch will not terminate a job step because of a task with a non-zero exit +code. + +.TP +\fB\-l\fR, \fB\-\-label\fR +Prepend each line of task standard output or standard error with the task +number of its origin. + +.TP +\fB\-u\fR, \fB\-\-unbuffered\fR +Do not line buffer standard output or standard error from remote tasks. +This option cannot be used with \-\-label. + +.TP +\fB\-C\fR, \fB\-\-overcommit\fR +Permit the allocation of more tasks to a node than there are available processors. +Normally SLURM will only allow up to N tasks on a node with N processors, but +this option will allow more than N tasks to be assigned to a node. + +.TP +\fB\-q\fR, \fB\-\-quiet\fR +Suppress informational messages from slaunch. Errors will still be displayed. + +.TP +\fB\-v\fR, \fB\-\-verbose\fR +Increase the verbosity of slaunch's informational messages. Multiple \-v's +will further increase slaunch's verbosity. + +.TP +\fB\-d\fR, \fB\-\-slurmd-debug\fR[=]<\fIlevel\fR> +Specify a debug level for slurmd(8). \fIlevel\fR may be an integer value +between 0 [quiet, only errors are displayed] and 4 [verbose operation]. +The slurmd debug information is copied onto the stderr of +the job. By default only errors are displayed. + +.TP +\fB\-W\fR, \fB\-\-wait\fR[=]<\fIseconds\fR> +slaunch will wait the specified number of seconds after the first task exits +before killing all tasks in the job step. If the value is 0, slaunch will +wait indefinitely for all tasks to exit. The default value is given by the +WaitTime parameter in the slurm configuration file (see \fBslurm.conf(5)\fR). 
+ +The \-\-wait option can be used to insure that a job step terminates in a timely +fashion in the event that one or more tasks terminate prematurely. + +.TP +\fB\-m\fR, \fB\-\-distribution\fR[=]<\fI(cyclic|block|arbitrary)\fR> +Select a task distribution pattern. (The default is cyclic.) +.RS +.TP +.B cyclic +The cyclic method distributes processes in a round-robin fashion across +the allocated nodes. That is, task zero will be allocated to the first +node, task one to the second, and so on. +.TP +.B block +The block method of distribution will allocate tasks in-order to +the cpus on a node. If the number of tasks exceeds the number of +cpus on all of the nodes in the allocation then all nodes will be +utilized. For example, consider an allocation of three nodes each with +two cpus. A four\-task block distribution request will distribute +those tasks to the nodes with tasks zero and one on the first +node, task two on the second node, and task three on the third node. +.TP +.B arbitrary +The arbitrary method of distribution allows the user to manually specify any +arbitrary layout of tasks on nodes. Normally the arbitrary task distribution +method will be invoked implicitly by using one of the options +\-\-task\-layout\-byid, \-\-task\-layout\-byname, or \-\-task\-layout\-file. +.RE + .TP \fB\-w\fR, \fB\-\-nodelist\-byname\fR[=]<\fInode name list\fR> Request a specific list of node names. The list may be specified as a comma\-separated list of node names, or a range of node names (e.g. mynode[1\-5,7,...]). Duplicate node names are not permitted in the list. @@ -56,12 +143,17 @@ the Nth task should be launched. For example, a nodelist of mynode[4,3,1\-2,4] means that tasks 0 and 4 will run on mynode4, task 1 will run on mynode3, task 2 will run on mynode1, and task 3 will run on mynode2. -NOTE: This option implicitly sets the task distribution method to "arbitrary". Some network switch layers do not permit arbitrary task layout. 
+NOTE: This option implicitly sets the task distribution method to "arbitrary". +Some network switch layers do not permit arbitrary task layout. + .TP \fB\-F\fR, \fB\-\-task\-layout\-file\fR[=]<\fIfilename\fR> -Request a specific task layout. This options much like the \-\-task\-layout option, except that instead of a nodelist you supply the name of a file. The file contains a nodelist that may span multiple lines of the file. +Request a specific task layout. This option is much like the \-\-task\-layout +option, except that instead of a nodelist you supply the name of a file. +The file contains a nodelist that may span multiple lines of the file. -NOTE: This option implicitly sets the task distribution method to "arbitrary". Some network switch layers do not permit arbitrary task layout. +NOTE: This option implicitly sets the task distribution method to "arbitrary". +Some network switch layers do not permit arbitrary task layout. .TP \fB\-i\fR, \fB\-\-slaunch\-input\fR[=]<\fIfilename pattern\fR> @@ -175,21 +267,97 @@ Task rank number. (Will result in a separate file per task.) .RS -10 .TP -\fB\-D\fR, \fB\-\-workdir\fR=\fIpath\fR +\fB\-D\fR, \fB\-\-workdir\fR[=]<\fIpath\fR> Set the working directory of the tasks to \fIpath\fR before execution. The default task working directory is slaunch's working directory. .TP -\fB\-\-nice\fR=<\fIadjustment\fR> +\fB\-\-mpi\fR[=]<\fImpi_type\fR> +Identify the type of MPI to be used. + +.TP +\fB\-\-nice\fR[=]<\fIadjustment\fR> Run the job with an adjusted scheduling priority. With no adjustment value the scheduling priority is decreased by 100. The adjustment range is from \-10000 (highest priority) to 10000 (lowest priority). Only privileged users can specify a negative adjustment. NOTE: This option is presently ignored if SchedulerType=sched/maui. +.TP +\fB\-\-uid\fR[=]<\fIuser\fR> +Attempt to submit and/or run a job as \fIuser\fR instead of the +invoking user id. 
The invoking user's credentials will be used +to check access permissions for the target partition. User root +may use this option to run jobs as a normal user in a RootOnly +partition for example. If run as root, \fBslaunch\fR will drop +its permissions to the uid specified after node allocation is +successful. \fIuser\fR may be the user name or numerical user ID. + +.TP +\fB\-\-gid\fR[=]<\fIgroup\fR> +If \fBslaunch\fR is run as root, and the \fB\-\-gid\fR option is used, +submit the job with \fIgroup\fR's group access permissions. \fIgroup\fR +may be the group name or the numerical group ID. + +.TP +\fB\-\-core\fR[=]<\fItype\fR> +Adjust corefile format for parallel job. If possible, slaunch will set +up the environment for the job such that a corefile format other than +full core dumps is enabled. If run with type = "list", slaunch will +print a list of supported corefile format types to stdout and exit. + +.TP +\fB\-\-propagate\fR[=\fIrlimits\fR] +Allows users to specify which of the modifiable (soft) resource limits +to propagate to the compute nodes and apply to their jobs. If +\fIrlimits\fR is not specified, then all resource limits will be +propagated. + +.TP +\fB\-\-prolog\fR[=]<\fIexecutable\fR> +\fBslaunch\fR will run \fIexecutable\fR just before launching the job step. +The command line arguments for \fIexecutable\fR will be the command +and arguments of the job step. If \fIexecutable\fR is "none", then +no prolog will be run. This parameter overrides the SrunProlog +parameter in slurm.conf. + +.TP +\fB\-\-epilog\fR[=]<\fIexecutable\fR> +\fBslaunch\fR will run \fIexecutable\fR just after the job step completes. +The command line arguments for \fIexecutable\fR will be the command +and arguments of the job step. If \fIexecutable\fR is "none", then +no epilog will be run. This parameter overrides the SrunEpilog +parameter in slurm.conf. 
+ +.TP +\fB\-\-task\-prolog\fR[=]<\fIexecutable\fR> +The \fBslurmd\fR daemon will run \fIexecutable\fR just before launching +each task. This will be executed after any TaskProlog parameter +in slurm.conf is executed. +Besides the normal environment variables, this has SLURM_TASK_PID +available to identify the process ID of the task being started. +Standard output from this program of the form +"export NAME=value" will be used to set environment variables +for the task being spawned. + +.TP +\fB\-\-task\-epilog\fR[=]<\fIexecutable\fR> +The \fBslurmd\fR daemon will run \fIexecutable\fR just after each task +terminates. This will be executed before any TaskEpilog parameter +in slurm.conf is executed. This is meant to be a very short-lived +program. If it fails to terminate within a few seconds, it will +be killed along with any descendant processes. + +.TP +\fB\-\-ctrl\-comm\-ifhn\fR[=]<\fIaddr\fR> +Specify the address or hostname to be used for PMI communications only +(MPICH2 communication bootstrapping mechanism). +Defaults to short hostname of the node on which slaunch is running. .TP \fB\-h\fR, \fB\-\-help\fR Output help information and exit. + .TP \fB\-V\fR, \fB\-\-version\fR Output version information and exit. + .SH "ENVIRONMENT VARIABLES" .LP .TP @@ -200,6 +368,7 @@ Same as \fB\-\-jobid\fR. 
To launch a job step (parallel program) in an existing job allocation: .IP slaunch \-\-jobid 66777 \-N2 \-n8 myprogram + .LP To grab an allocation of nodes and launch a parallel application on one command line (See the \fBsalloc\fR man page for more examples): .IP diff --git a/src/salloc/opt.c b/src/salloc/opt.c index 61a8a7daf3e2f616fdc12825b197678a9848450a..643d51b5502bd787376daf700c53673d890326c8 100644 --- a/src/salloc/opt.c +++ b/src/salloc/opt.c @@ -71,8 +71,6 @@ #include "src/salloc/salloc.h" #include "src/salloc/opt.h" -#include "src/common/mpi.h" - /* generic OPT_ definitions -- mainly for use with env vars */ #define OPT_NONE 0x00 #define OPT_INT 0x01 @@ -1278,7 +1276,7 @@ static void _usage(void) " [--verbose]\n" " [-W sec]\n" " [--contiguous] [--mincpus=n] [--mem=MB] [--tmp=MB] [-C list]\n" -" [--mpi=type] [--account=name] [--dependency=jobid]\n" +" [--account=name] [--dependency=jobid]\n" " [--propagate[=rlimits] ]\n" #ifdef HAVE_BG /* Blue gene specific options */ " [--geometry=XxYxZ] [--conn-type=type] [--no-rotate]\n" @@ -1305,7 +1303,6 @@ static void _help(void) " -s, --share share nodes with other jobs\n" " -J, --job-name=jobname name of job\n" " --jobid=id specify jobid to use\n" -" --mpi=type type of MPI being used\n" " -W, --wait=sec seconds to wait for allocation if not\n" " immediately available\n" " -v, --verbose verbose mode (multiple -v's increase verbosity)\n" @@ -1314,7 +1311,6 @@ static void _help(void) " --nice[=value] decrease secheduling priority by value\n" " -U, --account=name charge job to specified account\n" " --propagate[=rlimits] propagate all [or specific list of] rlimits\n" -" --mpi=type specifies version of MPI to use\n" " --begin=time defer job until HH:MM DD/MM/YY\n" " --mail-type=type notify on state change: BEGIN, END, FAIL or ALL\n" " --mail-user=user who to send email notification for job state changes\n" diff --git a/src/sbatch/opt.c b/src/sbatch/opt.c index 
8e1e9dedb1d42a1a662e503d288946cd053a7bf8..44f673d592f7ad9dac5631c4adddca5eb4d3c3c8 100644 --- a/src/sbatch/opt.c +++ b/src/sbatch/opt.c @@ -70,8 +70,6 @@ #include "src/sbatch/opt.h" -#include "src/common/mpi.h" - /* generic OPT_ definitions -- mainly for use with env vars */ #define OPT_NONE 0x00 #define OPT_INT 0x01 @@ -82,7 +80,6 @@ #define OPT_CONN_TYPE 0x08 #define OPT_NO_ROTATE 0x0a #define OPT_GEOMETRY 0x0b -#define OPT_MPI 0x0c #define OPT_MULTI 0x0f /* generic getopt_long flags, integers and *not* valid characters */ @@ -93,7 +90,6 @@ #define LONG_OPT_CONT 0x109 #define LONG_OPT_UID 0x10a #define LONG_OPT_GID 0x10b -#define LONG_OPT_MPI 0x10c #define LONG_OPT_CORE 0x10e #define LONG_OPT_CONNTYPE 0x111 #define LONG_OPT_NETWORK 0x114 @@ -511,7 +507,6 @@ env_vars_t env_vars[] = { {"SBATCH_REMOTE_CWD", OPT_STRING, &opt.cwd, NULL }, {"SBATCH_TIMELIMIT", OPT_INT, &opt.time_limit, NULL }, {"SBATCH_WAIT", OPT_INT, &opt.max_wait, NULL }, - {"SBATCH_MPI_TYPE", OPT_MPI, NULL, NULL }, {NULL, 0, NULL, NULL} }; @@ -589,14 +584,6 @@ _process_env_var(env_vars_t *e, const char *val) } break; - case OPT_MPI: - if (srun_mpi_init((char *)val) == SLURM_ERROR) { - fatal("\"%s=%s\" -- invalid MPI type, " - "--mpi=list for acceptable types.", - e->var, val); - } - break; - default: /* do nothing */ break; @@ -639,7 +626,6 @@ static struct option long_options[] = { {"exclusive", no_argument, 0, LONG_OPT_EXCLUSIVE}, {"mincpus", required_argument, 0, LONG_OPT_MINCPU}, {"mem", required_argument, 0, LONG_OPT_MEM}, - {"mpi", required_argument, 0, LONG_OPT_MPI}, {"tmp", required_argument, 0, LONG_OPT_TMP}, {"jobid", required_argument, 0, LONG_OPT_JOBID}, {"uid", required_argument, 0, LONG_OPT_UID}, @@ -1090,13 +1076,6 @@ static void _set_options(int argc, char **argv) exit(1); } break; - case LONG_OPT_MPI: - if (srun_mpi_init((char *)optarg) == SLURM_ERROR) { - fatal("\"--mpi=%s\" -- long invalid MPI type, " - "--mpi=list for acceptable types.", - optarg); - } - break; case LONG_OPT_TMP: 
opt.tmpdisk = _to_bytes(optarg); if (opt.tmpdisk < 0) { @@ -1572,7 +1551,7 @@ static void _usage(void) " [--jobid=id] [--verbose]\n" " [-W sec]\n" " [--contiguous] [--mincpus=n] [--mem=MB] [--tmp=MB] [-C list]\n" -" [--mpi=type] [--account=name] [--dependency=jobid]\n" +" [--account=name] [--dependency=jobid]\n" " [--propagate[=rlimits] ]\n" #ifdef HAVE_BG /* Blue gene specific options */ " [--geometry=XxYxZ] [--conn-type=type] [--no-rotate]\n" @@ -1605,7 +1584,6 @@ static void _help(void) " -s, --share share nodes with other jobs\n" " -J, --job-name=jobname name of job\n" " --jobid=id run under already allocated job\n" -" --mpi=type type of MPI being used\n" " -W, --wait=sec seconds to wait after first task exits\n" " before killing job\n" " -v, --verbose verbose mode (multiple -v's increase verbosity)\n" @@ -1614,7 +1592,6 @@ static void _help(void) " --nice[=value] decrease secheduling priority by value\n" " -U, --account=name charge job to specified account\n" " --propagate[=rlimits] propagate all [or specific list of] rlimits\n" -" --mpi=type specifies version of MPI to use\n" " --task-prolog=program run \"program\" before launching task\n" " --task-epilog=program run \"program\" after launching task\n" " --begin=time defer job until HH:MM DD/MM/YY\n" diff --git a/src/slaunch/opt.c b/src/slaunch/opt.c index 83ce6fc5d70d3bb842b8024778dc43a7f80b7657..8e60b3c48c508ccba553938dbcb0aa539a5ef572 100644 --- a/src/slaunch/opt.c +++ b/src/slaunch/opt.c @@ -239,6 +239,20 @@ static int _verify_cpu_bind(const char *arg, char **cpu_bind, if (!mappos) { mappos = strchr(pos,'='); } + if (strncasecmp(pos, "help", 4) == 0) { + printf("CPU bind options:\n" + "\tq[uiet], quietly bind before task runs (default)\n" + "\tv[erbose], verbosely report binding before task runs\n" + "\tno[ne] don't bind tasks to CPUs (default)\n" + "\trank bind by task rank\n" + "\tmap_cpu:<list> bind by mapping CPU IDs to tasks as specified\n" + "\t where <list> is <cpuid1>,<cpuid2>,...<cpuidN>\n" + 
"\tmask_cpu:<list> bind by setting CPU masks on tasks as specified\n" + "\t where <list> is <mask1>,<mask2>,...<maskN>\n"); + return 1; + + } + if (strncasecmp(pos, "quiet", 5) == 0) { fl_cpubind_verbose=0; pos+=5; @@ -336,6 +350,20 @@ static int _verify_mem_bind(const char *arg, char **mem_bind, if (!mappos) { mappos = strchr(pos,'='); } + if (strncasecmp(pos, "help", 4) == 0) { + printf("Memory bind options:\n" + "\tq[uiet], quietly bind before task runs (default)\n" + "\tv[erbose], verbosely report binding before task runs\n" + "\tno[ne] don't bind tasks to memory (default)\n" + "\trank bind by task rank\n" + "\tlocal bind to memory local to processor\n" + "\tmap_mem:<list> bind by mapping memory of CPU IDs to tasks as specified\n" + "\t where <list> is <cpuid1>,<cpuid2>,...<cpuidN>\n" + "\tmask_mem:<list> bind by setting menory of CPU masks on tasks as specified\n" + "\t where <list> is <mask1>,<mask2>,...<maskN>\n"); + return 1; + + } if (strncasecmp(pos, "quiet", 5) == 0) { fl_membind_verbose = 0; pos+=5; @@ -569,7 +597,6 @@ static void _opt_default() opt.cpu_bind = NULL; opt.mem_bind_type = 0; opt.mem_bind = NULL; - opt.time_limit = -1; opt.relative = (uint16_t)NO_VAL; opt.relative_set = false; @@ -667,7 +694,6 @@ env_vars_t env_vars[] = { {"SLAUNCH_LABELIO", OPT_INT, &opt.labelio, NULL }, {"SLAUNCH_OVERCOMMIT", OPT_OVERCOMMIT,NULL, NULL }, {"SLAUNCH_REMOTE_CWD", OPT_STRING, &opt.cwd, NULL }, - {"SLAUNCH_TIMELIMIT", OPT_INT, &opt.time_limit, NULL }, {"SLAUNCH_WAIT", OPT_INT, &opt.max_wait, NULL }, {"SLAUNCH_MPI_TYPE", OPT_MPI, NULL, NULL }, {"SLAUNCH_SRUN_COMM_IFHN",OPT_STRING, &opt.ctrl_comm_ifhn,NULL }, @@ -835,8 +861,7 @@ void set_options(const int argc, char **argv) {"help", no_argument, 0, 'h'}, {"slaunch-input", required_argument, 0, 'i'}, {"task-input", required_argument, 0, 'I'}, - {"job-name", required_argument, 0, 'J'}, - {"no-kill", no_argument, 0, 'k'}, + {"name", required_argument, 0, 'J'}, {"kill-on-bad-exit", no_argument, 0, 'K'}, {"label", 
no_argument, 0, 'l'}, {"nodelist-byid", required_argument, 0, 'L'}, @@ -848,7 +873,6 @@ void set_options(const int argc, char **argv) {"overcommit", no_argument, 0, 'C'}, {"quiet", no_argument, 0, 'q'}, {"relative", required_argument, 0, 'r'}, - {"time", required_argument, 0, 't'}, {"unbuffered", no_argument, 0, 'u'}, {"task-layout-byid", required_argument, 0, 'T'}, {"verbose", no_argument, 0, 'v'}, @@ -886,7 +910,7 @@ void set_options(const int argc, char **argv) {NULL, 0, 0, 0} }; char *opt_string = - "+c:Cd:D:e:E:F:hi:I:J:kKlL:m:n:N:o:O:qr:t:T:uvVw:W:Y:"; + "+c:Cd:D:e:E:F:hi:I:J:KlL:m:n:N:o:O:qr:T:uvVw:W:Y:"; struct option *optz = spank_option_table_create (long_options); @@ -966,9 +990,6 @@ void set_options(const int argc, char **argv) xfree(opt.job_name); opt.job_name = xstrdup(optarg); break; - case 'k': - opt.no_kill = true; - break; case 'K': opt.kill_bad_exit = true; break; @@ -1018,9 +1039,6 @@ void set_options(const int argc, char **argv) opt.relative_set = true; opt.relative = _get_int(optarg, "relative start node"); break; - case 't': - opt.time_limit = _get_pos_int(optarg, "time"); - break; case 'T': xfree(opt.task_layout); opt.task_layout_byid = xstrdup(optarg); @@ -1776,9 +1794,6 @@ static bool _opt_verify(void) if (opt.max_wait) opt.max_exit_timeout = opt.max_wait; - if (opt.time_limit == 0) - opt.time_limit = INFINITE; - if ((opt.euid != (uid_t) -1) && (opt.euid != opt.uid)) opt.uid = opt.euid; @@ -1923,10 +1938,6 @@ static void _opt_list() info("label output : %s", tf_(opt.labelio)); info("unbuffered IO : %s", tf_(opt.unbuffered)); info("overcommit : %s", tf_(opt.overcommit)); - if (opt.time_limit == INFINITE) - info("time_limit : INFINITE"); - else - info("time_limit : %d", opt.time_limit); info("wait : %d", opt.max_wait); info("required nodes : %s", opt.nodelist); info("network : %s", opt.network); @@ -1970,6 +1981,8 @@ static void _usage(void) static void _help(void) { + slurm_ctl_conf_t *conf; + printf ( "Usage: slaunch [OPTIONS...] 
executable [args...]\n" "\n" @@ -1996,8 +2009,6 @@ static void _help(void) " (type = block|cyclic|hostfile)\n" " -J, --job-name=jobname name of job\n" " --jobid=id run under already allocated job\n" -" --mpi=type type of MPI being used\n" -" -b, --batch submit as batch job for later execution\n" " -W, --wait=sec seconds to wait after first task exits\n" " before killing job\n" " -v, --verbose verbose mode (multiple -v's increase verbosity)\n" @@ -2011,32 +2022,19 @@ static void _help(void) " --epilog=program run \"program\" after launching job step\n" " --task-prolog=program run \"program\" before launching task\n" " --task-epilog=program run \"program\" after launching task\n" -" --ctrl-comm-ifhn=addr interface hostname for PMI commaunications from slaunch\n" +" --ctrl-comm-ifhn=addr hostname for PMI communications with slaunch\n" " --multi-prog if set the program name specified is the\n" " configuration specificaiton for multiple programs\n" -" -w, --nodelist=hosts... request a specific list of hosts\n" -"\n" -"Affinity/Multi-core options: (when the task/affinity plugin is enabled)\n" -" --cpu_bind= Bind tasks to CPUs\n" -" q[uiet], quietly bind before task runs (default)\n" -" v[erbose], verbosely report binding before task runs\n" -" no[ne] don't bind tasks to CPUs (default)\n" -" rank bind by task rank\n" -" map_cpu:<list> bind by mapping CPU IDs to tasks as specified\n" -" where <list> is <cpuid1>,<cpuid2>,...<cpuidN>\n" -" mask_cpu:<list> bind by setting CPU masks on tasks as specified\n" -" where <list> is <mask1>,<mask2>,...<maskN>\n" -" --mem_bind= Bind tasks to memory\n" -" q[uiet], quietly bind before task runs (default)\n" -" v[erbose], verbosely report binding before task runs\n" -" no[ne] don't bind tasks to memory (default)\n" -" rank bind by task rank\n" -" local bind to memory local to processor\n" -" map_mem:<list> bind by mapping memory of CPU IDs to tasks as specified\n" -" where <list> is <cpuid1>,<cpuid2>,...<cpuidN>\n" -" mask_mem:<list> 
bind by setting menory of CPU masks on tasks as specified\n" -" where <list> is <mask1>,<mask2>,...<maskN>\n"); - +" -w, --nodelist=hosts... request a specific list of hosts\n"); + conf = slurm_conf_lock(); + if (conf->task_plugin != NULL + && strcasecmp(conf->task_plugin, "task/affinity") == 0) { + printf( +" --cpu_bind= Bind tasks to CPUs(\"--cpu_bind=help\" for options\n" +" --mem_bind= Bind tasks to memory(\"--mem_bind=help\" for options\n" + ); + } + slurm_conf_unlock(); spank_print_options (stdout, 6, 30); printf("\n"); @@ -2048,7 +2046,7 @@ static void _help(void) #endif "Help options:\n" -" --help show this help message\n" +" -h, --help show this help message\n" " --usage display brief usage message\n" "\n" "Other options:\n" diff --git a/src/slaunch/opt.h b/src/slaunch/opt.h index 88936f212051edcd4f0f5dc267d99c5c8e1ae0d8..b82796c2087bede99bd3ccd6f661a8aef341ef2f 100644 --- a/src/slaunch/opt.h +++ b/src/slaunch/opt.h @@ -76,7 +76,6 @@ typedef struct slaunch_options { char *cpu_bind; /* binding map for map/mask_cpu */ mem_bind_type_t mem_bind_type; /* --mem_bind= */ char *mem_bind; /* binding map for map/mask_mem */ - int time_limit; /* --time, -t */ enum task_dist_states distribution; /* --distribution=, -m dist */ bool distribution_set;