Commit c76feb3d authored by Mark Grondona

Initial revision

parent 3752d6b0
AUTOMAKE_OPTIONS = foreign
EXTRA_DIST = auxdir include etc
# don't include lib and doc 'til we have something there
#SUBDIRS = lib src doc
SUBDIRS = src
dnl aclocal.m4 generated automatically by aclocal 1.4-p5
dnl Copyright (C) 1994, 1995-8, 1999, 2001 Free Software Foundation, Inc.
dnl This file is free software; the Free Software Foundation
dnl gives unlimited permission to copy and/or distribute it,
dnl with or without modifications, as long as this notice is preserved.
dnl This program is distributed in the hope that it will be useful,
dnl but WITHOUT ANY WARRANTY, to the extent permitted by law; without
dnl even the implied warranty of MERCHANTABILITY or FITNESS FOR A
dnl PARTICULAR PURPOSE.
# Like AC_CONFIG_HEADER, but automatically create stamp file.
AC_DEFUN([AM_CONFIG_HEADER],
[AC_PREREQ([2.12])
AC_CONFIG_HEADER([$1])
dnl When config.status generates a header, we must update the stamp-h file.
dnl This file resides in the same directory as the config header
dnl that is generated. We must strip everything past the first ":",
dnl and everything past the last "/".
AC_OUTPUT_COMMANDS(changequote(<<,>>)dnl
ifelse(patsubst(<<$1>>, <<[^ ]>>, <<>>), <<>>,
<<test -z "<<$>>CONFIG_HEADERS" || echo timestamp > patsubst(<<$1>>, <<^\([^:]*/\)?.*>>, <<\1>>)stamp-h<<>>dnl>>,
<<am_indx=1
for am_file in <<$1>>; do
case " <<$>>CONFIG_HEADERS " in
*" <<$>>am_file "*<<)>>
echo timestamp > `echo <<$>>am_file | sed -e 's%:.*%%' -e 's%[^/]*$%%'`stamp-h$am_indx
;;
esac
am_indx=`expr "<<$>>am_indx" + 1`
done<<>>dnl>>)
changequote([,]))])
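dnl Illustrative expansion (not part of the macro above): for a single
dnl header such as "config.h", the AC_OUTPUT_COMMANDS body reduces to roughly
dnl the shell fragment
dnl   test -z "$CONFIG_HEADERS" || echo timestamp > stamp-h
dnl i.e. config.status touches stamp-h whenever it regenerates the header.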
# Do all the work for Automake. This macro actually does too much --
# some checks are only needed if your package does certain things.
# But this isn't really a big deal.
# serial 1
dnl Usage:
dnl AM_INIT_AUTOMAKE(package,version, [no-define])
AC_DEFUN([AM_INIT_AUTOMAKE],
[AC_REQUIRE([AC_PROG_INSTALL])
PACKAGE=[$1]
AC_SUBST(PACKAGE)
VERSION=[$2]
AC_SUBST(VERSION)
dnl test to see if srcdir already configured
if test "`cd $srcdir && pwd`" != "`pwd`" && test -f $srcdir/config.status; then
AC_MSG_ERROR([source directory already configured; run "make distclean" there first])
fi
ifelse([$3],,
AC_DEFINE_UNQUOTED(PACKAGE, "$PACKAGE", [Name of package])
AC_DEFINE_UNQUOTED(VERSION, "$VERSION", [Version number of package]))
AC_REQUIRE([AM_SANITY_CHECK])
AC_REQUIRE([AC_ARG_PROGRAM])
dnl FIXME This is truly gross.
missing_dir=`cd $ac_aux_dir && pwd`
AM_MISSING_PROG(ACLOCAL, aclocal, $missing_dir)
AM_MISSING_PROG(AUTOCONF, autoconf, $missing_dir)
AM_MISSING_PROG(AUTOMAKE, automake, $missing_dir)
AM_MISSING_PROG(AUTOHEADER, autoheader, $missing_dir)
AM_MISSING_PROG(MAKEINFO, makeinfo, $missing_dir)
AC_REQUIRE([AC_PROG_MAKE_SET])])
#
# Check to make sure that the build environment is sane.
#
AC_DEFUN([AM_SANITY_CHECK],
[AC_MSG_CHECKING([whether build environment is sane])
# Just in case
sleep 1
echo timestamp > conftestfile
# Do `set' in a subshell so we don't clobber the current shell's
# arguments. Must try -L first in case configure is actually a
# symlink; some systems play weird games with the mod time of symlinks
# (eg FreeBSD returns the mod time of the symlink's containing
# directory).
if (
set X `ls -Lt $srcdir/configure conftestfile 2> /dev/null`
if test "[$]*" = "X"; then
# -L didn't work.
set X `ls -t $srcdir/configure conftestfile`
fi
if test "[$]*" != "X $srcdir/configure conftestfile" \
&& test "[$]*" != "X conftestfile $srcdir/configure"; then
# If neither matched, then we have a broken ls. This can happen
# if, for instance, CONFIG_SHELL is bash and it inherits a
# broken ls alias from the environment. This has actually
# happened. Such a system could not be considered "sane".
AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken
alias in your environment])
fi
test "[$]2" = conftestfile
)
then
# Ok.
:
else
AC_MSG_ERROR([newly created file is older than distributed files!
Check your system clock])
fi
rm -f conftest*
AC_MSG_RESULT(yes)])
dnl AM_MISSING_PROG(NAME, PROGRAM, DIRECTORY)
dnl The program must properly implement --version.
AC_DEFUN([AM_MISSING_PROG],
[AC_MSG_CHECKING(for working $2)
# Run test in a subshell; some versions of sh will print an error if
# an executable is not found, even if stderr is redirected.
# Redirect stdin to placate older versions of autoconf. Sigh.
if ($2 --version) < /dev/null > /dev/null 2>&1; then
$1=$2
AC_MSG_RESULT(found)
else
$1="$3/missing $2"
AC_MSG_RESULT(missing)
fi
AC_SUBST($1)])
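dnl Illustrative outcome (assuming the "missing" wrapper script shipped in
dnl the directory passed as $3): if, say, aclocal does not answer --version,
dnl the macro falls back to
dnl   ACLOCAL="$missing_dir/missing aclocal"
dnl so generated make rules still have a command to run.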
# Define a conditional.
AC_DEFUN([AM_CONDITIONAL],
[AC_SUBST($1_TRUE)
AC_SUBST($1_FALSE)
if $2; then
$1_TRUE=
$1_FALSE='#'
else
$1_TRUE='#'
$1_FALSE=
fi])
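dnl Example use, taken from configure.in and src/Makefile.am later in this
dnl commit:
dnl   AM_CONDITIONAL(USE_INCLUDED_POPT, test x$got_popt = xno)
dnl and in a Makefile.am:
dnl   if USE_INCLUDED_POPT
dnl   popt_dirs = popt
dnl   endif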
#!/bin/sh
#
# $Id$
# $Source$
#
echo "running autoheader ... "
autoheader
echo "running automake --add-missing ... "
automake --add-missing
echo "running autoconf ... "
autoconf
echo "removing stale config.status and config.log"
rm -f config.status config.log
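# Typical use (illustrative; this script's file name is not shown in the
# diff): run it from the top of the source tree, then configure and build:
#   sh ./<this-script>
#   ./configure
#   make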
# This file is to be processed with autoconf to generate a configure script
AC_INIT(src/common/slurm.h)
AC_CONFIG_AUX_DIR(auxdir)
AC_CANONICAL_SYSTEM
# determine project/version
dnl XXX fill this in from ./META or something ...
PROJECT="slurm"
AC_DEFINE_UNQUOTED(PROJECT, "$PROJECT", [Define the project's name.])
AC_SUBST(PROJECT)
VERSION="0.0"
AC_DEFINE_UNQUOTED(VERSION, "$VERSION", [Define the project's version.])
AC_SUBST(VERSION)
dnl Automake stuff.
AM_CONFIG_HEADER(config.h)
AM_INIT_AUTOMAKE($PROJECT, $VERSION)
dnl Checks for programs.
AC_PROG_CC
AC_PROG_MAKE_SET
AC_PROG_RANLIB
dnl Checks for libraries
dnl Checks for header files.
AC_CHECK_HEADERS(popt.h mcheck.h unistd.h)
AC_CHECK_HEADERS(strings.h values.h)
dnl Checks for types.
AC_GCC_TRADITIONAL
dnl Checks for structures.
dnl Checks for compiler characteristics.
dnl checks for library functions.
AC_CHECK_FUNCS(strerror mtrace)
AC_SEARCH_LIBS([pthread_create], [pthread c_r cr], [],
[AC_MSG_ERROR([Error: Cannot find the pthread library])])
AC_CHECK_LIB(popt, poptGetNextOpt, [got_popt=yes], [got_popt=no])
AM_CONDITIONAL(USE_INCLUDED_POPT, test x$got_popt = xno)
if test "$got_popt" = "no"; then
AC_MSG_RESULT([using included popt package...])
POPT_LIBS="src/popt/libpopt.a"
POPT_INCLUDES="-Isrc/popt"
else
POPT_LIBS="-lpopt"
fi
AC_SUBST(POPT_LIBS)
AC_SUBST(POPT_INCLUDES)
dnl checks for system services.
AC_OUTPUT([Makefile src/Makefile src/common/Makefile src/popt/Makefile src/srun/Makefile src/slurmd/Makefile src/slurmctld/Makefile src/scancel/Makefile src/squeue/Makefile src/scontrol/Makefile])
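dnl Illustrative only: on a host without a system libpopt, the checks above
dnl fall back to the bundled copy, e.g.
dnl   ./configure          # reports "using included popt package..."
dnl   make                 # builds src/popt/libpopt.a and links it in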
EXTRA_DIST = txt html
SUBDIRS = pubdesign
.TH SRUN "1" "February 2002" "srun 0.1" "slurm components"
.SH "NAME"
srun \- run parallel jobs
.SH SYNOPSIS
.B srun
[\fIOPTIONS\fR...] \fIexecutable \fR[\fIargs\fR...]
.br
.B srun
\-\-allocate [\fIOPTIONS\fR...] [job_script]
.br
.B srun
\-\-attach=jobid
.SH DESCRIPTION
Allocate resources and optionally initiate parallel jobs on
clusters managed by SLURM.
.TP
parallel run options
.TP
\fB\-n\fR, \fB\-\-nprocs\fR=\fInprocs\fR
Specify the number of processes to run. Request that
.B srun
allocate \fInprocs\fR processes. Specification of the number of processes
per node may be achieved with the
.B -c
and
.B -N
options. If unspecified, the default is one process.
.TP
\fB\-c\fR, \fB\-\-cpus\fR=\fIncpus\fR
Request that \fIncpus\fR be allocated \fBper process\fR. This may be
useful if the job will be multithreaded and requires more than one cpu
per task for optimal performance. The default is one cpu per process.
.TP
\fB\-N\fR, \fB\-\-nodes\fR=\fInnodes\fR
Request that \fInnodes\fR nodes be allocated to this job. The default
is to allocate one cpu per process, such that nodes with one cpu will
run one process, nodes with 2 cpus will be allocated 2 processes, etc.
The distribution of processes across nodes may be controlled using this
option along with the
.B -n
and
.B -c
options.
.TP
\fB\-p\fR, \fB\-\-partition\fR=\fIpartition\fR
Request resources from partition "\fIpartition\fR." Partitions
are created by the slurm administrator.
.TP
\fB\-\-cddir\fR=\fIpath\fR
have the remote processes do a chdir to \fIpath\fR before beginning
execution. The default is to chdir to the current working directory
of the \fBsrun\fR process.
.TP
\fB\-I\fR, \fB\-\-immediate\fR
exit if resources are not immediately
available. By default, \fB\-\-immediate\fR is off, and
.B srun
will block until resources become available.
.TP
\fB\-O\fR, \fB\-\-overcommit\fR
overcommit resources. Normally,
.B srun
will not allocate more than one process to a cpu. By specifying
\fB\-\-overcommit\fR you are explicitly allowing more than one process
per cpu.
.TP
\fB\-l\fR, \fB\-\-label\fR
prepend task number to lines of stdout/err. Normally, stdout and stderr
from remote tasks are line-buffered directly to the stdout and stderr of
.BR srun .
The \fB\-\-label\fR option will prepend lines of output with the remote
task id.
.TP
\fB\-m\fR, \fB\-\-distribution\fR=(\fIblock\fR|\fIcyclic\fR)
Specify an alternate distribution method for remote processes.
.RS
.TP
.B block
The block method of distribution will allocate processes in-order to
the cpus on a node. This is the default behavior.
.TP
.B cyclic
The cyclic method distributes processes in a round-robin fashion across
the allocated nodes. That is, process 1 will be allocated to the first
node, process 2 to the second, and so on.
.RE
.TP
\fB\-J\fR, \fB\-\-job\-name\fR=\fIjobname\fR
Specify a name for the job. The specified name will appear along with
the job id number when querying running jobs on the system. The default
is an empty name.
.TP
\fB\-o\fR, \fB\-\-output\fR=\fIout\fR
Specify how stdout is to be directed. By default,
.B srun
collects stdout from all tasks and line buffers this output to
the attached terminal. With \fB\-\-output\fR stdout may be redirected
to a file, to one file per task, or to /dev/null. See \fBIO Redirection\fR
below.
.TP
\fB\-i\fR, \fB\-\-input\fR=\fIin\fR
Specify how stdin is to be redirected. By default,
.B srun
redirects stdin to all tasks from /dev/null. See \fBIO Redirection\fR
below for more options.
.TP
\fB\-e\fR, \fB\-\-error\fR=\fIerr\fR
Specify how stderr is to be redirected. By default,
.B srun
redirects stderr to the same file as stdout, if one is specified. The
\fB\-\-error\fR option is provided to allow stdout and stderr to be
redirected to different locations.
See \fBIO Redirection\fR below for more options.
.TP
\fB\-b\fR, \fB\-\-batch\fR
Submit in "batch mode." \fBsrun\fR will allocate resources and "detach" -
starting another \fBsrun\fR on the first allocated node to manage the
job. stdin will be redirected from /dev/null, stdout and stderr will be
redirected to a file (the default is \fIjobname\fR.out or \fIjobid\fR.out in the
current working directory; see \fB\-o\fR for other IO options).
.TP
\fB\-v\fR, \fB\-\-verbose\fR
verbose operation. Multiple \fB-v\fR's will further increase the verbosity of
.BR srun .
.TP
\fB\-d\fR, \fB\-\-debug\fR
enable debug output. Multiple \fB-d\fR's increase the debug level of
.BR srun .
.PP
Allocate options:
.TP
\fB\-A\fR, \fB\-\-allocate\fR
allocate resources and spawn a shell. When \fB\-\-allocate\fR is specified to
.BR srun ,
no remote tasks are started. Instead a subshell is started that has access
to the allocated resources. Multiple jobs can then be run on the same cpus
from within this subshell. See \fBAllocate Mode\fR below.
.PP
Attach to running job:
.TP
\fB\-a\fR, \fB\-\-attach\fR=\fIid\fR
This option will attach
.B srun
to a running job with job id = \fIid\fR. Provided that the calling user
has access to that running job, stdout and stderr will be redirected to the
current session and signals received by
.B srun
will be forwarded to the remote processes.
.TP
\fB\-j\fR, \fB\-\-join\fR
Join with running job. This will duplicate stdout/stderr to the calling
\fBsrun\fR. stdin and signals will not be propagated to the job.
\fB\-\-join\fR is only allowed with \fB\-\-attach\fR.
.TP
\fB\-s\fR, \fB\-\-steal\fR
Steal the connection to the running job. This will close any open
sessions with the specified job and allow stdin and signals to be propagated.
\fB\-\-steal\fR is only allowed with \fB\-\-attach\fR.
.PP
Constraint Options. The following options all put constraints on the nodes
that may be considered for the job:
.TP
\fB\-\-mincpus\fR=\fIn\fR
Specify minimum number of cpus per node
.TP
\fB\-\-mem\fR=\fIMB\fR
Specify a minimum amount of real memory
.TP
\fB\-\-vmem\fR=\fIMB\fR
Specify a minimum amount of virtual memory
.TP
\fB\-\-tmp\fR=\fIMB\fR
Specify a minimum amount of temporary disk space
.TP
\fB\-C\fR, \fB\-\-constraint\fR=\fIlist\fR
specify a list of constraints. The \fIlist\fR of constraints is
a comma separated list of features that have been assigned to the
nodes by the slurm administrator. If no nodes have the requested
feature, then the job will be rejected by the slurm job manager.
.TP
\fB\-\-contiguous\fR
demand a contiguous range of nodes. The default is on. Specify
--contiguous=no if a contiguous range of nodes is not a constraint.
.TP
\fB\-w\fR, \fB\-\-nodelist\fR=\fIhost1,host2,...\fR or \fIfilename\fR
request a specific list of hosts. The job will contain \fIat least\fR
these hosts. The list may be specified as a comma-separated list of
hosts, a range of hosts (host[1-5,7,...] for example), or a filename.
The host list will be assumed to be a filename if it contains a "/"
character.
.PP
Help options
.TP
-?, \fB\-\-help\fR
Show this help message
.TP
\fB\-\-usage\fR
Display brief usage message
.PP
Other options
.TP
\fB\-V\fR, \fB\-\-version\fR
output version information and exit
.PP
Unless the \fB\-a\fR (\fB\-\-attach\fR) or \fB-A\fR (\fB\-\-allocate\fR)
options are specified (see \fBAllocate mode\fR and \fBAttaching to jobs\fR
below),
.B srun
will submit the job request to the slurm job controller, then initiate all
processes on the remote nodes. If the request cannot be met immediately,
.B srun
will block until the resources are free to run the job. If the
\fB\-I\fR (\fB\-\-immediate\fR) option is specified
.B srun
will terminate if resources are not immediately available.
.PP
When initiating remote processes
.B srun
will propagate the current working directory, unless
\fB\-\-cddir\fR=\fIpath\fR is specified, in which case \fIpath\fR will
become the working directory for the remote processes.
.PP
The \fB-n\fR, \fB-c\fR, and \fB-N\fR options control how CPUs and
nodes will be allocated to the job. When specifying only the number
of processes to run with \fB-n\fR, a default of one CPU per process
is allocated. By specifying the number of CPUs required per task (\fB-c\fR),
more than one CPU may be allocated per process. If the number of nodes
is specified with \fB-N\fR,
.B srun
will attempt to allocate \fIat least\fR the number of nodes specified.
.PP
Combinations of the above three options may be used to change how
processes are distributed across nodes and cpus. For instance, by specifying
both the number of processes and number of nodes on which to run, the
number of processes per node is implied. However, if the number of CPUs
per process is more important, then the number of processes (\fB-n\fR) and the
number of CPUs per process (\fB-c\fR) should both be specified.
.PP
.B srun
will refuse to allocate more than one process per CPU unless
\fB\-\-overcommit\fR (\fB\-O\fR) is also specified.
.PP
.B srun
will attempt to meet the above specifications "at a minimum." That is,
if 16 nodes are requested for 32 processes, and some nodes do not have
2 CPUs, the allocation of nodes will be increased in order to meet the
demand for CPUs. In other words, a \fIminimum\fR of 16 nodes are being
requested. However, if 16 nodes are requested for 15 processes,
.B srun
will consider this an error, as 15 processes cannot run across 16 nodes.
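.PP
For example, the request just described might be written as (illustrative
only; \fIa.out\fR stands for any user program):
.nf
    srun -N 16 -n 32 a.out
.fi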
.PP
.B "IO Redirection"
.PP
By default stdout and stderr will be redirected from all tasks to the
stdout and stderr of
.BR srun ,
and stdin will be redirected from /dev/null to all tasks. This
behavior may be changed with the \fB\-\-output\fR, \fB\-\-error\fR,
and \fB\-\-input\fR (\fB\-o\fR, \fB\-e\fR, \fB\-i\fR) options. Valid
arguments to these options are
.TP 10
all
stdout and stderr are redirected from all tasks to srun (This is the default).
stdin is forwarded to all tasks.
.TP
none
stdout and stderr are redirected to /dev/null.
stdin is redirected from /dev/null (This is the default for stdin).
.TP
filename
stdout and stderr are redirected to the named file (relative to the
current working directory of the job). stdin is redirected from the
named file.
.TP
format string
If a format string is provided (such as "output.%d"),
.B srun
will open one file per task passing the task id as the argument to
the format string. The format specifier may be any valid printf
format, as long as it takes a numeric argument.
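.PP
For example (illustrative), to give each task its own output file:
.nf
    srun -n 8 -o output.%d a.out
.fi
This creates one file per task (output.0, output.1, and so on, assuming
task ids start at zero).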
.PP
.B "Allocate Mode"
.PP
When the allocate option is specified (\fB\-A\fR, \fB\-\-allocate\fR)
\fBsrun\fR will not initiate any remote processes after acquiring
resources. Instead, \fBsrun\fR will spawn a subshell which has access
to the acquired resources. Subsequent instances of \fBsrun\fR from within
this subshell will then run on these resources.
.PP
If the name of a script is specified on the
commandline with \fB\-\-allocate\fR, the spawned shell will run the
specified script. Resources allocated in this way will only be freed
when the subshell terminates.
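.PP
For example (illustrative), to allocate two nodes and run several commands
on them from the spawned subshell:
.nf
    srun -A -N 2
    srun hostname
    exit
.fi
The allocated resources are released when the subshell exits.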
.PP
.B "Attaching to a running job"
.PP
Use of the \fB-a\fR \fIjobid\fR (or \fB\-\-attach\fR) option allows
\fBsrun\fR to reattach to a running job, receiving stdout and stderr
from the job and forwarding signals to the job, just as if the current
session of \fBsrun\fR had started the job. (stdin, however, cannot
be forwarded to the job).
.PP
There are two ways to reattach to a running job. The default method
is to steal any current connections to the job. In this case, the
\fBsrun\fR process currently managing the job will be terminated, and
control will be relegated to the caller. To allow the current
\fBsrun\fR to continue managing the running job, the \fB\-j\fR
(\fB\-\-join\fR) option may be specified. When joining with the
running job, stdout and stderr are duplicated to the new \fBsrun\fR
session, but signals are not forwarded to the remote job.
.PP
Node and CPU selection options do not make sense when specifying
\fB\-\-attach\fR, and it is an error to use \fB-n\fR, \fB-c\fR,
or \fB-N\fR in attach mode.
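.PP
For example (illustrative; the job id is hypothetical), to watch a running
job's output without disturbing the \fBsrun\fR that started it:
.nf
    srun --join --attach=1234
.fi
Omitting \fB\-\-join\fR would instead steal the connection, terminating the
\fBsrun\fR currently managing the job.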
.PP
.SH "ENVIRONMENT VARIABLES"
.PP
Some
.B srun
options may be set via environment variables. These environment
variables, along with their corresponding options, are listed below.
(Note: commandline options will always override these settings)
.TP 20
SLURM_NPROCS
\fB\-n, \-\-nprocs\fR=\fIn\fR
.TP
SLURM_CPUS_PER_TASK
\fB\-c, \-\-cpus\fR=\fIn\fR
.TP
SLURM_NNODES
\fB\-N, \-\-nodes\fR=\fIn\fR
.TP
SLURM_PARTITION
\fB\-p, --partition\fR=\fIpartition\fR
.TP
SLURM_STDOUTMODE
\fB\-o, \-\-output\fR=\fImode\fR
.TP
SLURM_STDINMODE
\fB\-i, \-\-input\fR=\fImode\fR
.TP
SLURM_STDERRMODE
\fB\-e, \-\-error\fR=\fImode\fR
.TP
SLURM_DISTRIBUTION
\fB\-m, \-\-distribution\fR=(\fIblock|cyclic\fR)
.TP
SLURM_DEBUG
\fB\-d, \-\-debug\fR
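.PP
For example (illustrative; the partition name and \fIa.out\fR are
placeholders), defaults may be supplied through the environment and
overridden on the command line:
.nf
    SLURM_NPROCS=4 SLURM_PARTITION=debug srun a.out
.fi
which is equivalent to "srun -n 4 -p debug a.out".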
.PP
Additionally,
.B srun
will set some environment variables in the environment of the
executing tasks on the remote compute nodes. These environment variables
are:
.TP 20
SLURM_JOBID
job id of the executing job.
.TP
SLURM_RANK
the MPI rank of the current process
.TP
SLURM_NPROCS
total number of processes in the current job
.TP
SLURM_NODELIST
list of nodes that the slurm job is executing on.
.SH "SEE ALSO"
#
# Sample /etc/SLURM.conf
# Author: John Doe
# Date: 11/06/2001
#
Administrators=cdunlap,garlick,grondo,jette
#
ControlMachine=linux
BackupController=lx02
#
NodeSpecConf=/usr/local/SLURM/NodeSpecConf
PartitionConf=/usr/local/SLURM/PartitionConf
#
MasterDaemon=/usr/local/SLURM/Slurmd.Master
InitProgram=/usr/local/SLURM/Slurmd.Prolog
ControlDaemon=/usr/local/SLURM/Slurmd.Control
ServerDaemon=/usr/local/SLURM/Slurmd.Server
ControllerTimeout=120
ServerTimeout=90
#
# Sample /etc/SLURM.conf2
# Author: John Doe
# Date: 11/06/2001
#
Administrators=cdunlap,garlick,grondo,jette
#
ControlMachine=linux
BackupController=blue199.pacific.llnl.gov
#
NodeSpecConf=/g/g0/jette/slurm-code/etc/sample.node.conf2
PartitionConf=/g/g0/jette/slurm-code/etc/sample.part.conf2
#
ControlDaemon=/g/g0/jette/slurm-code/Controller
ServerDaemon=/g/g0/jette/slurm-code/Server
#
# Sample sample.job.conf2
# Author: John Doe
# Date: 12/20/2001
#
User=FAILS_Node_Down JobType=INTERACTIVE NodeCount=1 MaxTime=40 NodeList=lx01
User=jette JobType=INTERACTIVE NodeCount=2 MaxTime=40 NodeList=lx11,lx12
User=userforlx08-09 JobType=INTERACTIVE NodeCount=2 MaxTime=40 MinTmpDisk=8000
User=anyuser JobType=BATCH NodeCount=2 MaxTime=40 NodeList=lx11,lx12
User=FAILS_JobType JobType=Batch NodeCount=2 MinTmpDisk=1000 MaxTime=40 NodeList=lx07,lx12
User=FAILS_Tmp_Disk JobType=BATCH NodeCount=2 MinTmpDisk=8000 MaxTime=40 NodeList=lx07,lx12
User=jette JobType=BATCH NodeCount=4 Partition=pbatch MinRealMemory=2048 Comment=lx13-16
User=userfor_lx14 JobType=BATCH NodeCount=1 Partition=pbatch MinCpus=32
User=userfor_lx12-14 JobType=BATCH NodeCount=3 Partition=pbatch MinVirtualMemory=4000
User=jette JobType=BATCH NodeCount=29
User=anyuser JobType=BATCH NodeCount=4 MinRealMemory=2048 Contiguous=FALSE Comment=lx13-16
User=userforlx13-16 JobType=BATCH NodeCount=4 MinRealMemory=2048 Contiguous=TRUE
User=userforlx18-25 JobType=BATCH NodeCount=8 Partition=pbatch
#
# Sample sample.node.conf
# Author: John Doe
# Date: 11/06/2001
#
Name=DEFAULT OS=Linux.2.4.7-1 CPUs=2 Speed=1.0 RealMemory=2048 VirtualMemory=4096 TmpDisk=16384 Partition=1
#
Name=lx01 Partition=
Name=lx02
Name=lx03 Speed=1.5 RealMemory=3072 Partition=1,2
Name=lx04 CPUs=1 Speed=1.3 Partition=1,3
Name=lx05
Name=lx06
#
Name=DEFAULT OS=Linux3.0 CPUs=4 Speed=1.6 Partition=9
Name=mx01
Name=mx02 Pool=5 RealMemory=567
#
# Sample sample.node.conf2
# Author: John Doe
# Date: 11/06/2001
#
Name=DEFAULT OS=Linux.2.4.7-1 CPUs=16 Speed=345.0 RealMemory=2048 VirtualMemory=4096 TmpDisk=16384 State=IDLE
#
# lx01-lx02 for login only, so state is DOWN for SLURM initiated jobs
Name=lx01 State=DOWN
Name=lx02 State=DOWN
#
# lx03-lx09 for partitions 1 (debug) and 3 (super)
Name=DEFAULT Partition=1,3
Name=lx03
Name=lx04
Name=lx05
Name=lx06
Name=lx07 TmpDisk=4096
Name=lx08
Name=lx09
#
# lx10-lx30 for partitions 0 (pbatch) and 3 (super)
Name=DEFAULT Partition=0,3
Name=lx10
Name=lx11 VirtualMemory=2048
Name=lx12 RealMemory=1024
Name=lx13
Name=lx14 CPUs=32
Name=lx15
Name=lx16
Name=lx17 State=DOWN
Name=lx18
Name=lx19
Name=lx20
Name=lx21
Name=lx22 CPUs=8
Name=lx23
Name=lx24
Name=lx25
Name=lx26
Name=lx27
Name=lx28
Name=lx29
Name=lx30
#
# lx31-lx32 for partitions 4 (class) and 3 (super)
Name=DEFAULT Partition=3,4
Name=lx31
Name=lx32
#
# Example sample.part.conf
# Author: John Doe
# Date: 12/14/2001
#
Name=DEFAULT JobType=Batch
#
Name=DEFAULT JobType=INTERACTIVE MaxCpus=16
Name=pbatch Number=0 JobType=BATCH MaxCpus=128 MaxTime=UNLIMITED
Name=debug Number=1 MaxCpus=4 MaxTime=60
Name=super Number=3 JobType=BATCH MaxCpus=256 AllowUsers=cdunlap,garlick,jette
Name=class Number=4 JobType=ALL MaxCpus=UNLIMITED AllowUsers=student1,student2,student3
#
# Example sample.part.conf2
# Author: John Doe
# Date: 12/14/2001
#
Name=pbatch Number=0 JobType=BATCH MaxCpus=128 MaxTime=UNLIMITED
Name=debug Number=1 JobType=INTERACTIVE MaxCpus=16 MaxTime=60
Name=super Number=3 JobType=ALL MaxCpus=UNLIMITED MaxTime=UNLIMITED AllowUsers=cdunlap,garlick,jette
Name=class Number=4 JobType=ALL MaxCpus=16 MaxTime=10 AllowUsers=student1,student2,student3
popt_dirs =
if USE_INCLUDED_POPT
popt_dirs = popt
endif
# add subdirs "scancel", "squeue", and "slurmd" as soon as something is there:
SUBDIRS = common slurmctld srun $(popt_dirs)
# Makefile for common library
AUTOMAKE_OPTIONS = foreign
noinst_LIBRARIES = libcommon.a
libcommon_a_SOURCES = list.c
noinst_HEADERS = list.h slurm.h
Copyright (c) 1998 Red Hat Software
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of the X Consortium shall not be
used in advertising or otherwise to promote the sale, use or other dealings
in this Software without prior written authorization from the X Consortium.
# Makefile for popt.
EXTRA_DIST = README COPYING
#INCLUDES = -I$(top_srcdir) -I$(includedir)
INCLUDES =
noinst_LIBRARIES = libpopt.a
libpopt_a_SOURCES = popt.c findme.c poptparse.c popthelp.c poptconfig.c
noinst_HEADERS = popt.h system.h findme.h poptint.h
This is the popt command line option parsing library. While it is similar
to getopt(3), it contains a number of enhancements, including:
1) popt is fully reentrant
2) popt can parse arbitrary argv[] style arrays while
getopt(3) makes this quite difficult
3) popt allows users to alias command line arguments
4) popt provides convenience functions for parsing strings
into argv[] style arrays
popt is used by rpm, the Red Hat install program, and many other Red Hat
utilities, all of which provide excellent examples of how to use popt.
Complete documentation on popt is available in popt.ps (included in this
tarball), which is excerpted with permission from the book "Linux
Application Development" by Michael K. Johnson and Erik Troan (availble
from Addison Wesley in May, 1998).
Comments on popt should be addressed to ewt@redhat.com.
/* (C) 1998 Red Hat Software, Inc. -- Licensing details are in the COPYING
file accompanying popt source distributions, available from
ftp://ftp.redhat.com/pub/code/popt */
#include "system.h"
#include "findme.h"
/* Return a malloc'd path to the executable named by argv0, found either
 * directly (if argv0 contains a '/') or by searching $PATH; NULL on failure. */
const char * findProgramPath(const char * argv0) {
    char * path = getenv("PATH");
    char * pathbuf;
    char * start, * chptr;
    char * buf;

    /* If there is a / in the argv[0], it has to be an absolute path */
    if (strchr(argv0, '/'))
        return xstrdup(argv0);

    if (!path) return NULL;

    start = pathbuf = alloca(strlen(path) + 1);
    buf = malloc(strlen(path) + strlen(argv0) + sizeof("/"));
    strcpy(pathbuf, path);

    chptr = NULL;
    do {
        /* Terminate the current $PATH component and test it for argv0. */
        if ((chptr = strchr(start, ':')))
            *chptr = '\0';
        sprintf(buf, "%s/%s", start, argv0);

        if (!access(buf, X_OK))
            return buf;

        if (chptr)
            start = chptr + 1;
        else
            start = NULL;
    } while (start && *start);

    free(buf);
    return NULL;
}