Commit 18e706c3 authored by Morris Jette

Merge branch 'slurm-14.03'

Conflicts:
	META
	NEWS
Parents: ae61e599, d2e53968
@@ -86,6 +86,9 @@ documents those changes that are of interest to users and admins.
 -- Create a new DebugFlags named TraceJobs in slurm.conf to print detailed information
    about jobs in slurmctld. The information includes job ids, state and node count.
+* Changes in Slurm 14.03.5
+==========================
 * Changes in Slurm 14.03.4
 ==========================
 -- Fix issue where not enforcing QOS but a partition either allows or denies
@@ -124,7 +127,7 @@ documents those changes that are of interest to users and admins.
 -- Keep supporting 'srun -N x --pty bash' for historical reasons.
 -- If EnforcePartLimits=Yes and a QOS the job is using can override limits, allow
    it.
--- Fix issues if partition allows or denys account's or QOS' and either are
+-- Fix issues if partition allows or denies account's or QOS' and either are
    not set.
 -- If a job requests a partition and it doesn't allow a QOS or account the
    job is requesting, pend unless EnforcePartLimits=Yes. Before it would
......
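The TraceJobs entry above refers to the DebugFlags option in slurm.conf, which turns on extra slurmctld logging. A minimal, illustrative sketch of enabling it (the flag name comes from the changelog entry; the surrounding values are assumed for the example and are not part of this commit):

    # slurm.conf excerpt (illustrative)
    SlurmctldDebug=info
    DebugFlags=TraceJobs

On releases that carry the flag, it can also be toggled on a running controller with scontrol setdebugflags +TraceJobs (or -TraceJobs to clear it), avoiding a restart.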
@@ -228,7 +228,7 @@ expect {
 		wait
 	}
 }
-set $core_cnt [expr $core_cnt * $socket_cnt]
+set core_cnt [expr $core_cnt * $socket_cnt]
 if {$core_cnt == 0} {
 	send_user "\nFAILURE: sbatch did not find the number of cores\n"
 	exit 1
@@ -241,6 +241,7 @@ if {$core_cnt < 4} {
 #
 # Using the core spec within the node limits
 #
+send_user "\n\nRun within the specified node\n"
 core_spec_job 0 $first_node [expr $core_cnt - 2] 0
 core_spec_job -2 $first_node [expr $core_cnt - 2] 0
@@ -248,12 +249,14 @@ core_spec_job -2 $first_node [expr $core_cnt - 2] 0
 #
 # Using core spec with more tasks than the node can handle. This should
 # cause the tasks to spread across multiple nodes as needed
 #
+send_user "\n\nSpread job across multiple nodes\n"
 core_spec_job 1 $first_node [expr $core_cnt - 2] 1
 core_spec_job 1 $first_node [expr $core_cnt - 1] 1
 #
 # Using core spec with more cores than the specified node has
 #
+send_user "\n\nFail by trying to use more cores than exist\n"
 core_spec_job 1 $first_node [expr $core_cnt + 5] -1
 core_spec_job 1 $first_node [expr $core_cnt + 7] -1
......
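Two things change in the test above: three send_user banners label the phases of the core-specialization test, and the buggy assignment set $core_cnt becomes set core_cnt. In Tcl the extra dollar sign makes set assign through the current value of core_cnt, creating a stray variable named after that number while core_cnt itself keeps its old value, so the later zero check and the [expr $core_cnt - 2] arithmetic would have used the core count from before the socket multiplication. A standalone sketch (illustrative values, not part of the test suite) showing the difference:

    # Standalone Tcl sketch of the bug fixed above; values are made up.
    set core_cnt   4
    set socket_cnt 2

    # Buggy form: "set $core_cnt ..." expands to "set 4 8", which creates a
    # variable literally named "4" and leaves core_cnt untouched.
    set $core_cnt [expr $core_cnt * $socket_cnt]
    puts "after buggy set: core_cnt = $core_cnt"   ;# still 4
    puts "stray variable: [set 4]"                 ;# 8

    # Fixed form from the commit: assigns the product back to core_cnt.
    set core_cnt [expr $core_cnt * $socket_cnt]
    puts "after fixed set: core_cnt = $core_cnt"   ;# now 8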