diff --git a/testsuite/expect/Makefile.am b/testsuite/expect/Makefile.am
index 43eb5659ae314ca321676213ebc918723a48d85e..7d4adb5e8698741df8296f0a41e0aec75c64c2dd 100644
--- a/testsuite/expect/Makefile.am
+++ b/testsuite/expect/Makefile.am
@@ -124,7 +124,6 @@ EXTRA_DIST = \
 	test1.96		\
 	test1.96.prog.c		\
 	test1.97		\
-	test1.98		\
 	test1.99		\
 	test1.100		\
 	test1.101		\
diff --git a/testsuite/expect/Makefile.in b/testsuite/expect/Makefile.in
index de7c8d9ff32f64dc171969e600e3c5e9882a5025..1cc42cb20a865b86bae79f34a4358735fe4ce6d0 100644
--- a/testsuite/expect/Makefile.in
+++ b/testsuite/expect/Makefile.in
@@ -538,7 +538,6 @@ EXTRA_DIST = \
 	test1.96		\
 	test1.96.prog.c		\
 	test1.97		\
-	test1.98		\
 	test1.99		\
 	test1.100		\
 	test1.101		\
diff --git a/testsuite/expect/README b/testsuite/expect/README
index 9d822fe4617eef14359bf6cbcbaa2e5d507f21a2..4bc34d8e3a1ac93bfe83ffa9d4615d3751e0f464 100644
--- a/testsuite/expect/README
+++ b/testsuite/expect/README
@@ -205,7 +205,7 @@ test1.94 Test of MPICH2 task spawn logic
 test1.95 Basic UPC (Unified Parallel C) test via srun.
 test1.96 Basic SHMEM test via srun.
 test1.97 Test that --ntask-per-node and -c options are enforced
-test1.98 Test of --spread-job option
+test1.98 DEFUNCT
 test1.99 Validate that SrunPortRange is enforced when using srun
 test1.100 Test of pack/nopack task distribution.
 test1.101 Test of --bcast option.
diff --git a/testsuite/expect/test1.98 b/testsuite/expect/test1.98
deleted file mode 100755
index c34fc44cc0b84f54fc2523497e21c0c04631f3d4..0000000000000000000000000000000000000000
--- a/testsuite/expect/test1.98
+++ /dev/null
@@ -1,194 +0,0 @@
-#!/usr/bin/env expect
-############################################################################
-# Purpose: Test of SLURM functionality
-#          Test of --spread-job option
-#
-# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
-#          "WARNING: ..." with an explanation of why the test can't be made, OR
-#          "FAILURE: ..." otherwise with an explanation of the failure, OR
-#          anything else indicates a failure mode that must be investigated.
-############################################################################
-# Copyright (C) 2016 SchedMD LLC
-#
-# This file is part of SLURM, a resource management program.
-# For details, see <http://slurm.schedmd.com/>.
-# Please also read the included file: DISCLAIMER.
-#
-# SLURM is free software; you can redistribute it and/or modify it under
-# the terms of the GNU General Public License as published by the Free
-# Software Foundation; either version 2 of the License, or (at your option)
-# any later version.
-#
-# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
-# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along
-# with SLURM; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-############################################################################
-source ./globals
-
-set test_id     "1.98"
-
-print_header $test_id
-
-if {[test_alps]} {
-	send_user "\nWARNING: This test is incompatible with ALPS systems\n"
-	exit $exit_code
-} elseif {[test_serial]} {
-	send_user "\nWARNING: This test is incompatible with serial system\n"
-	exit 0
-} elseif {[test_bluegene]} {
-	send_user "\nWARNING: This test is incompatible with bluegene system\n"
-	exit 0
-} elseif { [test_xcpu] } {
-	send_user "\nWARNING: This test is incompatible with XCPU systems\n"
-	exit 0
-}
-
-if {[test_launch_poe]} {
-	set node_name_env MP_I_UPMD_HOSTNAME
-} else {
-	set node_name_env SLURMD_NODENAME
-}
-
-#
-# Run two tasks per idle node in default partition, up to 64 tasks
-#
-set node_cnt 2
-set partition [default_partition]
-set task_cnt [available_nodes $partition "idle"]
-if {$task_cnt < $node_cnt} {
-	send_user "\nWARNING: not enough nodes currently available ($task_cnt avail, $node_cnt needed)\n"
-	exit 0
-}
-send_user "TEST 1\n"
-incr task_cnt $task_cnt
-if {$task_cnt > 64} {
-	set task_cnt 64
-}
-
-for {set inx 0} {$inx < $task_cnt} {incr inx} {
-	set host($inx) ""
-}
-set srun_pid [spawn $srun -n $task_cnt -l -t1 --spread-job $bin_printenv $node_name_env]
-expect {
-	-re "($number): *($alpha_numeric_under)" {
-		set host($expect_out(1,string)) $expect_out(2,string)
-		exp_continue
-	}
-	timeout {
-		send_user "\nFAILURE: srun not responding\n"
-		slow_kill $srun_pid
-		exit 1
-	}
-	eof {
-		wait
-	}
-}
-
-# Determine how the tasks were distributed across nodes
-set host_cnt 0
-for {set inx 0} {$inx < $task_cnt} {incr inx} {
-	for {set jnx 0} {$jnx < $host_cnt} {incr jnx} {
-		if {[string compare $host($inx) $unique($jnx)] == 0} {
-			incr uniq_cnt($jnx)
-			break
-		}
-	}
-	if {$jnx >= $host_cnt} {
-		set unique($jnx) $host($inx)
-		set uniq_cnt($jnx) 1
-		incr host_cnt
-	}
-}
-
-set min_ntasks $uniq_cnt(0)
-set max_ntasks $uniq_cnt(0)
-for {set jnx 0} {$jnx < $host_cnt} {incr jnx} {
-	if {$min_ntasks > $uniq_cnt($jnx)} {
-		set min_ntasks $uniq_cnt($jnx)
-	}
-	if {$max_ntasks < $uniq_cnt($jnx)} {
-		set max_ntasks $uniq_cnt($jnx)
-	}
-}
-send_user "Spread $task_cnt tasks over $host_cnt hosts\n"
-send_user "Ntasks per node range from $min_ntasks to $max_ntasks\n"
-
-# Make sure tasks were spead over a reasonable number of nodes
-set min_nodes $node_cnt
-if {$min_nodes > $task_cnt} {
-	set min_nodes $task_cnt
-}
-incr min_nodes -1
-if {$min_nodes < 1} {
-	set $min_nodes 1
-}
-if {$host_cnt < $min_nodes} {
-	send_user "FAILURE: Did not spread job across nodes ($host_cnt < $min_nodes)\n"
-	exit 1
-}
-
-#
-# Run one tasks per idle node in default partition, up to 64 tasks
-#
-send_user "\n\nTEST 2\n"
-set task_cnt $host_cnt
-for {set inx 0} {$inx < $task_cnt} {incr inx} {
-	set host($inx) ""
-}
-set srun_pid [spawn $srun -n $task_cnt -l -t1 --spread-job --ntasks-per-node=1 $bin_printenv $node_name_env]
-expect {
-	-re "($number): *($alpha_numeric_under)" {
-		set host($expect_out(1,string)) $expect_out(2,string)
-		exp_continue
-	}
-	timeout {
-		send_user "\nFAILURE: srun not responding\n"
-		slow_kill $srun_pid
-		exit 1
-	}
-	eof {
-		wait
-	}
-}
-
-# Determine how the tasks were distributed across nodes
-set host_cnt 0
-for {set inx 0} {$inx < $task_cnt} {incr inx} {
-	for {set jnx 0} {$jnx < $host_cnt} {incr jnx} {
-		if {[string compare $host($inx) $unique($jnx)] == 0} {
-			incr uniq_cnt($jnx)
-			break
-		}
-	}
-	if {$jnx >= $host_cnt} {
-		set unique($jnx) $host($inx)
-		set uniq_cnt($jnx) 1
-		incr host_cnt
-	}
-}
-
-set min_ntasks $uniq_cnt(0)
-set max_ntasks $uniq_cnt(0)
-for {set jnx 0} {$jnx < $host_cnt} {incr jnx} {
-	if {$min_ntasks > $uniq_cnt($jnx)} {
-		set min_ntasks $uniq_cnt($jnx)
-	}
-	if {$max_ntasks < $uniq_cnt($jnx)} {
-		set max_ntasks $uniq_cnt($jnx)
-	}
-}
-send_user "Spread $task_cnt tasks over $host_cnt hosts\n"
-send_user "Ntasks per node range from $min_ntasks to $max_ntasks\n"
-
-# Make sure tasks were spead over a reasonable number of nodes
-if {$host_cnt < $task_cnt} {
-	send_user "FAILURE: Did not spread job across nodes ($host_cnt < $task_cnt)\n"
-	exit 1
-}
-send_user "SUCCESS\n"
-exit 0