diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index 078d942039ad0d94d5fde2d0b51db629730b1a52..cd5838f93a17a4ddb6f70d7ef5818cdf4cd6810c 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -3731,7 +3731,7 @@ _valid_job_part_acct(job_desc_msg_t *job_desc, struct part_record *part_ptr)
 {
 	int i;
 
-	if (part_ptr->allow_account_array) {
+	if (part_ptr->allow_account_array && part_ptr->allow_account_array[0]) {
 		int match = 0;
 		for (i = 0; part_ptr->allow_account_array[i]; i++) {
 			if (strcmp(part_ptr->allow_account_array[i],
@@ -3747,7 +3747,8 @@ _valid_job_part_acct(job_desc_msg_t *job_desc, struct part_record *part_ptr)
 				  job_desc->account);
 			return ESLURM_INVALID_ACCOUNT;
 		}
-	} else if (part_ptr->deny_account_array) {
+	} else if (part_ptr->deny_account_array &&
+		   part_ptr->deny_account_array[0]) {
 		int match = 0;
 		for (i = 0; part_ptr->deny_account_array[i]; i++) {
 			if (strcmp(part_ptr->deny_account_array[i],
diff --git a/testsuite/expect/Makefile.am b/testsuite/expect/Makefile.am
index bda469d6aa75a936e70f1f265a55278022dad4a4..012ebe40a41eac537816838b8de98d4cf7642cdb 100644
--- a/testsuite/expect/Makefile.am
+++ b/testsuite/expect/Makefile.am
@@ -134,6 +134,10 @@ EXTRA_DIST = \
 	test2.13 \
 	test2.14 \
 	test2.15 \
+	test2.16 \
+	test2.17 \
+	test2.18 \
+	test2.19 \
 	test3.1 \
 	test3.2 \
 	test3.3 \
diff --git a/testsuite/expect/Makefile.in b/testsuite/expect/Makefile.in
index e23996fb80ff5c5b0494fb2ccc4769940a9f3c25..b15745aeb45b0e28077164ecfd0083909af09c0a 100644
--- a/testsuite/expect/Makefile.in
+++ b/testsuite/expect/Makefile.in
@@ -475,6 +475,10 @@ EXTRA_DIST = \
 	test2.13 \
 	test2.14 \
 	test2.15 \
+	test2.16 \
+	test2.17 \
+	test2.18 \
+	test2.19 \
 	test3.1 \
 	test3.2 \
 	test3.3 \
diff --git a/testsuite/expect/README b/testsuite/expect/README
index e4b46d59b3056110d7421d5b08fd958701cb1c91..45b54325a7cdf9e587f9617b075bb05b33f0cba4 100644
--- a/testsuite/expect/README
+++ b/testsuite/expect/README
@@ -229,6 +229,10 @@ test2.12 Validate scontrol show topology option.
 test2.13 Validate scontrol update command for job steps.
 test2.14 Validate scontrol update size of running job.
 test2.15 Validate scontrol update size of running job with some running tasks.
+test2.16 Validate scontrol displays and updates Allow/Deny accounts.
+test2.17 Validate scontrol displays and updates Allow/Deny Qos.
+test2.18 Validate that Allow/Deny accounts are enforced.
+test2.19 Validate that Allow/Deny Qos are enforced.
 
 
 test3.# Testing of scontrol options (best run as SlurmUser or root).
diff --git a/testsuite/expect/test2.16 b/testsuite/expect/test2.16
new file mode 100755
index 0000000000000000000000000000000000000000..25003c7d09e1b594b7d1360eff817505a34dbbf6
--- /dev/null
+++ b/testsuite/expect/test2.16
@@ -0,0 +1,225 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Validate scontrol displays and updates Allow/Deny accounts.
+#
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2013 SchedMD LLC
+# Written by Nathan Yee <nyee32@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+# +# SLURM is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along +# with SLURM; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +############################################################################ +source ./globals + +set test_id "test2.16" +set part_name "$test_id\_part" +set acct_name "$test_id\_acct" +set exit_code 0 + +print_header $test_id + +# +# Check accounting config and bail if not found. +# +if { [test_account_storage] == 0 } { + send_user "\nWARNING: This test can't be run without a usable AccountStorageType\n" + exit 0 +} +if { [string compare [check_accounting_admin_level] "Administrator"] } { + send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME set admin=admin.\n" + exit 0 +} + +proc set_part_val {part_type part_val} { + + global scontrol part_name exit_code + + spawn $scontrol update partitionname=$part_name $part_type=$part_val + expect { + -re "Error" { + send_user "\nFAILURE: $part_type was not set\n" + set exit_code 1 + } + timeout { + send_user "\nFAILURE: scontrol is not responding\n" + set exit_code 1 + } + eof { + wait + } + } +} + +proc check_part {part_type part_val} { + + global scontrol part_name exit_code + + set val_found 0 + spawn $scontrol show partition=$part_name + expect { + -re "$part_type=$part_val" { + set val_found 1 + exp_continue + } + timeout { + send_user "\nFAILURE: scontrol is not responding\n" + set exit_code 1 + } + eof { + wait + } + } + + if {$val_found != 1} { + send_user "\nFAILURE: $part_type was not set to $part_val\n" + set exit_code 1 + } +} + +proc delete_part { } { + + global scontrol sacctmgr part_name acct_name + + spawn $scontrol delete partition=$part_name + expect { + -re "error" { + send_user "\nFAILURE: scontrol did not remove partition\n" + set exit_code 1 + exp_continue + } + timeout { + send_user "FAILURE: scontrol is not responding\n" + set exit_code 1 + } + eof { + wait + } + } + + set del_acct 0 + spawn $sacctmgr -i delete account $acct_name + expect { + -re "Deleting accounts" { + set del_acct 1 + exp_continue + } + timeout { + send_user "\nFAILURE: sacctmgr is not responding\n" + set exit_code 1 + } + eof { + wait + } + } + + if {$del_acct != 1} { + send_user "\nFAILURE: Account was not deleted\n" + set exit_code 1 + } + +} + +spawn $scontrol create partition=$part_name +expect { + -re "error" { + send_user "\nFAILURE: partition was not created\n" + set exit_code 1 + } + timeout { + send_user "\nFAILURE: scontrol is not reponding\n" + set exit_code 1 + } + eof { + wait + } +} + +set make_acct 0 +spawn $sacctmgr -i create account $acct_name +expect { + -re "Adding Account" { + set make_acct 1 + exp_continue + } + timeout { + send_user "\nFAILURE: sacctmgr is not responding\n" + set exit_code 1 + } + eof { + wait + } +} +if { $make_acct != 1 } { + send_user "\nFAILURE: Account was not added\n" + set exit_code 1 +} + +# +# Set AllowAccount +# +set_part_val allowaccount $acct_name + 
+# +# Check that AllowAccounts is set in the partition +# +check_part AllowAccounts $acct_name + +# +# Set AllowAccounts back to all +# +set_part_val allowaccount ALL + +# +# Check that AllowAccounts is set back to all +# +check_part AllowAccounts ALL + +# +# Set DenyAccount +# +set_part_val denyaccount $acct_name + +# +# Check that DenyAccount was set +# +check_part DenyAccounts $acct_name + +# +# Set DenyAccount to none +# +set_part_val denyaccount none + +# +# Check that DenyAccount was set to ALL +# +check_part DenyAccounts none + +# +# Delete the partition and the Account +# +delete_part + +if {$exit_code == 0} { + send_user "\nSUCCESS\n" +} +exit $exit_code diff --git a/testsuite/expect/test2.17 b/testsuite/expect/test2.17 new file mode 100755 index 0000000000000000000000000000000000000000..87401e95eb505580a6774353752e022b55bf17dc --- /dev/null +++ b/testsuite/expect/test2.17 @@ -0,0 +1,228 @@ +#!/usr/bin/expect +############################################################################ +# Purpose: Test of SLURM functionality +# Validate scontrol displays and updates Allow/Deny Qos. +# +# +# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR +# "FAILURE: ..." otherwise with an explanation of the failure, OR +# anything else indicates a failure mode that must be investigated. +############################################################################ +# Copyright (C) 2013 SchedMD LLC +# Written by Nathan Yee <nyee32@schedmd.com> +# +# This file is part of SLURM, a resource management program. +# For details, see <http://slurm.schedmd.com/>. +# Please also read the included file: DISCLAIMER. +# +# SLURM is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along +# with SLURM; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +############################################################################ +source ./globals + +set test_id "test2.17" +set qos_name "$test_id\_qos" +set part_name "$test_id\_part" +set exit_code 0 + +print_header $test_id + +# +# Check accounting config and bail if not found. 
+# +if { [test_account_storage] == 0 } { + send_user "\nWARNING: This test can't be run without a usable AccountStorageType\n" + exit 0 +} +if { [string compare [check_accounting_admin_level] "Administrator"] } { + send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME set admin=admin.\n" + exit 0 +} + +proc set_part_val {part_type part_val} { + + global scontrol part_name exit_code + + spawn $scontrol update partitionname=$part_name $part_type=$part_val + expect { + -re "Error" { + send_user "\nFAILURE: $part_type was not set\n" + set exit_code 1 + } + timeout { + send_user "\nFAILURE: scontrol is not responding\n" + set exit_code 1 + } + eof { + wait + } + } +} + +proc check_part {part_type part_val} { + + global scontrol part_name exit_code + + set val_found 0 + spawn $scontrol show partition=$part_name + expect { + -re "$part_type=$part_val" { + set val_found 1 + exp_continue + } + timeout { + send_user "\nFAILURE: scontrol is not responding\n" + set exit_code 1 + } + eof { + wait + } + } + + if {$val_found != 1} { + send_user "\nFAILURE: $part_type was not set to $part_val\n" + set exit_code 1 + } +} + +proc delete_part { } { + + global scontrol sacctmgr part_name qos_name + + spawn $scontrol delete partition=$part_name + expect { + -re "error" { + send_user "\nFAILURE: scontrol did not remove " + "partition\n" + set exit_code 1 + exp_continue + } + timeout { + send_user "FAILURE: scontrol is not responding\n" + set exit_code 1 + } + eof { + wait + } + } + + set del_acct 0 + spawn $sacctmgr -i delete qos $qos_name + expect { + -re "Deleting QOS" { + set del_acct 1 + exp_continue + } + timeout { + send_user "\nFAILURE: sacctmgr is not responding\n" + set exit_code 1 + } + eof { + wait + } + } + + if {$del_acct != 1} { + send_user "\nFAILURE: Qos was not deleted\n" + set exit_code 1 + } + +} + +spawn $scontrol create partition=$part_name +expect { + -re "error" { + send_user "\nFAILURE: partition was not created\n" + set exit_code 1 + } + timeout { + send_user "\nFAILURE: scontrol is not reponding\n" + set exit_code 1 + } + eof { + wait + } +} + +set create_qos 0 +spawn $sacctmgr -i create qos $qos_name +expect { + -re "Adding QOS" { + set create_qos 1 + exp_continue + } + timeout { + send_user "\nFAILURE: sacctmgr is not responding\n" + set exit_code 1 + } + eof { + wait + } +} + +if {$create_qos != 1} { + send_user "\nFAILURE: QOS was not created\n" + set exit_code 1 +} + +# +# Set AllowQos +# +set_part_val AllowQos $qos_name + +# +# Check that AllowQos is set +# +check_part AllowQos $qos_name + +# +# Set AllowQos to ALL +# +set_part_val AllowQos ALL + +# +# Check that Qos is set +# +check_part AllowQos ALL + +# +# Set DenyQos +# +set_part_val DenyQos $qos_name + +# +# Check that DenyQos is set +# +check_part DenyQos $qos_name + +# +# set DenyQos to none +# +set_part_val DenyQos none + +# +# Check that DenyQos is set to none +# +check_part DenyQos none + +# +# Delete partition and Qos +# +delete_part + + +if {$exit_code == 0} { + send_user "\nSUCCESS\n" +} +exit $exit_code diff --git a/testsuite/expect/test2.18 b/testsuite/expect/test2.18 new file mode 100755 index 0000000000000000000000000000000000000000..661bdc1add3f65c9edee5c230ea2acdf2ea1cbd2 --- /dev/null +++ b/testsuite/expect/test2.18 @@ -0,0 +1,337 @@ +#!/usr/bin/expect +############################################################################ +# Purpose: Test of SLURM functionality +# Validate that Allow/Deny accounts are enforced. 
+# +# +# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR +# "FAILURE: ..." otherwise with an explanation of the failure, OR +# anything else indicates a failure mode that must be investigated. +############################################################################ +# Copyright (C) 2013 SchedMD LLC +# Written by Nathan Yee <nyee32@schedmd.com> +# +# This file is part of SLURM, a resource management program. +# For details, see <http://slurm.schedmd.com/>. +# Please also read the included file: DISCLAIMER. +# +# SLURM is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along +# with SLURM; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +############################################################################ +source ./globals + +set test_id "test2.18" +set user_name "" +set node_name "" +set host_name "" +set acct_g "acct_good" +set acct_b "acct_bad" +set part_name "$test_id\_part" +set exit_code 0 + +print_header $test_id + +# +# Check accounting config and bail if not found. +# +if { [test_account_storage] == 0 } { + send_user "\nWARNING: This test can't be run without a usable AccountStorageType\n" + exit 0 +} +if { [string compare [check_accounting_admin_level] "Administrator"] } { + send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME set admin=admin.\n" + exit 0 +} + +proc set_part_val {part_type part_val} { + + global scontrol part_name exit_code + + spawn $scontrol update partitionname=$part_name $part_type=$part_val + expect { + -re "Error" { + send_user "\nFAILURE: $part_type was not set\n" + set exit_code 1 + } + timeout { + send_user "\nFAILURE: scontrol is not responding\n" + set exit_code 1 + } + eof { + wait + } + } +} + +proc delete_part { } { + + global scontrol sacctmgr part_name acct_g acct_b exit_code + + set del_part 0 + spawn $sacctmgr -i delete account $acct_g $acct_b + expect { + -re "Deleting accounts" { + set del_part 1 + exp_continue + } + timeout { + send_user "\nFAILURE: sacctmgr is not responding\n" + set exit_code 1 + } + eof { + wait + } + } + + spawn $scontrol delete partition=$part_name + expect { + -re "error" { + send_user "\nFAILURE: scontrol did not remove " + "partition\n" + set exit_code 1 + exp_continue + } + timeout { + send_user "FAILURE: scontrol is not responding\n" + set exit_code 1 + } + eof { + wait + } + } + + if {$del_part != 1} { + send_user "\nFAILURE: Account was not deleted\n" + set exit_code 1 + } + +} + +proc create_acct { acct } { + + global sacctmgr exit_code user_name + + set create_acct 0 + spawn $sacctmgr -i create account $acct + expect { + -re "Adding Account" { + set create_acct 1 + exp_continue + } + timeout { + send_user "\nFAILURE: sacctmgr is not responding\n" + set exit_code 1 + } + eof { + wait + } + } + + spawn $sacctmgr -i create user $user_name account=$acct + expect { + timeout { + send_user "\nFAILURE: sacctmgr is not responding\n" + set exit_code 1 + } + eof { + 
wait + } + } + + if {$create_acct !=1 } { + send_user "\nFAILURE: Account was not added\n" + set exit_code 1 + } + + +} + +proc test_part { acct part acct_con } { + + global srun host_name exit_code + + + set sub_job 0 + spawn $srun -A $acct -p $part hostname + expect { + -re "$host_name" { + set sub_job 1 + exp_continue + } + -re "error" { + set sub_job 2 + if { $acct_con == 1 && $sub_job == 2} { + send_user "\nThis error is expected\n" + } else { + send_user "\nFAILURE: This error should not have occured\n" + set exit_code 1 + } + exp_continue + } + timeout { + send_user "\nFAILURE: srun is not responding\n" + set exit_code 1 + } + eof { + wait + } + } + + +} + + +spawn $bin_id -un +expect { + -re "($alpha_numeric_under)" { + set user_name $expect_out(1,string) + exp_continue + } + timeout { + send_user "\nFAILURE: id is not responding\n" + set exit_code 1 + } + eof { + wait + } +} + +spawn hostname +expect { + + -re "($alpha_numeric_under)" { + set host_name $expect_out(1,string) + exp_continue + } + timeout { + send_user "\nFAILURE: hostname is not responding\n" + set exit_code 1 + } + eof { + wait + } +} + + +spawn $scontrol show node +expect { + -re "NodeName=($alpha_numeric_under)" { + set node_name $expect_out(1,string) + exp_continue + } + timeout { + send_user "\nFAILURE: scontrol is not responding\n" + set exit_code 1 + } + eof { + wait + } +} + +# Create partition +spawn $scontrol create partition=$part_name +expect { + -re "error" { + send_user "\nFAILURE: partition was not created\n" + set exit_code 1 + } + timeout { + send_user "\nFAILURE: scontrol is not reponding\n" + set exit_code 1 + } + eof { + wait + } +} + +# Add nodes to partition +spawn $scontrol update partition=$part_name nodes=$node_name +expect { + -re "error" { + send_user "\nFAILURE: partition was not created\n" + set exit_code 1 + } + timeout { + send_user "\nFAILURE: scontrol is not reponding\n" + set exit_code 1 + } + eof { + wait + } +} + +# NOTE: acct_g (good account) should always work and +# acct_b (bad account) should always cause an error + +# +# Create good account +# +create_acct $acct_g + +# +# Create bad account +# +create_acct $acct_b + +# +# Set Allow Account to good values +# +set_part_val allowaccount $acct_g + +######Testing AllowAccount##### +send_user "\nTesting AllowAccount\n" + +# +# Test partition with good values +# 0 = good test / 1 = bad test +# +test_part $acct_g $part_name 0 + +# +# Test partition with bad values +# 0 = good test / 1 = bad test +# +test_part $acct_b $part_name 1 + +# +# Set Allow Accounts to all and +# set Deny Account to bad value +# +set_part_val allowaccount ALL +set_part_val denyaccount $acct_b + + +######Testing DenyAccount##### +send_user "\nTesting DenyAccount\n" + +# +# Test partition with good values +# 0 = good test / 1 = bad test +# +test_part $acct_g $part_name 0 + +# +# Test partition with bad values +# 0 = good test / 1 = bad test +# +test_part $acct_b $part_name 1 + + +sleep 2 +delete_part + +if {$exit_code == 0} { + send_user "\nSUCCESS\n" +} +exit $exit_code diff --git a/testsuite/expect/test2.19 b/testsuite/expect/test2.19 new file mode 100755 index 0000000000000000000000000000000000000000..eae0bb1c73c045221cf7fc920aabc3484a820f10 --- /dev/null +++ b/testsuite/expect/test2.19 @@ -0,0 +1,368 @@ +#!/usr/bin/expect +############################################################################ +# Purpose: Test of SLURM functionality +# Validate that Allow/Deny Qos are enforced. 
+# +# +# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR +# "FAILURE: ..." otherwise with an explanation of the failure, OR +# anything else indicates a failure mode that must be investigated. +############################################################################ +# Copyright (C) 2013 SchedMD LLC +# Written by Nathan Yee <nyee32@schedmd.com> +# +# This file is part of SLURM, a resource management program. +# For details, see <http://slurm.schedmd.com/>. +# Please also read the included file: DISCLAIMER. +# +# SLURM is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along +# with SLURM; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +############################################################################ +source ./globals + +set test_id "test2.19" +set user_name "" +set node_name "" +set host_name "" +set acct1 "acct_1" +set acct2 "acct_2" +set qos_g "qos_good" +set qos_b "qos_bad" +set part_name "$test_id\_part" +set exit_code 0 + +print_header $test_id + +# +# Check accounting config and bail if not found. +# +if { [test_account_storage] == 0 } { + send_user "\nWARNING: This test can't be run without a usable AccountStorageType\n" + exit 0 +} +if { [string compare [check_accounting_admin_level] "Administrator"] } { + send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME set admin=admin.\n" + exit 0 +} + +proc set_part_val {part_type part_val} { + + global scontrol part_name exit_code + + spawn $scontrol update partitionname=$part_name $part_type=$part_val + expect { + -re "Error" { + send_user "\nFAILURE: $part_type was not set\n" + set exit_code 1 + } + timeout { + send_user "\nFAILURE: scontrol is not responding\n" + set exit_code 1 + } + eof { + wait + } + } +} + +proc delete_part { } { + + global scontrol sacctmgr part_name qos_g qos_b acct1 acct2 exit_code + + spawn $scontrol delete partition=$part_name + expect { + -re "error" { + send_user "\nFAILURE: scontrol did not remove " + "partition\n" + set exit_code 1 + exp_continue + } + timeout { + send_user "FAILURE: scontrol is not responding\n" + set exit_code 1 + } + eof { + wait + } + } + + set del_part 0 + spawn $sacctmgr -i delete qos $qos_g $qos_b + expect { + -re "Deleting QOS" { + incr del_part + exp_continue + } + timeout { + send_user "\nFAILURE: sacctmgr is not responding\n" + set exit_code 1 + } + eof { + wait + } + } + + spawn $sacctmgr -i delete account $acct1 $acct2 + expect { + -re "Deleting accounts" { + incr del_part + exp_continue + } + timeout { + send_user "\nFAILURE: sacctmgr is not responding\n" + set exit_code 1 + } + eof { + wait + } + } + + if {$del_part != 2} { + send_user "\nFAILURE: Qos/account was not deleted\n" + set exit_code 1 + } + +} + +proc create_qos { acct qos } { + + global sacctmgr user_name exit_code + + set create_qos 0 + spawn $sacctmgr -i create qos $qos + expect { + -re "Adding QOS" { + incr create_qos + exp_continue + } + timeout { + 
send_user "\nFAILURE: sacctmgr is not responding\n" + set exit_code 1 + } + eof { + wait + } + } + + + spawn $sacctmgr -i create account $acct qos=$qos + expect { + -re "Adding Account" { + incr create_qos + exp_continue + } + timeout { + send_user "\nFAILURE: sacctmgr is not responding\n" + set exit_code 1 + } + eof { + wait + } + } + + set create_acct 0 + spawn $sacctmgr -i create user $user_name account=$acct + expect { + timeout { + send_user "\nFAILURE: sacctmgr is not responding\n" + set exit_code 1 + } + eof { + wait + } + } + + if {$create_qos !=2} { + send_user "\nFAILURE: Qos was not set\n" + set exit_code 1 + } +} + + +proc test_part {acct qos part qos_con } { + + global srun host_name part_name exit_code + + + set sub_job 0 + spawn $srun -A $acct --qos $qos -p $part hostname + expect { + -re "$host_name" { + set sub_job 1 + exp_continue + } + -re "error" { + set sub_job 2 + if { $qos_con == 1 && $sub_job == 2} { + send_user "\nThis error is expected\n" + } else { + send_user "\nFAILURE: This error should not have occured\n" + set exit_code 1 + } + exp_continue + } + timeout { + send_user "\nFAILURE: srun is not responding\n" + set exit_code 1 + } + eof { + wait + } + } + + +} + + +spawn $bin_id -un +expect { + -re "($alpha_numeric_under)" { + set user_name $expect_out(1,string) + exp_continue + } + timeout { + send_user "\nFAILURE: id is not responding\n" + set exit_code 1 + } + eof { + wait + } +} + +spawn hostname +expect { + + -re "($alpha_numeric_under)" { + set host_name $expect_out(1,string) + exp_continue + } + timeout { + send_user "\nFAILURE: hostname is not responding\n" + set exit_code 1 + } + eof { + wait + } +} + + +spawn $scontrol show node +expect { + -re "NodeName=($alpha_numeric_under)" { + set node_name $expect_out(1,string) + exp_continue + } + timeout { + send_user "\nFAILURE: scontrol is not responding\n" + set exit_code 1 + } + eof { + wait + } +} + +# Create partition +spawn $scontrol create partition=$part_name +expect { + -re "error" { + send_user "\nFAILURE: partition was not created\n" + set exit_code 1 + } + timeout { + send_user "\nFAILURE: scontrol is not reponding\n" + set exit_code 1 + } + eof { + wait + } +} + +# Add nodes to partition +spawn $scontrol update partition=$part_name nodes=$node_name +expect { + -re "error" { + send_user "\nFAILURE: partition was not created\n" + set exit_code 1 + } + timeout { + send_user "\nFAILURE: scontrol is not reponding\n" + set exit_code 1 + } + eof { + wait + } +} + +# NOTE: qos_g (good qos) should always work and +# qos_b (bad qos) should always cause an error + +# +# Create good QOS +# +create_qos $acct1 $qos_g + +# +# Create bad QOS +# +create_qos $acct2 $qos_b + +# +# Set Allow Qos to good value +# +set_part_val allowqos $qos_g + +######Testing AllowQos###### +send_user "\nTesting AllowQos\n" + +# +# Test partition with good Qos +# 0 = good test / 1 = bad test +# +test_part $acct1 $qos_g $part_name 0 + +# +# Test partition with bad Qos +# 0 = good test / 1 = bad test +# +test_part $acct2 $qos_b $part_name 1 + +# +# Set Allow Qos back to all and set +# Deny Qos to bad value +# +set_part_val allowqos ALL +set_part_val denyqos $qos_b + +######Testing DenyQos##### +send_user "\nTesting DenyQos\n" + +# +# Test partition with good Qos +# 0 = good test / 1 = bad test +# +test_part $acct1 $qos_g $part_name 0 + +# +# Test partition with bad Qos +# 0 = good test / 1 = bad test +# +test_part $acct2 $qos_b $part_name 1 + +delete_part + +if {$exit_code == 0} { + send_user "\nSUCCESS\n" +} +exit $exit_code +
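
For reference, here is a minimal standalone sketch (not the SLURM source itself) of the behavior the job_mgr.c hunks above introduce: an allow or deny account list that is allocated but empty is now treated the same as no list at all, instead of an empty allow list rejecting every job. The helper name valid_part_acct(), the plain char ** lists, and the stand-in error value below are illustrative assumptions only.

/*
 * Sketch of the fixed allow/deny account check. A list that is present
 * but empty (first element NULL) is skipped, mirroring the added
 * "&& array[0]" tests in _valid_job_part_acct().
 */
#include <stdio.h>
#include <string.h>

#define OK                     0
#define ESLURM_INVALID_ACCOUNT 1   /* stand-in for the real error code */

static int valid_part_acct(char **allow_array, char **deny_array,
                           const char *account)
{
        int i;

        if (allow_array && allow_array[0]) {       /* non-empty allow list */
                for (i = 0; allow_array[i]; i++) {
                        if (strcmp(allow_array[i], account) == 0)
                                return OK;
                }
                return ESLURM_INVALID_ACCOUNT;     /* not on the allow list */
        } else if (deny_array && deny_array[0]) {  /* non-empty deny list */
                for (i = 0; deny_array[i]; i++) {
                        if (strcmp(deny_array[i], account) == 0)
                                return ESLURM_INVALID_ACCOUNT;
                }
        }
        return OK;
}

int main(void)
{
        char *empty[] = { NULL };                  /* allocated but empty */
        char *allow[] = { "acct_good", NULL };

        /* Without the [0] test, an empty (non-NULL) allow list rejected
         * every account; with it, the empty list is ignored. */
        printf("%d\n", valid_part_acct(empty, NULL, "acct_good")); /* 0 */
        printf("%d\n", valid_part_acct(allow, NULL, "acct_bad"));  /* 1 */
        return 0;
}

The same non-NULL-and-non-empty pattern is what the two hunks add for part_ptr->allow_account_array and part_ptr->deny_account_array, which is the case the new test2.16 through test2.19 scripts exercise by setting AllowAccounts/DenyAccounts and AllowQos/DenyQos back to ALL or none.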