From 79f4ef80daf0bd4ed9ef05004cb601e690d9ddb0 Mon Sep 17 00:00:00 2001
From: Nathan Yee <nyee32@schedmd.com>
Date: Mon, 24 Feb 2014 13:56:48 -0800
Subject: [PATCH] Add tests for additional QOS and assoc limits

bug 491
---
 testsuite/expect/Makefile.am |   8 ++
 testsuite/expect/Makefile.in |   8 ++
 testsuite/expect/inc21.30.10 |  80 ++++++++++++++
 testsuite/expect/inc21.30.11 |  78 +++++++++++++
 testsuite/expect/inc21.30.12 |  80 ++++++++++++++
 testsuite/expect/inc21.30.13 |  79 +++++++++++++
 testsuite/expect/inc21.30.14 |  79 +++++++++++++
 testsuite/expect/inc21.30.15 |  89 +++++++++++++++
 testsuite/expect/inc21.30.16 |  89 +++++++++++++++
 testsuite/expect/inc21.30.2  |  92 ++++++++--------
 testsuite/expect/inc21.30.9  |  80 ++++++++++++++
 testsuite/expect/test21.30   | 207 +++++++++++++++++++++++++++++------
 12 files changed, 890 insertions(+), 79 deletions(-)
 create mode 100644 testsuite/expect/inc21.30.10
 create mode 100644 testsuite/expect/inc21.30.11
 create mode 100644 testsuite/expect/inc21.30.12
 create mode 100644 testsuite/expect/inc21.30.13
 create mode 100644 testsuite/expect/inc21.30.14
 create mode 100644 testsuite/expect/inc21.30.15
 create mode 100644 testsuite/expect/inc21.30.16
 create mode 100644 testsuite/expect/inc21.30.9

diff --git a/testsuite/expect/Makefile.am b/testsuite/expect/Makefile.am
index 2b6d1fe7072..a0ccb5a737a 100644
--- a/testsuite/expect/Makefile.am
+++ b/testsuite/expect/Makefile.am
@@ -430,6 +430,14 @@ EXTRA_DIST = \
 	inc21.30.6                      \
 	inc21.30.7                      \
 	inc21.30.8                      \
+	inc21.30.9			\
+	inc21.30.10			\
+	inc21.30.11			\
+	inc21.30.12			\
+	inc21.30.13			\
+	inc21.30.14			\
+	inc21.30.15			\
+	inc21.30.16			\
 	test22.1			\
 	inc22.1.1                       \
 	inc22.1.2                       \
diff --git a/testsuite/expect/Makefile.in b/testsuite/expect/Makefile.in
index 0e2c0601f95..7b36b1f2d63 100644
--- a/testsuite/expect/Makefile.in
+++ b/testsuite/expect/Makefile.in
@@ -814,6 +814,14 @@ EXTRA_DIST = \
 	inc21.30.6                      \
 	inc21.30.7                      \
 	inc21.30.8                      \
+	inc21.30.9			\
+	inc21.30.10			\
+	inc21.30.11			\
+	inc21.30.12			\
+	inc21.30.13			\
+	inc21.30.14			\
+	inc21.30.15			\
+	inc21.30.16			\
 	test22.1			\
 	inc22.1.1                       \
 	inc22.1.2                       \
diff --git a/testsuite/expect/inc21.30.10 b/testsuite/expect/inc21.30.10
new file mode 100644
index 00000000000..00247e41942
--- /dev/null
+++ b/testsuite/expect/inc21.30.10
@@ -0,0 +1,80 @@
+############################################################################
+# Purpose: Test of SLURM functionality
+#          to be called from test21.30
+#          Tests if the GrpCpuRunMins limit is enforced
+############################################################################
+# Copyright (C) 2014 SchedMD LLC
+# Written by Nathan Yee <nyee32@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+
+proc inc21_30_10 { } {
+
+	global salloc srun acct bin_sleep grpcpurunmin_num number exit_code
+
+	send_user "\nStarting GrpCpuRunMins test\n"
+	set job_id1 0
+	set job_id2 0
+	set target_time [expr $grpcpurunmin_num / 4]
+
+	# There may be some GrpCPUmins data left from previous jobs, so use less than the limit
+	spawn $salloc --account=$acct -n4 -t[expr $target_time - 3] $srun $bin_sleep 10
+	expect {
+		-re "Granted job allocation ($number)" {
+			set job_id1 $expect_out(1,string)
+			send_user "\nJob $job_id1 has been submitted\n"
+		}
+		-re "Pending job allocation ($number)" {
+			set job_id1 $expect_out(1,string)
+			send_user "\nFAILURE: Job should be pending, but is not.\n"
+			set exit_code 1
+		}
+		timeout {
+			send_user "\nFAILURE: salloc not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	spawn $salloc --account=$acct -n4 -t[expr $target_time + 1] $srun $bin_sleep 10
+	expect {
+		-re "Pending job allocation ($number)" {
+			set job_id2 $expect_out(1,string)
+			send_user "\nJob $job_id2 is waiting for resources. This is expected.\n"
+		}
+		-re "Granted job allocation ($number)" {
+			send_user "\nFAILURE: Job should be pending, but is not.\n"
+			set exit_code 1
+		}
+		timeout {
+			send_user "\nFAILURE: salloc not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	# Cancel both jobs
+	cancel_job $job_id1
+	cancel_job $job_id2
+}
diff --git a/testsuite/expect/inc21.30.11 b/testsuite/expect/inc21.30.11
new file mode 100644
index 00000000000..64420ec299d
--- /dev/null
+++ b/testsuite/expect/inc21.30.11
@@ -0,0 +1,78 @@
+############################################################################
+# Purpose: Test of SLURM functionality
+#          to be called from test21.30
+#          Tests if the GrpMem limit is enforced
+############################################################################
+# Copyright (C) 2013 SchedMD LLC
+# Written by Nathan Yee <nyee32@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+
+proc inc21_30_11 { } {
+
+	global salloc srun acct bin_sleep grpmem_num number exit_code
+
+	send_user "\nStarting GrpMem test\n"
+	set job_id1 0
+	set job_id2 0
+
+	spawn $salloc --account=$acct -N1 --mem=$grpmem_num -t1 $srun $bin_sleep 10
+	expect {
+		-re "Granted job allocation ($number)" {
+			set job_id1 $expect_out(1,string)
+			send_user "\nJob $job_id1 has been submitted\n"
+		}
+		-re "Pending job allocation ($number)" {
+			set job_id1 $expect_out(1,string)
+			send_user "\nFAILURE: Job should have run but did not.\n"
+			set exit_code 1
+		}
+		timeout {
+			send_user "\nFAILURE: salloc not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	spawn $salloc --account=$acct -N1 --mem=$grpmem_num -t1 $srun $bin_sleep 10
+	expect  {
+		-re "Pending job allocation ($number)" {
+			set job_id2 $expect_out(1,string)
+			send_user "\nJob $job_id2 is waiting for resources. This is expected.\n"
+		}
+		-re "Granted job allocation ($number)" {
+			send_user "\nFAILURE: Job should have pended, but did not.\n"
+			set exit_code 1
+		}
+		timeout {
+			send_user "\nFAILURE: salloc not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	# Cancel both jobs
+	cancel_job $job_id1
+	cancel_job $job_id2
+}
diff --git a/testsuite/expect/inc21.30.12 b/testsuite/expect/inc21.30.12
new file mode 100644
index 00000000000..8f9b22d2127
--- /dev/null
+++ b/testsuite/expect/inc21.30.12
@@ -0,0 +1,80 @@
+############################################################################
+# Purpose: Test of SLURM functionality
+#          to be called from test21.30
+#          Tests if the Grpwall limit is enforced
+############################################################################
+# Copyright (C) 2014 SchedMD LLC
+# Written by Nathan Yee <nyee32@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+
+proc inc21_30_12 { } {
+	global salloc srun acct bin_sleep grpwall_num number exit_code
+
+	send_user "\nStarting Grpwall test\n"
+	set job_id1 0
+	set job_id2 0
+	set timeout 120
+
+	spawn $salloc --account=$acct -N2 -t2 $srun $bin_sleep [expr $grpwall_num * 60]
+	expect {
+		-re "Granted job allocation ($number)" {
+			set job_id1 $expect_out(1,string)
+			send_user "\nJob $job_id1 has been submitted\n"
+			exp_continue
+		}
+		-re "Pending job allocation ($number)" {
+			set job_id1 $expect_out(1,string)
+			send_user "\nFAILURE: This job should not be pending, but is not\n"
+			set exit_code 1
+		}
+		timeout {
+			send_user "\nFAILURE: salloc not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	spawn $salloc --account=$acct -N2 -t2 $srun $bin_sleep [expr $grpwall_num * 2]
+	expect {
+		-re "Pending job allocation ($number)" {
+			set job_id2 $expect_out(1,string)
+			send_user "\nJob $job_id2 is waiting for resources. This is exected.\n"
+		}
+		-re "Granted job allocation ($number)"{
+			set job_id2 $expect_out(1,string)
+			send_user "\nFAILURE: Job $job_id2 should be waiting for resources, but is not.\n"
+			set exit_code 1
+		}
+		timeout {
+			send_user "\nFAILURE: salloc is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	# Cancel jobs
+	cancel_job $job_id1
+	cancel_job $job_id2
+}
diff --git a/testsuite/expect/inc21.30.13 b/testsuite/expect/inc21.30.13
new file mode 100644
index 00000000000..5ea7198a2ef
--- /dev/null
+++ b/testsuite/expect/inc21.30.13
@@ -0,0 +1,79 @@
+############################################################################
+# Purpose: Test of SLURM functionality
+#          to be called from test21.30
+#          Tests if the MaxCpuMin limit is enforced
+############################################################################
+# Copyright (C) 2014 SchedMD LLC
+# Written by Nathan Yee <nyee32@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+
+proc inc21_30_13 { } {
+	global salloc srun acct bin_sleep maxcpumin_num number exit_code
+
+	send_user "\nStarting MaxCpuMins test\n"
+	set job_id1 0
+	set job_id2 0
+
+	spawn $salloc --account=$acct -n$maxcpumin_num -t1 $srun $bin_sleep 10
+	expect {
+		-re "Granted job allocation ($number)" {
+			set job_id1 $expect_out(1,string)
+			send_user "\nJob $job_id1 has been submitted\n"
+		}
+		-re "Pending job allocation ($number)" {
+			set job_id1 $expect_out(1,string)
+			send_user "\nFAILURE: This job should not be pending\n"
+			set exit_code 1
+		}
+		timeout {
+			send_user "\nFAILURE: salloc not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	spawn $salloc --account=$acct -n[expr $maxcpumin_num + 1] -t1 $srun $bin_sleep 10
+	expect {
+		-re " Pending job allocation ($number)" {
+			set job_id2 $expect_out(1,string)
+			send_user "\nJob $job_id2 is waiting for resources. This is expected.\n"
+
+		}
+		-re "Granted job allocation ($number)" {
+			set job_id2 $expect_out(1,string)
+			send_user "\nFAILURE: Job $job_id2 should be pending but is not.\n"
+			set exit_code 1
+		}
+		timeout {
+			send_user "\nFAILURE: salloc is not responing\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	# Cancel jobs
+	cancel_job $job_id1
+	cancel_job $job_id2
+}
diff --git a/testsuite/expect/inc21.30.14 b/testsuite/expect/inc21.30.14
new file mode 100644
index 00000000000..2c4990f01b0
--- /dev/null
+++ b/testsuite/expect/inc21.30.14
@@ -0,0 +1,79 @@
+############################################################################
+# Purpose: Test of SLURM functionality
+#          to be called from test21.30
+#          Tests if the MaxWall limit is enforced
+############################################################################
+# Copyright (C) 2013 SchedMD LLC
+# Written by Nathan Yee <nyee32@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+
+proc inc21_30_14 { } {
+	global salloc srun acct bin_sleep number maxwall_num exit_code
+
+	send_user "\nStarting MaxWall test\n"
+	set job_id 0
+
+	spawn $salloc --account=$acct -N1 -t$maxwall_num $srun $bin_sleep 10
+	expect {
+		-re "Granted job allocation ($number)" {
+			set job_id $expect_out(1,string)
+			send_user "\nJob $job_id has been submitted\n"
+		}
+		-re "Pending job allocation ($number)" {
+			set job_id $expect_out(1,string)
+			send_user "\nFAILURE: job $job_id should not be waiting for resources\n"
+			set exit_code 1
+		}
+		timeout {
+			send_user "\nFAILURE: salloc is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	# Cancel job
+	cancel_job $job_id
+
+	spawn $salloc --account=$acct -N1 -t[expr $maxwall_num + 1] $srun $bin_sleep 10
+	expect {
+		-re "Pending job allocation ($number)" {
+			set job_id $expect_out(1,string)
+			send_user "\nJob $job_id is waiting for resources. This is expected\n"
+		}
+		-re "Granted job allocation ($number)" {
+			set job_id $expect_out(1,string)
+			send_user "\nFAILURE: job $job_id should be pending for resources, but is not\n"
+			set exit_code 1
+		}
+		timeout {
+			send_user "\nFAILURE: salloc is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	# Cancel job
+	cancel_job $job_id
+}
diff --git a/testsuite/expect/inc21.30.15 b/testsuite/expect/inc21.30.15
new file mode 100644
index 00000000000..c88067ddf69
--- /dev/null
+++ b/testsuite/expect/inc21.30.15
@@ -0,0 +1,89 @@
+############################################################################
+# Purpose: Test of SLURM functionality
+#          to be called from test21.30
+#          Tests if the MaxCPUsPU limit is enforced
+############################################################################
+# Copyright (C) 2013 SchedMD LLC
+# Written by Nathan Yee <nyee32@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+
+proc inc21_30_15 { } {
+	global salloc srun exit_code bin_sleep number maxcpuspu_num acct
+
+	send_user "\nStarting MaxCPUsPerUser test\n"
+
+	set job_id1 0
+	set job_id2 0
+
+	spawn $salloc --account=$acct -t1 -n$maxcpuspu_num $srun $bin_sleep 20
+	expect {
+		-re "Granted job allocation ($number)" {
+			set job_id1 $expect_out(1,string)
+			send_user "\nJob $job_id1 has been submitted\n"
+		}
+		-re "job ($number) queued and waiting for resources" {
+			set job_id1 $expect_out(1,string)
+			send_user "\nFAILURE: Job $job_id1 show not be waiting for resources, but is not\n"
+			set exit_code 1
+		}
+		timeout {
+			send_user "\nFAILURE: salloc is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if { $job_id1 == 0 } {
+		send_user "\nFAILUIRE: Job was not submitted\n"
+		set exit_code 1
+	}
+
+	spawn $salloc --account=$acct -t1 -n$maxcpuspu_num $srun $bin_sleep 20
+	expect {
+		-re "job ($number) queued and waiting for resources" {
+			set job_id2 $expect_out(1,string)
+			send_user "\nJob $job_id2 is waiting for resource. This is expected do not worry\n"
+		}
+		-re "Granted job allocation ($number)" {
+			set job_id2 $expect_out(1,string)
+			send_user "\nFAILURE: Job $job_id2 should be pending, but is not\n"
+			set exit_code 1
+		}
+		timeout {
+			send_user "\nFAILURE: salloc is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if { $job_id2 == 0 } {
+		send_user "\nFAILURE: Job was not submitted\n"
+		set exit_code 1
+	}
+
+	# Cancel Jobs
+	cancel_job $job_id1
+	cancel_job $job_id2
+}
diff --git a/testsuite/expect/inc21.30.16 b/testsuite/expect/inc21.30.16
new file mode 100644
index 00000000000..ae192326e1f
--- /dev/null
+++ b/testsuite/expect/inc21.30.16
@@ -0,0 +1,89 @@
+############################################################################
+# Purpose: Test of SLURM functionality
+#          to be called from test21.30
+#          Tests if the MaxNodesPU limit is enforced
+############################################################################
+# Copyright (C) 2014 SchedMD LLC
+# Written by Nathan Yee <nyee32@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+
+proc inc21_30_16 { } {
+
+	global salloc srun acct number bin_sleep maxnodespu_num exit_code
+
+	send_user "\nStarting MaxNodesPerUser test\n"
+
+	set job_id 0
+
+	spawn $salloc --account=$acct -N$maxnodespu_num -t1 --exclusive $srun $bin_sleep 2
+	expect {
+		-re "Granted job allocation ($number)" {
+			set job_id $expect_out(1,string)
+			send_user "\nJob $job_id was submitted\n"
+		}
+		-re "error" {
+			send_user "\nFAILURE: Job allocation should not have failed\n"
+			set exit_code 1
+		}
+		timeout {
+			send_user "\nFAILURE: salloc is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if { $job_id == 0 } {
+		send_user "\nFAILURE: Job was not submitted\n"
+		set exit_code 1
+	} else {
+		# Cancel job
+		cancel_job $job_id
+	}
+
+	spawn $salloc --account=$acct -N[expr $maxnodespu_num + 1] -t1 --exclusive $srun $bin_sleep 2
+	expect {
+		-re "Pending job allocation ($number)" {
+			set job_id $expect_out(1,string)
+			send_user "\nThis error is expected. Do not worry\n"
+		}
+		-re "Granted job allocation" {
+			send_user "\nFAILURE: salloc should not have granted an allocation\n"
+			set exit_code 1
+		}
+		timeout {
+			send_user "\nFAILURE: salloc is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if { $job_id == 0 } {
+		send_user "\nFAILURE: Job was not submitted\n"
+		set exit_code 1
+	} else {
+		# Cancel job
+		cancel_job $job_id
+	}
+}
diff --git a/testsuite/expect/inc21.30.2 b/testsuite/expect/inc21.30.2
index 9f3d590b1db..4f04c0d671e 100644
--- a/testsuite/expect/inc21.30.2
+++ b/testsuite/expect/inc21.30.2
@@ -1,6 +1,6 @@
 # Purpose: Test of SLURM functionality
-#          to be called from test21.30
-#          Tests if the GrpCpus limit is enforced
+#		to be called from test21.30
+#		Tests if the GrpCpus limit is enforced
 #
 #
 ############################################################################
@@ -30,53 +30,55 @@ proc inc21_30_2 {} {
 
 #test GrpCpus
 
-     global srun salloc acct bin_sleep jobmatch job_id1 job_id2 number exit_code
-     global grcpu_num
+	global srun salloc acct bin_sleep jobmatch job_id1 job_id2 number exit_code
+	global grcpu_num
 
-     send_user "\nStarting GrpCPUs limit test\n\n"
-     set jobmatch 0
-     spawn $salloc --account=$acct -n$grcpu_num $srun $bin_sleep 5
-     expect {
-	     -re "Granted job allocation ($number)" {
-		     set job_id1 $expect_out(1,string)
-		     send_user "\njob $job_id1 has been submitted\n"
-		     incr jobmatch
-	     }
-	     timeout {
-		     send_user "\nFAILURE: salloc is not reponding\n"
-		     set exit_code 1
-	     }
-	     eof {
-		     wait
-	     }
-     }
+	set job_id1 0
+	set job_id2 0
 
-     spawn $salloc --account=$acct -n1 $srun $bin_sleep 5
-     expect {
-	     -re "job ($number)" {
-		     set job_id2 $expect_out(1,string)
-		     send_user "\njob $job_id2 is waiting for resources. This is expected\n"
-		     incr jobmatch
-	     }
-	     timeout {
-		     send_user "\nFAILURE: salloc is not reponding\n"
-		     set exit_code 1
-	     }
-	     eof {
-		     wait
-	     }
-     }
+	send_user "\nStarting GrpCPUs limit test\n\n"
+	set jobmatch 0
+	spawn $salloc --account=$acct -n$grcpu_num $srun $bin_sleep 5
+	expect {
+		-re "Granted job allocation ($number)" {
+			set job_id1 $expect_out(1,string)
+			send_user "\njob $job_id1 has been submitted\n"
+			incr jobmatch
+		}
+		timeout {
+			send_user "\nFAILURE: salloc is not reponding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
 
-     if {$jobmatch==0} {
-	     send_user "\nFAILURE: jobs were not submitted\n"
-	     set exit_code 1
-     }
+	spawn $salloc --account=$acct -n1 $srun $bin_sleep 5
+	expect {
+		-re "job ($number)" {
+			set job_id2 $expect_out(1,string)
+			send_user "\njob $job_id2 is waiting for resources. This is expected\n"
+			incr jobmatch
+		}
+		timeout {
+			send_user "\nFAILURE: salloc is not reponding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
 
-     # checks the state of the job
-     check_state $job_id2
+	if {$jobmatch==0} {
+		send_user "\nFAILURE: jobs were not submitted\n"
+		set exit_code 1
+	}
 
-     # cancels remaining jobs
-     cancel_job $job_id1
-     cancel_job $job_id2
+	# Checks the state of the job
+	check_state $job_id2
 
+	# Cancels remaining jobs
+	cancel_job $job_id1
+	cancel_job $job_id2
 }
diff --git a/testsuite/expect/inc21.30.9 b/testsuite/expect/inc21.30.9
new file mode 100644
index 00000000000..30b9661b473
--- /dev/null
+++ b/testsuite/expect/inc21.30.9
@@ -0,0 +1,80 @@
+############################################################################
+# Purpose: Test of SLURM functionality
+#          to be called from test21.30
+#          Tests if the GrpCPUmins limit is enforced
+############################################################################
+# Copyright (C) 2014 SchedMD LLC
+# Written by Nathan Yee <nyee32@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+
+proc inc21_30_9 { } {
+	global salloc srun acct bin_sleep grpcpumin_num number qostest exit_code
+
+	send_user "\nStarting GrpCpuMins test\n"
+	set job_id1 0
+	set job_id2 0
+	set timeout [expr $grpcpumin_num * 120]
+
+	spawn $salloc --account=$acct --qos=$qostest -n2 --time=$grpcpumin_num $srun $bin_sleep [expr $grpcpumin_num * 60]
+	expect {
+		-re "Granted job allocation ($number)" {
+			set job_id1 $expect_out(1,string)
+			send_user "\nJob $job_id1 has been submitted\n"
+			exp_continue
+		}
+		-re "Pending job allocation ($number)" {
+			set job_id1 $expect_out(1,string)
+			send_user "\nFAILURE: This job should not be pending\n"
+			set exit_code 1
+		}
+		timeout {
+			send_user "\nFAILURE: salloc not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	spawn $salloc --account=$acct --qos=$qostest -n2 -t2 $srun $bin_sleep 120
+	expect {
+		-re "Pending job allocation ($number)" {
+			set job_id2 $expect_out(1,string)
+			send_user "\nJob $job_id2 is waiting for resources. This is expected.\n"
+		}
+		-re "Granted job allocation ($number)" {
+			set job_id2 $expect_out(1,string)
+			send_user "\nFAILURE: Job should be pending but is not.\n"
+			set exit_code 1
+		}
+		timeout {
+			send_user "\nFAILURE: salloc not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	# Cancel jobs
+	cancel_job $job_id1
+	cancel_job $job_id2
+}
diff --git a/testsuite/expect/test21.30 b/testsuite/expect/test21.30
index 140f3400178..47dbf14e13b 100755
--- a/testsuite/expect/test21.30
+++ b/testsuite/expect/test21.30
@@ -38,6 +38,14 @@ source ./inc21.30.5
 source ./inc21.30.6
 source ./inc21.30.7
 source ./inc21.30.8
+source ./inc21.30.9
+source ./inc21.30.10
+source ./inc21.30.11
+source ./inc21.30.12
+source ./inc21.30.13
+source ./inc21.30.14
+source ./inc21.30.15
+source ./inc21.30.16
 
 set test_id	"21.30"
 set exit_code	0
@@ -49,18 +57,38 @@ set grn         GrpNodes
 set grn_num     2
 set grcpu       GrpCpus
 set grcpu_num   10
+set grpcpumin   GrpCPUMins
+set grpcpumin_num  1
+# Set grpcpurunmin_num to a multiple of CPUs per core to work with most configurations
+# Also make sure that it is at least 4 so we can add and subtract from it
+set grpcpurunmin GrpCPURunMins
+set grpcpurunmin_num 40
 set grjobs      GrpJobs
 set grjobs_num  2
+set grpmem      GrpMem
+set grpmem_num  100
 set grsub       GrpSubmit
 set grsub_num   2
+set grpwall     GrpWall
+set grpwall_num 1
 set maxcpu      MaxCpus
 set maxcpu_num  10
+# Set maxcpumin_num to a multiple of CPUs per core to work with most configurations
+set maxcpumin   MaxCPUMins
+set maxcpumin_num 2
+set maxwall     MaxWall
+set maxwall_num 2
+set maxcpuspu   MaxCPUSPerUser
+set maxcpuspu_num 2
 set maxnodes    MaxNodes
 set maxnode_num 10
+set maxnodespu  MaxNodesPerUser
+set maxnodespu_num 2
 set maxjobs     MaxJobs
 set maxjobs_num 2
 set maxjobsub   MaxSubmitJobs
 set maxjobsub_num 2
+set time_spacing 10
 
 print_header $test_id
 
@@ -92,12 +120,12 @@ proc check_state { job } {
 }
 
 # modifies the QoS
-proc mod_qos { node cpu job sub mcpu mnode mjobs mjobsub } {
+proc mod_qos { node cpu job sub mcpu mnode mjobs mjobsub gcpumin gcpurunmin gmem gwall mcpumin mwall mcpupu mnodespu } {
 
-	global sacctmgr exit_code qosname qostest grn grcpu grjobs grsub maxcpu maxnodes maxjobs maxjobsub
+	global sacctmgr exit_code qosname qostest grn grcpu grjobs grsub maxcpu maxnodes maxjobs maxjobsub grpcpumin grpcpurunmin grpmem grpwall maxcpumin maxwall maxcpuspu maxnodespu
 
 	set change_cnt 0
-	spawn $sacctmgr -i modify qos where $qosname=$qostest set $grn=$node $grcpu=$cpu $grjobs=$job $grsub=$sub $maxcpu=$mcpu $maxnodes=$mnode $maxjobs=$mjobs $maxjobsub=$mjobsub
+	spawn $sacctmgr -i modify qos where $qosname=$qostest set $grn=$node $grcpu=$cpu $grjobs=$job $grsub=$sub $maxcpu=$mcpu $maxnodes=$mnode $maxjobs=$mjobs $maxjobsub=$mjobsub $grpcpumin=$gcpumin $grpcpurunmin=$gcpurunmin $grpmem=$gmem $grpwall=$gwall $maxcpumin=$mcpumin $maxwall=$mwall $maxcpuspu=$mcpupu $maxnodespu=$mnodespu
 	expect {
 		-re "Modified qos" {
 			incr change_cnt
@@ -179,7 +207,43 @@ if { [test_limits_enforced] == 0 } {
 	exit 0
 }
 
-#gets user
+# Remove any vestigial accounts or QOS
+spawn $sacctmgr -i delete qos $qostest
+expect {
+	-re "Deleting QOS(s)" {
+		exp_continue
+	}
+	-re "Error" {
+		send_user "\nFAILURE: QOS was not deleted\n"
+	}
+	timeout {
+		send_user "\nFAILURE: sacctmgr is not responding\n"
+	}
+	eof {
+		wait
+	}
+}
+
+# Delete account
+spawn $sacctmgr -i delete account $acct
+expect {
+	-re "Deleting accounts" {
+		exp_continue
+	}
+	-re "Error" {
+		send_user "\nFAILURE: account was not deleted\n"
+		set exit_code 1
+	}
+	timeout {
+		send_user "\nFAILURE: sacctmgr is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+# Gets user
 spawn $bin_id -u -n
 expect {
 	-re "($alpha_numeric_under)" {
@@ -208,7 +272,7 @@ expect {
 	}
 }
 
-#add account with qos
+# Add account with qos
 set acctmatch 0
 spawn $sacctmgr -i add account $acct qos=$qostest
 expect {
@@ -216,11 +280,6 @@ expect {
 		incr acctmatch
 		exp_continue
 	}
-	-re "Nothing new added" {
-		send_user "\nWARNING: vestigial account $acct found\n"
-		incr acctmatch
-		exp_continue
-	}
 	timeout {
 		send_user "\nFAILURE: sacctmgr is not responding\n"
 		set exit_code 1
@@ -235,7 +294,7 @@ if {$acctmatch != 1} {
 	exit 1
 }
 
-#add user to account
+# Add user to account
 spawn $sacctmgr -i create user name=$user_name account=$acct
 expect {
 	timeout {
@@ -246,28 +305,27 @@ expect {
 	}
 }
 
+mod_qos $grn_num -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1
 
-mod_qos $grn_num -1 -1 -1 -1 -1 -1 -1
-
-sleep 1
-#test GrpNode limit
+sleep $time_spacing
+# Test GrpNode limit
 inc21_30_1
 if {$exit_code != 0} {
 	endit
 }
 
-mod_qos -1 $grcpu_num -1 -1 -1 -1 -1 -1
+mod_qos -1 $grcpu_num -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1
 
-sleep 1
-#test GrpCpus
+sleep $time_spacing
+# Test GrpCpus
 inc21_30_2
 if {$exit_code != 0} {
 	endit
 }
 
-mod_qos -1 -1 $grjobs_num -1 -1 -1 -1 -1
+mod_qos -1 -1 $grjobs_num -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1
 
-sleep 1
+sleep $time_spacing
 # test GrpJob limits
 inc21_30_3
 if {$exit_code != 0} {
@@ -275,9 +333,9 @@ if {$exit_code != 0} {
 }
 
 
-mod_qos -1 -1 -1 $grsub_num -1 -1 -1 -1
+mod_qos -1 -1 -1 $grsub_num -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1
 
-sleep 1
+sleep $time_spacing
 # test GrpSubmit
 inc21_30_4
 if {$exit_code != 0} {
@@ -285,40 +343,121 @@ if {$exit_code != 0} {
 }
 
 
-mod_qos -1 -1 -1 -1 $maxcpu_num -1 -1 -1
+mod_qos -1 -1 -1 -1 $maxcpu_num -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1
 
-sleep 1
-#test MaxCpus limits
+sleep $time_spacing
+# Test MaxCpus limits
 inc21_30_5
 if {$exit_code != 0} {
 	endit
 }
 
-mod_qos -1 -1 -1 -1 -1 $maxnode_num -1 -1
+mod_qos -1 -1 -1 -1 -1 $maxnode_num -1 -1 -1 -1 -1 -1 -1 -1 -1 -1
 
-sleep 1
-#test MaxNode limit
+sleep $time_spacing
+# Test MaxNode limit
 inc21_30_6
 if {$exit_code != 0} {
 	endit
 }
 
-mod_qos -1 -1 -1 -1 -1 -1 $maxjobs_num -1
+mod_qos -1 -1 -1 -1 -1 -1 $maxjobs_num -1 -1 -1 -1 -1 -1 -1 -1 -1
 
-sleep 1
-#test MaxJobs limit
+sleep $time_spacing
+# Test MaxJobs limit
 inc21_30_7
 if {$exit_code != 0} {
 	endit
 }
 
-mod_qos -1 -1 -1 -1 -1 -1 -1 $maxjobsub_num
+mod_qos -1 -1 -1 -1 -1 -1 -1 $maxjobsub_num -1 -1 -1 -1 -1 -1 -1 -1
 
-sleep 1
-#test MaxJobsSubmits limit
+sleep $time_spacing
+# Test MaxJobsSubmits limit
 inc21_30_8
 if {$exit_code != 0} {
 	endit
 }
 
+#
+# NOTE: This test for Group CPU Mins does not always run properly
+# due to a timing issue caused by a decay timer from the limit once it
+# has been reached. The decay also affects other QOS timed limits, so
+# other parts of this test may not work properly if the group cpu
+# run mins test is used.
+#
+if {0} {
+	mod_qos -1 -1 -1 -1 -1 -1 -1 -1 $grpcpumin_num -1 -1 -1 -1 -1 -1 -1
+
+	sleep $time_spacing
+	# Test GroupCPUMins
+	inc21_30_9
+	if {$exit_code != 0 } {
+		endit
+	}
+}
+
+mod_qos -1 -1 -1 -1 -1 -1 -1 -1 -1 $grpcpurunmin_num -1 -1 -1 -1 -1 -1
+
+sleep $time_spacing
+# Test GroupCPURunMins
+inc21_30_10
+if {$exit_code != 0 } {
+	endit
+}
+
+mod_qos -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 $grpmem_num -1 -1 -1 -1 -1
+
+sleep $time_spacing
+# Test Group Memory
+inc21_30_11
+if {$exit_code != 0 } {
+	endit
+}
+
+mod_qos -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 $grpwall_num -1 -1 -1 -1
+
+sleep $time_spacing
+# Test Group wall
+inc21_30_12
+if {$exit_code != 0 } {
+	endit
+}
+
+mod_qos -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 $maxcpumin_num -1 -1 -1
+
+sleep $time_spacing
+# Test Max Cpu Mins
+inc21_30_13
+if {$exit_code != 0 } {
+	endit
+}
+
+mod_qos -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 $maxwall_num -1 -1
+
+sleep $time_spacing
+# Test Max Wall
+inc21_30_14
+if {$exit_code != 0 } {
+	endit
+}
+
+mod_qos -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 $maxcpuspu_num -1
+
+sleep $time_spacing
+# Test Max CPUs Per User
+inc21_30_15
+if {$exit_code != 0 } {
+	endit
+}
+
+mod_qos -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 $maxnodespu_num
+
+sleep $time_spacing
+# Test MaxNodesPerUser
+inc21_30_16
+if {$exit_code != 0 } {
+	endit
+}
+
 endit
-- 
GitLab