From 9128476a45426dc0fa59449332a211dc3f81084c Mon Sep 17 00:00:00 2001
From: Tim Wickberg <tim@schedmd.com>
Date: Thu, 25 May 2017 10:49:00 -0600
Subject: [PATCH] Prevent a job tested on multiple partitions from being marked
 WHOLE_NODE_USER.

If a job was considered on a partition with ExclusiveUser=YES,
it would be marked as if it had been submitted with the
--exclusive=user flag, which would delay launching it on
ExclusiveUser=NO partitions and cause lower-than-expected
cluster usage.
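
For example, given two partitions (names here are illustrative only,
not from the report):

    PartitionName=excl Nodes=node[01-04] ExclusiveUser=YES
    PartitionName=open Nodes=node[05-16] ExclusiveUser=NO

a job submitted with "sbatch -p excl,open ..." that was merely tested
against excl could end up treated as exclusive-user even when it
ultimately ran in open.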

As a side effect, job_ptr->part_ptr->flags must now be tested
wherever WHOLE_NODE_USER is considered, not just
job_ptr->details->whole_node.

Bug 3771.
---
 NEWS                           |  2 ++
 src/slurmctld/node_mgr.c       | 14 +++++++++-----
 src/slurmctld/node_scheduler.c |  7 ++-----
 src/slurmctld/read_config.c    |  6 ++++--
 4 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/NEWS b/NEWS
index 081019150ce..9566a589e13 100644
--- a/NEWS
+++ b/NEWS
@@ -51,6 +51,8 @@ documents those changes that are of interest to users and administrators.
     to 365-days (as is done elsewhere), rather than 49710 days.
  -- Check if variable given to scontrol show job is a valid jobid.
  -- Fix WithSubAccounts option to not include WithDeleted unless requested.
+ -- Prevent a job tested on multiple partitions from being marked
+    WHOLE_NODE_USER.
 
 * Changes in Slurm 17.02.3
 ==========================
diff --git a/src/slurmctld/node_mgr.c b/src/slurmctld/node_mgr.c
index 34ba315b70b..b0f8025fdee 100644
--- a/src/slurmctld/node_mgr.c
+++ b/src/slurmctld/node_mgr.c
@@ -3583,8 +3583,10 @@ extern void make_node_alloc(struct node_record *node_ptr,
 		(node_ptr->no_share_job_cnt)++;
 	}
 
-	if (job_ptr->details &&
-	    (job_ptr->details->whole_node == WHOLE_NODE_USER)) {
+	if ((job_ptr->details &&
+	     (job_ptr->details->whole_node == WHOLE_NODE_USER)) ||
+	    (job_ptr->part_ptr &&
+	     (job_ptr->part_ptr->flags & PART_FLAG_EXCLUSIVE_USER))) {
 		node_ptr->owner_job_cnt++;
 		node_ptr->owner = job_ptr->user_id;
 	}
@@ -3787,9 +3789,11 @@ void make_node_idle(struct node_record *node_ptr,
 			xfree(node_ptr->mcs_label);
 		}
 	}
-
-	if (job_ptr && job_ptr->details &&
-	    (job_ptr->details->whole_node == WHOLE_NODE_USER)) {
+	if (job_ptr &&
+	    ((job_ptr->details &&
+	      (job_ptr->details->whole_node == WHOLE_NODE_USER)) ||
+	     (job_ptr->part_ptr &&
+	      (job_ptr->part_ptr->flags & PART_FLAG_EXCLUSIVE_USER)))) {
 		if (--node_ptr->owner_job_cnt == 0) {
 			node_ptr->owner = NO_VAL;
 			xfree(node_ptr->mcs_label);
diff --git a/src/slurmctld/node_scheduler.c b/src/slurmctld/node_scheduler.c
index 41f3fe18998..5c61cca0dac 100644
--- a/src/slurmctld/node_scheduler.c
+++ b/src/slurmctld/node_scheduler.c
@@ -900,11 +900,8 @@ extern void filter_by_node_owner(struct job_record *job_ptr,
 	struct node_record *node_ptr;
 	int i;
 
-	if ((job_ptr->details->whole_node == 0) &&
-	    (job_ptr->part_ptr->flags & PART_FLAG_EXCLUSIVE_USER))
-		job_ptr->details->whole_node = WHOLE_NODE_USER;
-
-	if (job_ptr->details->whole_node == WHOLE_NODE_USER) {
+	if ((job_ptr->details->whole_node == WHOLE_NODE_USER) ||
+	    (job_ptr->part_ptr->flags & PART_FLAG_EXCLUSIVE_USER)) {
 		/* Need to remove all nodes allocated to any active job from
 		 * any other user */
 		job_iterator = list_iterator_create(job_list);
diff --git a/src/slurmctld/read_config.c b/src/slurmctld/read_config.c
index 4c831d695b0..a9461b3ef5e 100644
--- a/src/slurmctld/read_config.c
+++ b/src/slurmctld/read_config.c
@@ -2168,8 +2168,10 @@ static int _sync_nodes_to_active_job(struct job_record *job_ptr)
 		} else if (bit_test(job_ptr->node_bitmap, i) == 0)
 			continue;
 
-		if (job_ptr->details &&
-		    (job_ptr->details->whole_node == WHOLE_NODE_USER)) {
+		if ((job_ptr->details &&
+		     (job_ptr->details->whole_node == WHOLE_NODE_USER)) ||
+		    (job_ptr->part_ptr &&
+		     (job_ptr->part_ptr->flags & PART_FLAG_EXCLUSIVE_USER))) {
 			node_ptr->owner_job_cnt++;
 			node_ptr->owner = job_ptr->user_id;
 		}
-- 
GitLab