From 0023f6314860d26fcd772972c92b5a4762c5e64c Mon Sep 17 00:00:00 2001
From: Moe Jette <jette1@llnl.gov>
Date: Thu, 1 Jun 2006 22:42:45 +0000
Subject: [PATCH] svn merge -r8270:8272
 https://eris/svn/slurm/branches/slurm-1.1
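
Fix a bug in the select/cons_res allocation strategy. The best-fit
selection loops in select_p_job_test() marked a node as allocated
(bit_set(), rem_nodes--, max_nodes--) before its available CPU count
was known, so a fully allocated node could consume one of the job's
node slots while contributing no CPUs. Compute avail_cpus first, skip
nodes with avail_cpus <= 0, and only then commit the node and deduct
from rem_cpus.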

---
 NEWS                                          |  1 +
 src/plugins/select/cons_res/select_cons_res.c | 48 +++++++++++--------
 2 files changed, 30 insertions(+), 19 deletions(-)

diff --git a/NEWS b/NEWS
index eee6274f03a..57f014e8420 100644
--- a/NEWS
+++ b/NEWS
@@ -18,6 +18,7 @@ documents those changes that are of interest to users and admins.
     This fix is needed for MVAPICH2 use.
  -- Add "-V" options to slurmctld and slurmd to print version number and exit.
  -- Fix scalability bug in sbcast.
+ -- Fix cons_res allocation bug: skip nodes with no available CPUs.
 
 * Changes in SLURM 1.1.0
 ========================
diff --git a/src/plugins/select/cons_res/select_cons_res.c b/src/plugins/select/cons_res/select_cons_res.c
index 72ce60f7c04..54d923109d2 100644
--- a/src/plugins/select/cons_res/select_cons_res.c
+++ b/src/plugins/select/cons_res/select_cons_res.c
@@ -841,17 +841,13 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t * bitmap,
 			}
 			for (i = (best_fit_req - 1);
 			     i >= consec_start[best_fit_location]; i--) {
-				int allocated_cpus;
+				int allocated_cpus, avail_cpus;
 				if ((max_nodes <= 0)
 				||  ((rem_nodes <= 0) && (rem_cpus <= 0)))
 					break;
 				if (bit_test(bitmap, i)) 
-				   continue;
-				bit_set(bitmap, i);
-				rem_nodes--;
-				max_nodes--;
+					continue;
 
-				allocated_cpus = 0;
 				if (!test_only) {
 					rc = select_g_get_select_nodeinfo
 					    (select_node_ptr[i].node_ptr,
@@ -859,32 +855,37 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t * bitmap,
 					     &allocated_cpus);
 					if (rc != SLURM_SUCCESS)
 						goto cleanup;
+				} else {
+					allocated_cpus = 0;
 				}
 
-				if (select_fast_schedule)
-					rem_cpus -=
+				if (select_fast_schedule) {
+					avail_cpus =
 					    select_node_ptr[i].node_ptr->
 					    config_ptr->cpus -
 					    allocated_cpus;
-				else
-					rem_cpus -=
+				} else {
+					avail_cpus =
 					    select_node_ptr[i].node_ptr->
 					    cpus - allocated_cpus;
+				}
+				if (avail_cpus <= 0)
+					continue;
+				rem_cpus -= avail_cpus;
+				bit_set(bitmap, i);
+				rem_nodes--;
+				max_nodes--;
 			}
 		} else {
 			for (i = consec_start[best_fit_location];
 			     i <= consec_end[best_fit_location]; i++) {
-				int allocated_cpus;
+				int allocated_cpus, avail_cpus;
 				if ((max_nodes <= 0)
 				|| ((rem_nodes <= 0) && (rem_cpus <= 0)))
 					break;
 				if (bit_test(bitmap, i))
 					continue;
-				bit_set(bitmap, i);
-				rem_nodes--;
-				max_nodes--;
 
-				allocated_cpus = 0;
 				if (!test_only) {
 					rc = select_g_get_select_nodeinfo
 					    (select_node_ptr[i].node_ptr,
@@ -892,17 +893,26 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t * bitmap,
 					     &allocated_cpus);
 					if (rc != SLURM_SUCCESS)
 						goto cleanup;
+				} else {
+					allocated_cpus = 0;
 				}
 
-				if (select_fast_schedule)
-					rem_cpus -=
+				if (select_fast_schedule) {
+					avail_cpus =
 					    select_node_ptr[i].node_ptr->
 					    config_ptr->cpus -
 					    allocated_cpus;
-				else
-					rem_cpus -=
+				} else {
+					avail_cpus =
 					    select_node_ptr[i].node_ptr->
 					    cpus - allocated_cpus;
+				}
+				if (avail_cpus <= 0)
+					continue;
+				rem_cpus -= avail_cpus;
+				bit_set(bitmap, i);
+				rem_nodes--;
+				max_nodes--;
 			}
 		}
 
-- 
GitLab
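
The essence of the fix is an ordering change: compute a node's available
CPU count and reject exhausted nodes before committing the node to the
job. The sketch below is a minimal standalone illustration of that
ordering, not the plugin code: the node_info struct, its selected flag,
and pick_nodes() are hypothetical stand-ins for select_node_ptr[], the
bitmap (bit_test()/bit_set()), and the best-fit loops in
select_p_job_test(), and the allocated-CPU count is a plain field rather
than a select_g_get_select_nodeinfo() call.

#include <stdbool.h>
#include <stdio.h>

struct node_info {		/* hypothetical stand-in for select_node_ptr[i] */
	int  cpus;		/* CPUs configured on the node */
	int  allocated_cpus;	/* CPUs already in use by other jobs */
	bool selected;		/* stand-in for bit_test()/bit_set() on bitmap */
};

static void pick_nodes(struct node_info *nodes, int node_cnt,
		       int rem_nodes, int rem_cpus, int max_nodes)
{
	for (int i = 0; i < node_cnt; i++) {
		if ((max_nodes <= 0) ||
		    ((rem_nodes <= 0) && (rem_cpus <= 0)))
			break;
		if (nodes[i].selected)
			continue;

		int avail_cpus = nodes[i].cpus - nodes[i].allocated_cpus;
		if (avail_cpus <= 0)
			continue;	/* the fix: never commit an exhausted node */

		/* only now does the node count against the job's limits */
		rem_cpus -= avail_cpus;
		nodes[i].selected = true;
		rem_nodes--;
		max_nodes--;
		printf("selected node %d (%d CPUs free)\n", i, avail_cpus);
	}
}

int main(void)
{
	struct node_info nodes[] = {
		{ .cpus = 4, .allocated_cpus = 4, .selected = false }, /* busy: skipped */
		{ .cpus = 4, .allocated_cpus = 1, .selected = false }, /* 3 CPUs free  */
		{ .cpus = 8, .allocated_cpus = 0, .selected = false }, /* 8 CPUs free  */
	};
	pick_nodes(nodes, 3, /* rem_nodes */ 2, /* rem_cpus */ 6, /* max_nodes */ 2);
	return 0;
}

With this sample data the fully allocated node is skipped instead of
silently consuming one of the job's two node slots, which is exactly the
failure mode the old bit_set-before-avail_cpus ordering allowed.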