From 1d799bb159f9cb3074a4f97d118536e71d67ba91 Mon Sep 17 00:00:00 2001
From: Moe Jette <jette1@llnl.gov>
Date: Fri, 24 Feb 2006 16:29:38 +0000
Subject: [PATCH] svn merge -r7220:7266
 https://eris.llnl.gov/svn/slurm/branches/slurm-1.0

---
 META                                          |  4 +-
 NEWS                                          | 14 +++++
 doc/html/bluegene.shtml                       | 12 ++--
 doc/html/selectplugins.shtml                  |  7 ++-
 doc/man/man1/sacct.1                          |  2 +-
 doc/man/man1/smap.1                           |  8 +--
 slurm.spec                                    |  2 +-
 slurm/slurm.h.in                              |  3 +-
 src/common/node_select.c                      | 23 +++----
 src/common/node_select.h                      |  5 +-
 .../select/bluegene/plugin/bg_job_place.c     | 63 ++++---------------
 .../select/bluegene/plugin/bg_job_place.h     |  5 +-
 src/plugins/select/bluegene/plugin/bluegene.c |  2 +-
 .../select/bluegene/plugin/select_bluegene.c  |  5 +-
 src/plugins/select/cons_res/select_cons_res.c |  4 +-
 src/plugins/select/linear/select_linear.c     |  4 +-
 src/plugins/switch/federation/federation.c    | 10 ++-
 src/sacct/sacct.c                             |  2 +-
 src/slurmctld/node_scheduler.c                | 22 +++----
 src/squeue/print.c                            |  2 +-
 src/srun/srun.c                               |  4 +-
 21 files changed, 90 insertions(+), 113 deletions(-)

diff --git a/META b/META
index 9b01fcf7f95..8eacdbb59a1 100644
--- a/META
+++ b/META
@@ -9,8 +9,8 @@
   Name:		slurm
   Major:	1
   Minor:	0
-  Micro:        2
-  Version:	1.0.2
+  Micro:        4
+  Version:	1.0.4
   Release:	1
   API_CURRENT:	9	
   API_AGE:	4
diff --git a/NEWS b/NEWS
index 7d1196a614c..5fd5dc33b89 100644
--- a/NEWS
+++ b/NEWS
@@ -17,6 +17,18 @@ documents those changes that are of interest to users and admins.
  -- Add hash tables for select/cons_res plugin (Susanne Balle, HP, 
     patch_02222006).
 
+* Changes in SLURM 1.0.5
+========================
+ -- Restructure logic for scheduling BlueGene small block jobs. Added
+    "test_only" flag to select_p_job_test() in select plugin.
+ -- Correct squeue "NODELIST" output for BlueGene small block jobs.
+
+* Changes in SLURM 1.0.4
+========================
+ -- Release job allocation if step creation fails (especially for BlueGene).
+ -- Fix bug in select/bluegene warm start with changed bglblock layout.
+ -- Fix bug for queuing full-system BlueGene jobs.
+
 * Changes in SLURM 1.0.3
 ========================
  -- Fix bug that could refuse to queue batch jobs for BlueGene system.
@@ -30,6 +42,8 @@ documents those changes that are of interest to users and admins.
     SLURM_JOBID environment variable is invalid.
  -- Federation driver: allow selection of a sepecific switch interface
     (sni0, sni1, etc.) with -euidevice/MP_EUIDEVICE.
+ -- Return an error for "scontrol reconfig" if there is already one in
+    progress.
  
 * Changes in SLURM 1.0.2
 ========================
diff --git a/doc/html/bluegene.shtml b/doc/html/bluegene.shtml
index 3ee61d8f526..c7a9a68d3ab 100644
--- a/doc/html/bluegene.shtml
+++ b/doc/html/bluegene.shtml
@@ -96,12 +96,12 @@ Values in the X dimension increase to the right.
 Values in the Z dimension increase down and toward the left.</p>
 
 <pre>
-   a a a a b b d d    ID JOBID PARTITION BGL_BLOCK USER   NAME ST TIME NODES NODELIST
-  a a a a b b d d     a  12345 batch     RMP0      joseph tst1 R  43:12   64 bg[000x333]
- a a a a b b c c      b  12346 debug     RMP1      chris  sim3 R  12:34   16 bg[420x533]
-a a a a b b c c       c  12350 debug     RMP2      danny  job3 R   0:12    8 bg[622x733]
-                      d  12356 debug     RMP3      dan    colu R  18:05   16 bg[600x731]
-   a a a a b b d d    e  12378 debug     RMP4      joseph asx4 R   0:34    4 bg[612x713]
+   a a a a b b d d    ID JOBID PARTITION BG_BLOCK USER   NAME ST TIME NODES NODELIST
+  a a a a b b d d     a  12345 batch     RMP0     joseph tst1 R  43:12   64 bg[000x333]
+ a a a a b b c c      b  12346 debug     RMP1     chris  sim3 R  12:34   16 bg[420x533]
+a a a a b b c c       c  12350 debug     RMP2     danny  job3 R   0:12    8 bg[622x733]
+                      d  12356 debug     RMP3     dan    colu R  18:05   16 bg[600x731]
+   a a a a b b d d    e  12378 debug     RMP4     joseph asx4 R   0:34    4 bg[612x713]
   a a a a b b d d
  a a a a b b c c
 a a a a b b c c
diff --git a/doc/html/selectplugins.shtml b/doc/html/selectplugins.shtml
index b583a356c40..5b3e9d9b2c6 100644
--- a/doc/html/selectplugins.shtml
+++ b/doc/html/selectplugins.shtml
@@ -157,7 +157,7 @@ SLURM_NO_CHANGE_IN_DATA if data has not changed since last packed, otherwise SLU
 
 <h3>Job-Specific Node Selection Functions</h3>
 <p class="commandline">int select_p_job_test (struct job_record *job_ptr,
-bitstr_t *bitmap, int min_nodes, int max_nodes);</p>
+bitstr_t *bitmap, int min_nodes, int max_nodes, bool test_only);</p>
 <p style="margin-left:.2in"><b>Description</b>: Given a job's scheduling requirement 
 specification and a set of nodes which might  be used to satisfy the request, identify 
 the nodes which "best" satify the request. Note that nodes being considered for allocation 
@@ -186,6 +186,9 @@ and partition specifications.<br>
 <span class="commandline"> max_nodes</span>&nbsp; &nbsp;&nbsp;(input) 
 maximum number of nodes to allocate to this job. Note this reflects both job 
 and partition specifications.<br>
+<span class="commandline"> test_only</span>&nbsp; &nbsp;&nbsp;(input)
+if set then we only want to test our ability to run the job at some time, 
+not necessarily now with currently available resources.<br>
 </p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR and future attempts may be made to schedule 
@@ -323,6 +326,6 @@ to maintain data format compatibility across different versions of the plugin.</
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 27 December 2005</p>
+<p style="text-align:center;">Last modified 23 February 2006</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/man/man1/sacct.1 b/doc/man/man1/sacct.1
index 61ec6bb0531..b862c0ec53c 100644
--- a/doc/man/man1/sacct.1
+++ b/doc/man/man1/sacct.1
@@ -353,7 +353,7 @@ The format of this field's output is as follows:
 .RS 
 .PD "0"
 .HP 
-\f2[DD:[hh:]]mm:ss.nn\fP 
+\f2[DD-[hh:]]mm:ss.nn\fP 
 .PD 
 .RE 
 .IP 
diff --git a/doc/man/man1/smap.1 b/doc/man/man1/smap.1
index 6c1d19e7267..8c021422941 100644
--- a/doc/man/man1/smap.1
+++ b/doc/man/man1/smap.1
@@ -47,7 +47,7 @@ Displays information about jobs running on system.
 Displays information about slurm partitions on the system
 .TP
 .I "b"
-Displays information about BGL partitions on the system
+Displays information about BG partitions on the system
 .TP
 .I "c"
 Displays current node states and allows users to configure the system.
@@ -84,8 +84,8 @@ scroll the window containing the text information.
 \fBAVAIL\fR
 Partition state: \fBup\fR or \fBdown\fR.
 .TP
-\fBBGL_BLOCK\fR
-BGL Block Name\fR.
+\fBBG_BLOCK\fR
+BlueGene Block Name\fR.
 .TP
 \fBCONN\fR
 Connection Type: \fBTORUS\fR or \fBMESH\fR or \fBSMALL\fR (for small blocks).
@@ -187,7 +187,7 @@ blocks.
 \fBOUTPUT\fR
 
 .TP
-.I "BGL_BLOCK" BGL Block Name.
+.I "BG_BLOCK" BlueGene Block Name.
 .TP
 .I "CONN"
 Connection Type: \fBTORUS\fR or \fBMESH\fR or \fBSMALL\fR (for small blocks).
diff --git a/slurm.spec b/slurm.spec
index 188bd7ab3b4..b45d0f61182 100644
--- a/slurm.spec
+++ b/slurm.spec
@@ -157,7 +157,7 @@ rm -f $RPM_BUILD_ROOT/%{_libdir}/slurm/*.{a,la}
 LIST=./slurm.files
 touch $LIST
 if [ -d /etc/init.d ]; then
-   echo "%config(noreplace) /etc/init.d/slurm" >> $LIST
+   echo "/etc/init.d/slurm" >> $LIST
 fi
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/task_affinity.so &&
    echo %{_libdir}/slurm/task_affinity.so >> $LIST
diff --git a/slurm/slurm.h.in b/slurm/slurm.h.in
index 6ab7fdb8e78..8cfa63e3eaf 100644
--- a/slurm/slurm.h.in
+++ b/slurm/slurm.h.in
@@ -193,8 +193,7 @@ enum select_data_type {
 	SELECT_DATA_NODE_USE,	/* data-> uint16_t node_use */
 	SELECT_DATA_CONN_TYPE,	/* data-> uint16_t connection_type */
 	SELECT_DATA_BLOCK_ID,	/* data-> char bg_block_id */
-	SELECT_DATA_QUARTER,	/* data-> uint32_t quarter */
-	SELECT_DATA_CHECKED	/* data-> uint16_t checked */
+	SELECT_DATA_QUARTER	/* data-> uint32_t quarter */
 };
 
 enum select_print_mode {
diff --git a/src/common/node_select.c b/src/common/node_select.c
index 046ab54c5db..b0811a6c839 100644
--- a/src/common/node_select.c
+++ b/src/common/node_select.c
@@ -10,7 +10,7 @@
  *
  *  $Id$
  *****************************************************************************
- *  Copyright (C) 2002 The Regents of the University of California.
+ *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
  *  UCRL-CODE-217948.
@@ -66,7 +66,7 @@ typedef struct slurm_select_ops {
 	int 		(*part_init)		( List part_list );
 	int		(*job_test)		( struct job_record *job_ptr,
 						  bitstr_t *bitmap, int min_nodes, 
-						  int max_nodes );
+						  int max_nodes, bool test_only );
 	int		(*job_begin)		( struct job_record *job_ptr );
 	int		(*job_ready)		( struct job_record *job_ptr );
 	int		(*job_fini)		( struct job_record *job_ptr );
@@ -109,12 +109,8 @@ struct select_jobinfo {
 	uint16_t node_use;	/* see enum node_use_type */
 	char *bg_block_id;	/* Blue Gene partition ID */
 	uint16_t magic;		/* magic number */
-	int32_t quarter;       /* for bg to tell which quarter of a small
+	int32_t quarter;  	/* for bg to tell which quarter of a small
 				   partition the job is running */ 
-	uint32_t checked;       /* for bg to tell plugin it already 
-				   checked and all partitions were full
-				   looking for best choice now */
-	
 };
 #endif
 
@@ -407,16 +403,17 @@ extern int select_g_get_info_from_plugin (enum select_data_info cr_info, void *d
  * IN/OUT bitmap - map of nodes being considered for allocation on input,
  *                 map of nodes actually to be assigned on output
  * IN min_nodes - minimum number of nodes to allocate to job
- * IN max_nodes - maximum number of nodes to allocate to job 
+ * IN max_nodes - maximum number of nodes to allocate to job
+ * IN test_only - if true, only test if ever could run, not necessarily now 
  */
 extern int select_g_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
-        int min_nodes, int max_nodes)
+        int min_nodes, int max_nodes, bool test_only)
 {
 	if (slurm_select_init() < 0)
 		return SLURM_ERROR;
 
 	return (*(g_select_context->ops.job_test))(job_ptr, bitmap, 
-		min_nodes, max_nodes);
+		min_nodes, max_nodes, test_only);
 }
 
 /*
@@ -566,9 +563,6 @@ extern int select_g_set_jobinfo (select_jobinfo_t jobinfo,
 	case SELECT_DATA_QUARTER:
 		jobinfo->quarter = *tmp_32;
 		break;
-	case SELECT_DATA_CHECKED:
-		jobinfo->checked = *tmp_16;
-		break;		
 	default:
 		debug("select_g_set_jobinfo data_type %d invalid", 
 		      data_type);
@@ -620,9 +614,6 @@ extern int select_g_get_jobinfo (select_jobinfo_t jobinfo,
 	case SELECT_DATA_QUARTER:
 		*tmp_32 = jobinfo->quarter;
 		break;
-	case SELECT_DATA_CHECKED:
-		*tmp_16 = jobinfo->checked;
-		break;
 	default:
 		debug("select_g_get_jobinfo data_type %d invalid", 
 		      data_type);
diff --git a/src/common/node_select.h b/src/common/node_select.h
index 90c10ebe34c..cd1102985f7 100644
--- a/src/common/node_select.h
+++ b/src/common/node_select.h
@@ -113,10 +113,11 @@ extern int select_g_job_init(List job_list);
  * IN/OUT bitmap - map of nodes being considered for allocation on input,
  *                 map of nodes actually to be assigned on output
  * IN min_nodes - minimum number of nodes to allocate to job
- * IN max_nodes - maximum number of nodes to allocate to job 
+ * IN max_nodes - maximum number of nodes to allocate to job
+ * IN test_only - if true, only test if ever could run, not necessarily now
  */
 extern int select_g_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
-	int min_nodes, int max_nodes);
+	int min_nodes, int max_nodes, bool test_only);
 
 /*
  * Note initiation of job is about to begin. Called immediately 
diff --git a/src/plugins/select/bluegene/plugin/bg_job_place.c b/src/plugins/select/bluegene/plugin/bg_job_place.c
index 07c7130c584..6029c707f28 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_place.c
+++ b/src/plugins/select/bluegene/plugin/bg_job_place.c
@@ -44,7 +44,8 @@ _STMT_START {		\
 static int  _find_best_block_match(struct job_record* job_ptr,
 				bitstr_t* slurm_block_bitmap,
 				int min_nodes, int max_nodes,
-				int spec, bg_record_t** found_bg_record);
+				int spec, bg_record_t** found_bg_record,
+				bool test_only);
 static void _rotate_geo(uint16_t *req_geometry, int rot_cnt);
 
 /* Rotate a 3-D geometry array through its six permutations */
@@ -87,47 +88,22 @@ static void _rotate_geo(uint16_t *req_geometry, int rot_cnt)
  */
 static int _find_best_block_match(struct job_record* job_ptr, 
 		bitstr_t* slurm_block_bitmap, int min_nodes, int max_nodes,
-		int spec, bg_record_t** found_bg_record)
+		int spec, bg_record_t** found_bg_record, bool test_only)
 {
 	ListIterator itr;
 	bg_record_t* record = NULL;
-	int i;
+	int i, job_running = 0;
 	uint16_t req_geometry[BA_SYSTEM_DIMENSIONS];
-	uint16_t conn_type, rotate, target_size = 1, checked;
+	uint16_t conn_type, rotate, target_size = 1;
 	uint32_t req_procs = job_ptr->num_procs;
 	int rot_cnt = 0;
 	uint32_t proc_cnt;
-	int job_running = 0;
        
 	if(!bg_list) {
 		error("_find_best_block_match: There is no bg_list");
 		return SLURM_ERROR;
 	}
 	
-	select_g_get_jobinfo(job_ptr->select_jobinfo,
-			     SELECT_DATA_CHECKED, &checked);
-	
-	/* have to check checked to see which time the node 
-	   scheduler is looking to see if it is runnable.  If checked >=2 
-	   we want to fall through to tell the scheduler that it is runnable
-	   just not right now. 
-	*/
-	slurm_mutex_lock(&block_state_mutex);
-	if((full_system_block->job_running != -1) && (checked < 2)) {
-		checked++;
-		slurm_mutex_unlock(&block_state_mutex);
-		select_g_set_jobinfo(job_ptr->select_jobinfo,
-				     SELECT_DATA_CHECKED, &checked);
-	
-		debug("_find_best_block_match none found "
-		      "full system running on block %s. %d",
-		      full_system_block->bg_block_id, 
-		      full_system_block->job_running);
-
-		return SLURM_ERROR;
-	}
-	slurm_mutex_unlock(&block_state_mutex);
-			
 	select_g_get_jobinfo(job_ptr->select_jobinfo,
 		SELECT_DATA_CONN_TYPE, &conn_type);
 	select_g_get_jobinfo(job_ptr->select_jobinfo,
@@ -146,30 +122,16 @@ static int _find_best_block_match(struct job_record* job_ptr,
 	debug("number of blocks to check: %d", list_count(bg_list));
      	itr = list_iterator_create(bg_list);
 	while ((record = (bg_record_t*) list_next(itr))) {
-		/* Check processor count */
-		/* have to check checked to see which time the node 
-		   scheduler is looking to see if it is runnable.  
-		   If checked >=2 we want to fall through to tell the 
-		   scheduler that it is runnable just not right now. 
-		*/
-		slurm_mutex_lock(&block_state_mutex);
-		debug3("job_running = %d", record->job_running);
-		if((record->job_running != -1) && (checked < 2)) {
+		if ((record->job_running != -1) && (!test_only)) {
 			job_running++;
-			debug("block %s in use by %s", 
-			      record->bg_block_id,
-			      record->user_name);
-			slurm_mutex_unlock(&block_state_mutex);
 			continue;
 		}
-		slurm_mutex_unlock(&block_state_mutex);
-	
 		if(record->full_block && job_running) {
 			debug("Can't run on full system block "
-			      "another block has a job running.");
+				"another block has a job running.");
 			continue;
 		}
-			
+
 		if (req_procs > record->cnodes_per_bp) {
 			/* We use the c-node count here. Job could start
 			 * twice this count if VIRTUAL_NODE_MODE, but this
@@ -260,9 +222,6 @@ static int _find_best_block_match(struct job_record* job_ptr,
 		break;
 	}
 	list_iterator_destroy(itr);
-	checked++;
-	select_g_set_jobinfo(job_ptr->select_jobinfo,
-			     SELECT_DATA_CHECKED, &checked);
 				
 	/* set the bitmap and do other allocation activities */
 	if (*found_bg_record) {
@@ -284,10 +243,11 @@ static int _find_best_block_match(struct job_record* job_ptr,
  *	be used
  * IN min_nodes, max_nodes  - minimum and maximum number of nodes to allocate
  *	to this job (considers slurm block limits)
+ * IN test_only - if true, only test if ever could run, not necessarily now
  * RET - SLURM_SUCCESS if job runnable now, error code otherwise
  */
 extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
-		      int min_nodes, int max_nodes)
+		      int min_nodes, int max_nodes, bool test_only)
 {
 	int spec = 1; /* this will be like, keep TYPE a priority, etc,  */
 	bg_record_t* record = NULL;
@@ -301,7 +261,8 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 	      max_nodes);
 	
 	if ((_find_best_block_match(job_ptr, slurm_block_bitmap, min_nodes, 
-				max_nodes, spec, &record)) == SLURM_ERROR) {
+				max_nodes, spec, &record, test_only)) 
+				== SLURM_ERROR) {
 		return SLURM_ERROR;
 	} else {
 		/* set the block id and quarter (if any) */
diff --git a/src/plugins/select/bluegene/plugin/bg_job_place.h b/src/plugins/select/bluegene/plugin/bg_job_place.h
index f68e857af2c..38d6190e4d9 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_place.h
+++ b/src/plugins/select/bluegene/plugin/bg_job_place.h
@@ -2,7 +2,7 @@
  *  bg_job_place.h - header for blue gene job placement (e.g. base partition 
  *  selection) functions. 
  *****************************************************************************
- *  Copyright (C) 2004 The Regents of the University of California.
+ *  Copyright (C) 2004-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Dan Phung <phung4@llnl.gov> et. al.
  *  
@@ -36,9 +36,10 @@
  *	be used
  * IN min_nodes, max_nodes  - minimum and maximum number of nodes to allocate 
  *	to this job (considers slurm partition limits)
+ * IN test_only - if true, only test if ever could run, not necessarily now
  * RET - SLURM_SUCCESS if job runnable now, error code otherwise 
  */
 extern int submit_job(struct job_record *job_ptr, bitstr_t *bitmap,
-	       int min_nodes, int max_nodes);
+	       int min_nodes, int max_nodes, bool test_only);
 
 #endif /* _BG_JOB_PLACE_H_ */
diff --git a/src/plugins/select/bluegene/plugin/bluegene.c b/src/plugins/select/bluegene/plugin/bluegene.c
index 3a0c250109e..faf2be73b35 100644
--- a/src/plugins/select/bluegene/plugin/bluegene.c
+++ b/src/plugins/select/bluegene/plugin/bluegene.c
@@ -1447,7 +1447,7 @@ static int _delete_old_blocks(void)
 				}
 				if(found_record == NULL) {
 					debug("deleting %s",
-					      found_record->bg_block_id);
+					      init_record->bg_block_id);
 					slurm_attr_init(&attr_agent);
 					if (pthread_attr_setdetachstate(
 						    &attr_agent, 
diff --git a/src/plugins/select/bluegene/plugin/select_bluegene.c b/src/plugins/select/bluegene/plugin/select_bluegene.c
index 23dd477e2d9..7216f37c699 100644
--- a/src/plugins/select/bluegene/plugin/select_bluegene.c
+++ b/src/plugins/select/bluegene/plugin/select_bluegene.c
@@ -229,12 +229,13 @@ extern int select_p_node_init(struct node_record *node_ptr, int node_cnt)
  *	satisfy the request are cleared, other left set
  * IN min_nodes - minimum count of nodes
  * IN max_nodes - maximum count of nodes (0==don't care)
+ * IN test_only - if true, only test if ever could run, not necessarily now
  * RET zero on success, EINVAL otherwise
  * NOTE: bitmap must be a superset of req_nodes at the time that 
  *	select_p_job_test is called
  */
 extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
-			     int min_nodes, int max_nodes)
+			     int min_nodes, int max_nodes, bool test_only)
 {
 	/* bg block test - is there a block where we have:
 	 * 1) geometry requested
@@ -246,7 +247,7 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 	 * as the SLURM block logic will handle access rights.
 	 */
 
-	return submit_job(job_ptr, bitmap, min_nodes, max_nodes);
+	return submit_job(job_ptr, bitmap, min_nodes, max_nodes, test_only);
 }
 
 extern int select_p_job_begin(struct job_record *job_ptr)
diff --git a/src/plugins/select/cons_res/select_cons_res.c b/src/plugins/select/cons_res/select_cons_res.c
index ee246631715..859575fcf66 100644
--- a/src/plugins/select/cons_res/select_cons_res.c
+++ b/src/plugins/select/cons_res/select_cons_res.c
@@ -641,6 +641,8 @@ extern int select_p_block_init(List part_list)
  *	satisfy the request are cleared, other left set
  * IN min_nodes - minimum count of nodes
  * IN max_nodes - maximum count of nodes (0==don't care)
+ * IN test_only - if true, only test if ever could run, not necessarily now,
+ *	not used in this implementation
  * RET zero on success, EINVAL otherwise
  * globals (passed via select_p_node_init): 
  *	node_record_count - count of nodes configured
@@ -653,7 +655,7 @@ extern int select_p_block_init(List part_list)
  *	select_p_job_test is called
  */
 extern int select_p_job_test(struct job_record *job_ptr, bitstr_t * bitmap,
-			     int min_nodes, int max_nodes)
+			     int min_nodes, int max_nodes, bool test_only)
 {
 	int i, index, error_code = EINVAL, sufficient;
 	int *consec_nodes;	/* how many nodes we can add from this 
diff --git a/src/plugins/select/linear/select_linear.c b/src/plugins/select/linear/select_linear.c
index 75729b4f8d9..f9a56960db8 100644
--- a/src/plugins/select/linear/select_linear.c
+++ b/src/plugins/select/linear/select_linear.c
@@ -170,6 +170,8 @@ extern int select_p_block_init(List part_list)
  *	satisfy the request are cleared, other left set
  * IN min_nodes - minimum count of nodes
  * IN max_nodes - maximum count of nodes (0==don't care)
+ * IN test_only - if true, only test if ever could run, not necessarily now,
+ *	not used in this implementation of plugin
  * RET zero on success, EINVAL otherwise
  * globals (passed via select_p_node_init): 
  *	node_record_count - count of nodes configured
@@ -182,7 +184,7 @@ extern int select_p_block_init(List part_list)
  *	select_p_job_test is called
  */
 extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
-		int min_nodes, int max_nodes)
+		int min_nodes, int max_nodes, bool test_only)
 {
 	int i, index, error_code = EINVAL, sufficient;
 	int *consec_nodes;	/* how many nodes we can add from this 
diff --git a/src/plugins/switch/federation/federation.c b/src/plugins/switch/federation/federation.c
index 8e95379b557..b4643699ab7 100644
--- a/src/plugins/switch/federation/federation.c
+++ b/src/plugins/switch/federation/federation.c
@@ -1542,6 +1542,7 @@ _window_state_set(int adapter_cnt, fed_tableinfo_t *tableinfo,
 	fed_window_t *window;
 	NTBL *table;
 	int i, j;
+	bool adapter_found;
 	
 	assert(tableinfo);
 	assert(hostname);
@@ -1568,13 +1569,18 @@ _window_state_set(int adapter_cnt, fed_tableinfo_t *tableinfo,
 			return SLURM_ERROR;
 		}
 
+		adapter_found = false;
 		/* Find the adapter that matches the one in tableinfo */
 		for (j = 0; j < node->adapter_count; j++) {
 			adapter = &node->adapter_list[j];
-			if (adapter->lid == table->lid)
+			if (strcasecmp(adapter->name,
+				       tableinfo[i].adapter_name) == 0
+			    && adapter->lid == table->lid) {
+				adapter_found = true;
 				break;
+			}
 		}
-		if (adapter->lid != table->lid) {
+		if (!adapter_found) {
 			if (table->lid != 0)
 				error("Did not find the correct adapter: "
 				      "%hu vs. %hu",
diff --git a/src/sacct/sacct.c b/src/sacct/sacct.c
index 6df1dd9506d..a786c53dba8 100644
--- a/src/sacct/sacct.c
+++ b/src/sacct/sacct.c
@@ -1900,7 +1900,7 @@ void helpMsg(void)
        "	  * Elapsed time fields are presented as 2 fields, integral\n"
        "	    seconds and integral microseconds\n"
        "    * If --dump is not specified, elapsed time fields are presented\n"
-       "      as [[days:]hours:]minutes:seconds.hundredths\n"
+       "      as [[days-]hours:]minutes:seconds.hundredths\n"
        "    * The default input file is the file named in the \"jobacct_loc\"\n"
        "      parameter in " SLURM_CONFIG_FILE ".\n"
        "\n"
diff --git a/src/slurmctld/node_scheduler.c b/src/slurmctld/node_scheduler.c
index 5ec62daee57..cacdf43fae5 100644
--- a/src/slurmctld/node_scheduler.c
+++ b/src/slurmctld/node_scheduler.c
@@ -289,13 +289,13 @@ _pick_best_load(struct job_record *job_ptr, bitstr_t * bitmap,
 		bit_or(bitmap, job_ptr->details->req_node_bitmap);
 	
 	error_code = select_g_job_test(job_ptr, bitmap, 
-			min_nodes, max_nodes);
+			min_nodes, max_nodes, false);
 
 	/* now try to use idle and lightly loaded nodes */
 	if (error_code) {
 		bit_or(bitmap, light_load_bit);
 		error_code = select_g_job_test(job_ptr, bitmap, 
-				min_nodes, max_nodes);
+				min_nodes, max_nodes, false);
 	} 
 	FREE_NULL_BITMAP(light_load_bit);
 
@@ -303,7 +303,7 @@ _pick_best_load(struct job_record *job_ptr, bitstr_t * bitmap,
 	if (error_code) {
 		bit_or(bitmap, heavy_load_bit);
 		error_code = select_g_job_test(job_ptr, bitmap, 
-				min_nodes, max_nodes);
+				min_nodes, max_nodes, false);
 	}
 	FREE_NULL_BITMAP(heavy_load_bit);
 
@@ -402,9 +402,7 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 	bool runable_ever  = false;	/* Job can ever run */
 	bool runable_avail = false;	/* Job can run with available nodes */
         int cr_enabled = 0;
-#ifdef HAVE_BG
-	uint16_t checked = 0;
-#endif
+
 	if (node_set_size == 0) {
 		info("_pick_best_nodes: empty node set for selection");
 		return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
@@ -607,7 +605,7 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 				pick_code = select_g_job_test(job_ptr, 
 							      avail_bitmap, 
 							      min_nodes, 
-							      max_nodes);
+							      max_nodes, false);
 			
 			if (pick_code == SLURM_SUCCESS) {
 				if ((node_lim != INFINITE) && 
@@ -630,7 +628,7 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 		    (avail_nodes >= min_nodes) &&
 		    (avail_nodes <  max_nodes)) {
 			pick_code = select_g_job_test(job_ptr, avail_bitmap, 
-						min_nodes, max_nodes);
+						min_nodes, max_nodes, false);
 			if ((pick_code == SLURM_SUCCESS) &&
 			    ((node_lim == INFINITE) ||
 			     (bit_set_count(avail_bitmap) <= node_lim))) {
@@ -663,7 +661,7 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 				pick_code = select_g_job_test(job_ptr, 
 							      avail_bitmap, 
 							      min_nodes, 
-							      max_nodes);
+							      max_nodes, true);
                                 if (cr_enabled)
                                         job_ptr->cr_enabled = 1;
 				if (pick_code == SLURM_SUCCESS) {
@@ -680,7 +678,7 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 				pick_code = select_g_job_test(job_ptr, 
 							      total_bitmap, 
 							      min_nodes, 
-							      max_nodes);
+							      max_nodes, true);
                                 if (cr_enabled)
                                         job_ptr->cr_enabled = 1;
 				if (pick_code == SLURM_SUCCESS)
@@ -704,10 +702,6 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 		error_code = ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
 		info("_pick_best_nodes: job never runnable");
 	}
-#ifdef HAVE_BG
-	select_g_set_jobinfo(job_ptr->select_jobinfo,
-			     SELECT_DATA_CHECKED, &checked);
-#endif	
 	if (error_code == SLURM_SUCCESS)
 		error_code = ESLURM_NODES_BUSY;
 	return error_code;
diff --git a/src/squeue/print.c b/src/squeue/print.c
index bde6da2565c..978f4615e15 100644
--- a/src/squeue/print.c
+++ b/src/squeue/print.c
@@ -562,7 +562,7 @@ int _print_job_reason_list(job_info_t * job, int width, bool right,
 		
 		_print_nodes(job->nodes, width, right, false);
 		if(quarter != -1) {
-			sprintf(tmp_char,"0.%d",quarter);
+			sprintf(tmp_char,".%d",quarter);
 			_print_str(tmp_char, width, right, false);
 		}
 	}
diff --git a/src/srun/srun.c b/src/srun/srun.c
index 416b404b998..407d21867a1 100644
--- a/src/srun/srun.c
+++ b/src/srun/srun.c
@@ -238,8 +238,10 @@ long start, end;
 			_print_job_information(resp);
 						
 		job = job_create_allocation(resp);
-		if (create_job_step(job, resp) < 0)
+		if (create_job_step(job, resp) < 0) {
+			srun_job_destroy(job, 0);
 			exit(1);
+		}
 		
 		slurm_free_resource_allocation_response_msg(resp);
 	}
-- 
GitLab