diff --git a/doc/man/man1/squeue.1 b/doc/man/man1/squeue.1
index 81c41a8cc183f5ea856bc1acfe04dd81425e079d..933fc5de2a69a50941a0598ae362db61c3d20d38 100644
--- a/doc/man/man1/squeue.1
+++ b/doc/man/man1/squeue.1
@@ -161,6 +161,14 @@ For pending jobs: the reason a job is waiting for execution
 is printed within parenthesis. 
 See the \fBJOB REASON CODES\fR section below for more information.
 .TP
+\fB%s\fR 
+Node selection plugin specific data. Possible data includes:
+Geometry requirement of resource allocation (X,Y,Z dimensions), 
+Connection type (TORUS, MESH, or NAV meaning torus if possible, otherwise mesh), 
+Permit rotation of geometry (yes or no), 
+Node use (VIRTUAL or COPROCESSOR),
+etc.
+.TP
 \fB%S\fR 
 Start time of the job or job step
 .TP
@@ -185,18 +193,6 @@ User ID
 \fB%x\fR 
 List of node names explicitly excluded by the job
 .TP
-\fB%y\fR 
-Geometry requirement of resource allocation (X,Y,Z dimensions).
-.TP
-\fB%Y\fR 
-Connection type: TORUS, MESH, or NAV (torus else mesh).
-.TP
-\fB%z\fR 
-Permit rotation of geometry (yes or no).
-.TP
-\fB%Z\fR 
-Node use: VIRTUAL or COPROCESSOR.
-.TP
 \fB%.<*>\fR 
 right justification of the field
 .TP
diff --git a/slurm/slurm.h.in b/slurm/slurm.h.in
index 7d7981deb77363f757bca07d1993118ebbb23208..f5dfdfb02581dd0d8a6936830df1ee06919de00f 100644
--- a/slurm/slurm.h.in
+++ b/slurm/slurm.h.in
@@ -92,6 +92,12 @@ BEGIN_C_DECLS
    typedef struct switch_jobinfo *switch_jobinfo_t;	/* opaque data type */
 #endif
 
+/* Define select_jobinfo_t below to avoid including extraneous slurm headers */
+#ifndef __select_jobinfo_t_defined
+#  define  __select_jobinfo_t_defined
+   typedef struct select_jobinfo *select_jobinfo_t;     /* opaque data type */
+#endif
+
 /*****************************************************************************\
  *      DEFINITIONS FOR VERSION MANAGEMENT
 \*****************************************************************************/
@@ -162,6 +168,20 @@ enum node_use_type {
 	RM_COPROCESSOR		/* use second processor for communications */
 };
 
+enum select_data_type {
+	SELECT_DATA_GEOMETRY,	/* data-> uint16_t geometry[SYSTEM_DIMENSIONS] */
+	SELECT_DATA_ROTATE,	/* data-> uint16_t rotate */
+	SELECT_DATA_NODE_USE,	/* data-> uint16_t node_use */
+	SELECT_DATA_CONN_TYPE,	/* data-> uint16_t connection_type */
+	SELECT_DATA_PART_ID	/* data-> char *bgl_part_id */
+};
+
+enum select_print_mode {
+	SELECT_PRINT_HEAD,	/* Print just the header */
+	SELECT_PRINT_DATA,	/* Print just the data */
+	SELECT_PRINT_MIXED	/* Print "field=value" */
+};
+
 /* Possible task distributions across the nodes */
 enum task_dist_states {
 	SLURM_DIST_CYCLIC,	/* distribute tasks 1 per node, round robin */
@@ -279,6 +299,8 @@ typedef struct job_descriptor {	/* For submit, allocate, and update requests */
 	uint16_t rotate;	/* permit geometry rotation if set */
 	uint16_t node_use;	/* see enum node_use_type */
 /* End of Blue Gene specific values */
+	select_jobinfo_t select_jobinfo; /* opaque data type,
+			* SLURM internal use only */
 } job_desc_msg_t;
 
 typedef struct job_info {
@@ -320,17 +342,8 @@ typedef struct job_info {
 	char *account;		/* charge to specified account */
 	uint16_t wait_reason;	/* reason job still pending, see
 				 * slurm.h:enum job_wait_reason */
-/*
- * The following parameters are only meaningful on a Blue Gene
- * system at present. Some will be of value on other system.
- */
-	char *bgl_part_id;	/* Blue Gene partition ID */
-	uint16_t geometry[SYSTEM_DIMENSIONS];	/* node count in various
-				 * dimensions, e.g. X, Y, and Z */
-	uint16_t conn_type;	/* see enum connection_type */
-	uint16_t rotate;	/* permit geometry rotation if set */
-	uint16_t node_use;	/* see enum node_use_type */
-/* End of Blue Gene specific values */
+	select_jobinfo_t select_jobinfo; /* opaque data type,
+			* process using select_g_get_jobinfo() */
 } job_info_t;
 
 typedef struct job_info_msg {
@@ -433,12 +446,8 @@ typedef struct resource_allocation_response_msg {
 	uint16_t node_cnt;	/* count of nodes */
 	slurm_addr *node_addr;	/* network addresses */
 	uint32_t error_code;	/* error code for warning message */
-/*
- * The following parameters are only meaningful on a Blue Gene
- * system at present. Some will be of value on other system.
- */
-	char *bgl_part_id;	/* Blue Gene partition ID */
-/* End of Blue Gene specific values */
+	select_jobinfo_t select_jobinfo;	/* opaque data structure,
+			* use select_g_get_jobinfo() to access contents */
 } resource_allocation_response_msg_t;
 
 typedef struct resource_allocation_and_run_response_msg {
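
With the Blue Gene specific members folded into the opaque select_jobinfo field, callers read individual values back through select_g_get_jobinfo() keyed by the SELECT_DATA_* enum above. The following is a minimal sketch under that assumption; _show_part_id() is a hypothetical helper, not part of this patch.

/* Sketch only: read one value back out of the opaque select_jobinfo
 * attached to an allocation response. */
#include <stdio.h>
#include <slurm/slurm.h>

#include "src/common/node_select.h"
#include "src/common/xmalloc.h"

static void _show_part_id(resource_allocation_response_msg_t *alloc)
{
	char *part_id = NULL;

	/* SELECT_DATA_PART_ID hands back an xstrdup'd copy (or NULL if
	 * the plugin recorded no partition), which the caller must free */
	select_g_get_jobinfo(alloc->select_jobinfo,
			     SELECT_DATA_PART_ID, &part_id);
	if (part_id) {
		printf("BGL partition ID: %s\n", part_id);
		xfree(part_id);
	}
}
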
diff --git a/src/api/job_info.c b/src/api/job_info.c
index fb6a072a7f5cfd22e40fc4e7695a98886d5d0abc..6ee8cae1f8b3e9b27ef5dfb15a9d5b60c96ecfb8 100644
--- a/src/api/job_info.c
+++ b/src/api/job_info.c
@@ -41,6 +41,7 @@
 #include <slurm/slurm_errno.h>
 
 #include "src/api/job_info.h"
+#include "src/common/node_select.h"
 #include "src/common/slurm_protocol_api.h"
 
 /*
@@ -76,7 +77,7 @@ extern void
 slurm_print_job_info ( FILE* out, job_info_t * job_ptr, int one_liner )
 {
 	int j;
-	char time_str[16];
+	char time_str[16], select_buf[80];
 	struct passwd *user_info = NULL;
 	struct group *group_info = NULL;
 
@@ -219,29 +220,17 @@ slurm_print_job_info ( FILE* out, job_info_t * job_ptr, int one_liner )
 			break;
 	}
 
-#ifdef HAVE_BGL
-	/****** Line 12 ******/
-	if (one_liner)
-		fprintf ( out, " ");
-	else
-		fprintf ( out, "\n   ");
-	fprintf ( out, "Geometry=%ux%ux%u ",  job_ptr->geometry[0],
-		job_ptr->geometry[1], job_ptr->geometry[2]);
-	fprintf ( out, "Connection=%s ", job_conn_type_string(job_ptr->conn_type));
-	if (job_ptr->rotate)
-		fprintf ( out, "Rotate=yes ");
-	else
-		fprintf ( out, "Rotate=no ");
-	if (one_liner)
-		fprintf ( out, " ");
-	else
-		fprintf ( out, "\n   ");
 
-	/****** Line 13 ******/
-	fprintf ( out, "NodeUse=%s BGL_Part_Id=%s",
-		job_node_use_string(job_ptr->node_use),
-		job_ptr->bgl_part_id);
-#endif
+	/****** Line 12 (optional) ******/
+	select_g_sprint_jobinfo(job_ptr->select_jobinfo,
+		select_buf, sizeof(select_buf), SELECT_PRINT_MIXED);
+	if (select_buf[0] != '\0') {
+		if (one_liner)
+			fprintf ( out, " ");
+		else
+			fprintf ( out, "\n   ");
+		fprintf( out, "%s", select_buf);
+	}
 
 	fprintf( out, "\n\n");
 }
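
SELECT_PRINT_MIXED, used above, yields the "Field=value" form suited to scontrol-style output; SELECT_PRINT_HEAD and SELECT_PRINT_DATA pair up for column-style listings. The sketch below is a hypothetical caller (squeue's own formatting changes are not part of this patch).

/* Hypothetical listing routine: pair SELECT_PRINT_HEAD with
 * SELECT_PRINT_DATA for column-style output. */
#include <stdio.h>
#include <slurm/slurm.h>

#include "src/common/node_select.h"

static void _list_jobs(job_info_msg_t *msg)
{
	char select_buf[80];
	uint32_t i;

	for (i = 0; i < msg->record_count; i++) {
		job_info_t *job = &msg->job_array[i];

		if (i == 0) {	/* print the column headings once */
			select_g_sprint_jobinfo(job->select_jobinfo,
				select_buf, sizeof(select_buf),
				SELECT_PRINT_HEAD);
			printf("JOBID %s\n", select_buf);
		}
		select_g_sprint_jobinfo(job->select_jobinfo, select_buf,
			sizeof(select_buf), SELECT_PRINT_DATA);
		printf("%5u %s\n", job->job_id, select_buf);
	}
}
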
diff --git a/src/common/Makefile.am b/src/common/Makefile.am
index 30ef5b07d851201f447cce4c755854e4583a5e6a..36e1d504e13ff471eca27103a459a64e9de704f8 100644
--- a/src/common/Makefile.am
+++ b/src/common/Makefile.am
@@ -35,6 +35,7 @@ libcommon_la_SOURCES = 			\
 	plugin.c plugin.h		\
 	plugrack.c plugrack.h		\
 	read_config.c read_config.h	\
+	node_select.c node_select.h	\
 	setenvpf.c setenvpf.h		\
 	slurm_cred.h       		\
 	slurm_cred.c			\
diff --git a/src/common/hostlist.c b/src/common/hostlist.c
index c1b3dc8e9ff6d8d10f1c93bd843c6bad1b4562ce..a5e57ad6542a9b6fd45aeba27b7f5e1748a4eb92 100644
--- a/src/common/hostlist.c
+++ b/src/common/hostlist.c
@@ -155,18 +155,17 @@ strong_alias(hostset_within,		slurm_hostset_within);
 /* ----[ Internal Data Structures ]---- */
 
 
-#ifdef HAVE_BGL
-/* We allocate space for three digits, 
- * each with values 0 to 9 even if they are not all used */
-bool axis[10][10][10];
-int axis_min_x, axis_min_y, axis_min_z;
-int axis_max_x, axis_max_y, axis_max_z;
-
-
-static void _clear_grid(void);
-static void _set_grid(unsigned long start, unsigned long end);
-static bool _test_box(void);
-#endif	/* HAVE_BGL */
+#ifdef HAVE_BGL		/* logic for block node description */
+   /* We allocate space for three digits, 
+    * each with values 0 to 9 even if they are not all used */
+   bool axis[10][10][10];
+   int axis_min_x, axis_min_y, axis_min_z;
+   int axis_max_x, axis_max_y, axis_max_z;
+
+   static void _clear_grid(void);
+   static void _set_grid(unsigned long start, unsigned long end);
+   static bool _test_box(void);
+#endif
 
 /* hostname type: A convenience structure used in parsing single hostnames */
 struct hostname_components {
@@ -2182,7 +2181,7 @@ _get_bracketed_list(hostlist_t hl, int *start, const size_t n, char *buf)
 	return len;
 }
 
-#ifdef HAVE_BGL
+#ifdef HAVE_BGL		/* logic for block node description */
 static void
 _clear_grid(void)
 {
@@ -2256,7 +2255,7 @@ _test_box(void)
 
 	return true;
 }
-#endif	/* HAVE_BGL */
+#endif
 
 size_t hostlist_ranged_string(hostlist_t hl, size_t n, char *buf)
 {
@@ -2267,7 +2266,7 @@ size_t hostlist_ranged_string(hostlist_t hl, size_t n, char *buf)
   
 	LOCK_HOSTLIST(hl);
 
-#ifdef HAVE_BGL
+#ifdef HAVE_BGL		/* logic for block node description */
 	if (hl->hr[0]->width != 3)
 		goto notbox;
 	_clear_grid();
@@ -2285,7 +2284,7 @@ size_t hostlist_ranged_string(hostlist_t hl, size_t n, char *buf)
 	box = true;
 
   notbox:
-#endif	/* HAVE_BGL */
+#endif
 
 	if (!box) {
 		i=0;
diff --git a/src/common/node_select.c b/src/common/node_select.c
new file mode 100644
index 0000000000000000000000000000000000000000..be6767bcab361a89ab78ac57af9753cba3f5676d
--- /dev/null
+++ b/src/common/node_select.c
@@ -0,0 +1,701 @@
+/*****************************************************************************\
+ *  node_select.c - node selection plugin wrapper.
+ *
+ *  NOTE: The node selection plugin itself is intimately tied to 
+ *  slurmctld functions and data structures. Some related 
+ *  functions (e.g. data structure un/packing, environment 
+ *  variable setting) are required by most SLURM commands. 
+ *  Rather than creating a new plugin with these commonly 
+ *  used functions, they are included within this module.
+ *****************************************************************************
+ *  Copyright (C) 2002 The Regents of the University of California.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette@llnl.gov>.
+ *  UCRL-CODE-2002-040.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+\*****************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include <pthread.h>
+
+#include "src/common/list.h"
+#include "src/common/node_select.h"
+#include "src/common/plugin.h"
+#include "src/common/plugrack.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/xstring.h"
+#include "src/slurmctld/slurmctld.h"
+
+/* Define select_jobinfo_t below to avoid including extraneous slurm headers */
+#ifndef __select_jobinfo_t_defined
+#  define  __select_jobinfo_t_defined
+   typedef struct select_jobinfo *select_jobinfo_t;     /* opaque data type */
+#endif
+
+/*
+ * Local data
+ */
+
+typedef struct slurm_select_ops {
+	int		(*state_save)		( char *dir_name );
+	int	       	(*state_restore)	( char *dir_name );
+	int 		(*node_init)		( struct node_record *node_ptr,
+						  int node_cnt);
+	int 		(*part_init)		( List part_list );
+	int		(*job_test)		( struct job_record *job_ptr,
+						  bitstr_t *bitmap, int min_nodes, 
+						  int max_nodes );
+	int		(*job_init)		( struct job_record *job_ptr );
+	int		(*job_fini)		( struct job_record *job_ptr );
+} slurm_select_ops_t;
+
+typedef struct slurm_select_context {
+	char	       	*select_type;
+	plugrack_t     	plugin_list;
+	plugin_handle_t	cur_plugin;
+	int		select_errno;
+	slurm_select_ops_t ops;
+} slurm_select_context_t;
+
+static slurm_select_context_t * g_select_context = NULL;
+static pthread_mutex_t		g_select_context_lock = 
+					PTHREAD_MUTEX_INITIALIZER;
+
+#ifdef HAVE_BGL		/* node selection specific logic */
+#  define JOBINFO_MAGIC 0x83ac
+    struct select_jobinfo {
+	uint16_t geometry[SYSTEM_DIMENSIONS];	/* node count in various
+				 * dimensions, e.g. X, Y, and Z */
+	uint16_t conn_type;	/* see enum connection_type */
+	uint16_t rotate;	/* permit geometry rotation if set */
+	uint16_t node_use;	/* see enum node_use_type */
+	char *bgl_part_id;	/* Blue Gene partition ID */
+	uint16_t magic;		/* magic number */
+    };
+#endif
+
+/*
+ * Local functions
+ */
+static slurm_select_context_t *	_select_context_create(const char *select_type);
+static int 			_select_context_destroy(slurm_select_context_t *c);
+static slurm_select_ops_t *	_select_get_ops(slurm_select_context_t *c);
+
+/*
+ * Locate and load the appropriate plugin
+ */
+static slurm_select_ops_t * _select_get_ops(slurm_select_context_t *c)
+{
+	/*
+	 * Must be synchronized with slurm_select_ops_t above.
+	 */
+	static const char *syms[] = {
+		"select_p_state_save",
+		"select_p_state_restore",
+		"select_p_node_init",
+		"select_p_part_init",
+		"select_p_job_test",
+		"select_p_job_init",
+		"select_p_job_fini"
+	};
+	int n_syms = sizeof( syms ) / sizeof( char * );
+
+	/* Get plugin list. */
+	if ( c->plugin_list == NULL ) {
+		char *plugin_dir;
+		c->plugin_list = plugrack_create();
+		if ( c->plugin_list == NULL ) {
+			error( "cannot create plugin manager" );
+			return NULL;
+		}
+		plugrack_set_major_type( c->plugin_list, "select" );
+		plugrack_set_paranoia( c->plugin_list,
+				       PLUGRACK_PARANOIA_NONE,
+				       0 );
+		plugin_dir = slurm_get_plugin_dir();
+		plugrack_read_dir( c->plugin_list, plugin_dir );
+		xfree(plugin_dir);
+	}
+
+	c->cur_plugin = plugrack_use_by_type( c->plugin_list, c->select_type );
+	if ( c->cur_plugin == PLUGIN_INVALID_HANDLE ) {
+		error( "cannot find node selection plugin for %s", 
+			c->select_type );
+		return NULL;
+	}
+
+	/* Dereference the API. */
+	if ( plugin_get_syms( c->cur_plugin,
+			      n_syms,
+			      syms,
+			      (void **) &c->ops ) < n_syms ) {
+		error( "incomplete node selection plugin detected" );
+		return NULL;
+	}
+
+	return &c->ops;
+}
+
+/*
+ * Create a node selection context
+ */
+static slurm_select_context_t *_select_context_create(const char *select_type)
+{
+	slurm_select_context_t *c;
+
+	if ( select_type == NULL ) {
+		debug3( "_select_context_create: no select type" );
+		return NULL;
+	}
+
+	c = xmalloc( sizeof( slurm_select_context_t ) );
+	c->select_type	= xstrdup( select_type );
+	c->plugin_list	= NULL;
+	c->cur_plugin	= PLUGIN_INVALID_HANDLE;
+	c->select_errno	= SLURM_SUCCESS;
+
+	return c;
+}
+
+/*
+ * Destroy a node selection context
+ */
+static int _select_context_destroy( slurm_select_context_t *c )
+{
+	/*
+	 * Must check return code here because plugins might still
+	 * be loaded and active.
+	 */
+	if ( c->plugin_list ) {
+		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
+			return SLURM_ERROR;
+		}
+	}
+
+	xfree( c->select_type );
+	xfree( c );
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Initialize context for node selection plugin
+ */
+extern int slurm_select_init(void)
+{
+	int retval = SLURM_SUCCESS;
+	char *select_type = NULL;
+	
+	slurm_mutex_lock( &g_select_context_lock );
+
+	if ( g_select_context ) goto done;
+
+	select_type = slurm_get_select_type();
+	g_select_context = _select_context_create(select_type);
+	if ( g_select_context == NULL ) {
+		error( "cannot create node selection context for %s",
+			 select_type );
+		retval = SLURM_ERROR;
+		goto done;
+	}
+
+	if ( _select_get_ops( g_select_context ) == NULL ) {
+		error( "cannot resolve node selection plugin operations" );
+		_select_context_destroy( g_select_context );
+		g_select_context = NULL;
+		retval = SLURM_ERROR;
+	}
+
+ done:
+	slurm_mutex_unlock( &g_select_context_lock );
+	xfree(select_type);
+	return retval;
+}
+
+/*
+ * Save any global state information
+ * IN dir_name - directory into which the data can be stored
+ */
+extern int select_g_state_save(char *dir_name)
+{
+	if (slurm_select_init() < 0)
+		return SLURM_ERROR;
+
+	return (*(g_select_context->ops.state_save))(dir_name);
+}
+
+/*
+ * Initialize context for node selection plugin and
+ * restore any global state information
+ * IN dir_name - directory from which the data can be restored
+ */
+extern int select_g_state_restore(char *dir_name)
+{
+	if (slurm_select_init() < 0)
+		return SLURM_ERROR;
+
+	return (*(g_select_context->ops.state_restore))(dir_name);
+}
+
+/*
+ * Note re/initialization of node record data structure
+ * IN node_ptr - current node data
+ * IN node_count - number of node entries
+ */
+extern int select_g_node_init(struct node_record *node_ptr, int node_cnt)
+{
+	if (slurm_select_init() < 0)
+		return SLURM_ERROR;
+
+	return (*(g_select_context->ops.node_init))(node_ptr, node_cnt);
+}
+
+
+/*
+ * Note re/initialization of partition record data structure
+ * IN part_list - list of partition records
+ */
+extern int select_g_part_init(List part_list)
+{
+	if (slurm_select_init() < 0)
+		return SLURM_ERROR;
+
+	return (*(g_select_context->ops.part_init))(part_list);
+}
+
+/*
+ * Select the "best" nodes for given job from those available
+ * IN job_ptr - pointer to job being considered for initiation
+ * IN/OUT bitmap - map of nodes being considered for allocation on input,
+ *                 map of nodes actually to be assigned on output
+ * IN min_nodes - minimum number of nodes to allocate to job
+ * IN max_nodes - maximum number of nodes to allocate to job 
+ */
+extern int select_g_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
+        int min_nodes, int max_nodes)
+{
+	if (slurm_select_init() < 0)
+		return SLURM_ERROR;
+
+	return (*(g_select_context->ops.job_test))(job_ptr, bitmap, 
+		min_nodes, max_nodes);
+}
+
+/*
+ * Note initiation of job is about to begin. Called immediately 
+ * after select_g_job_test(). Executed from slurmctld.
+ * IN job_ptr - pointer to job being initiated
+ */
+extern int select_g_job_init(struct job_record *job_ptr)
+{
+	if (slurm_select_init() < 0)
+		return SLURM_ERROR;
+
+	return (*(g_select_context->ops.job_init))(job_ptr);
+}
+
+/*
+ * Note termination of job is starting. Executed from slurmctld.
+ * IN job_ptr - pointer to job being terminated
+ */
+extern int select_g_job_fini(struct job_record *job_ptr)
+{
+	if (slurm_select_init() < 0)
+		return SLURM_ERROR;
+
+	return (*(g_select_context->ops.job_fini))(job_ptr);
+}
+
+#ifdef HAVE_BGL		/* node selection specific logic */
+static char *_job_conn_type_string(uint16_t inx)
+{
+	if (inx == RM_TORUS)
+		return "torus";
+	else if (inx == RM_MESH)
+		return "mesh";
+	else
+		return "nav";
+}
+
+static char *_job_node_use_string(uint16_t inx)
+{
+	if (inx == RM_COPROCESSOR)
+		return "coprocessor";
+	else
+		return "virtual";
+}
+
+
+static char *_job_rotate_string(uint16_t inx)
+{
+	if (inx)
+		return "yes";
+	else
+		return "no";
+}
+
+/* allocate storage for a select job credential
+ * OUT jobinfo - storage for a select job credential
+ * RET         - slurm error code
+ * NOTE: storage must be freed using select_g_free_jobinfo
+ */
+extern int select_g_alloc_jobinfo (select_jobinfo_t *jobinfo)
+{
+	xassert(jobinfo != NULL);
+
+	*jobinfo = xmalloc(sizeof(struct select_jobinfo));
+	(*jobinfo)->magic = JOBINFO_MAGIC;
+	return SLURM_SUCCESS;
+}
+
+/* fill in a previously allocated select job credential
+ * IN/OUT jobinfo  - updated select job credential
+ * IN data_type - type of data to enter into job credential
+ * IN data - the data to enter into job credential
+ */
+extern int select_g_set_jobinfo (select_jobinfo_t jobinfo,
+		int data_type, void *data)
+{
+	int i, rc = SLURM_SUCCESS;
+	uint16_t *tmp_16 = (uint16_t *) data;
+	char * tmp_char = (char *) data;
+
+	if (jobinfo->magic != JOBINFO_MAGIC) {
+		error("select_g_set_jobinfo: jobinfo magic bad");
+		return SLURM_ERROR;
+	}
+
+	switch (data_type) {
+		case SELECT_DATA_GEOMETRY:
+			for (i=0; i<SYSTEM_DIMENSIONS; i++)
+				jobinfo->geometry[i] = tmp_16[i];
+			break;
+		case SELECT_DATA_ROTATE:
+			jobinfo->rotate = *tmp_16;
+			break;
+		case SELECT_DATA_NODE_USE:
+			jobinfo->node_use = *tmp_16;
+			break;
+		case SELECT_DATA_CONN_TYPE:
+			jobinfo->conn_type = *tmp_16;
+			break;
+		case SELECT_DATA_PART_ID:
+			jobinfo->bgl_part_id = xstrdup(tmp_char);
+			break;
+		default:
+			debug("select_g_set_jobinfo data_type %d invalid", data_type);
+	}
+
+	return rc;
+}
+
+/* get data from a select job credential
+ * IN jobinfo  - select job credential to read
+ * IN data_type - type of data to retrieve from the job credential
+ * IN/OUT data - location into which the requested data is copied
+ */
+extern int select_g_get_jobinfo (select_jobinfo_t jobinfo,
+		int data_type, void *data)
+{
+	int i, rc = SLURM_SUCCESS;
+	uint16_t *tmp_16 = (uint16_t *) data;
+	char **tmp_char = (char **) data;
+
+	if (jobinfo->magic != JOBINFO_MAGIC) {
+		error("select_g_get_jobinfo: jobinfo magic bad");
+		return SLURM_ERROR;
+	}
+
+	switch (data_type) {
+		case SELECT_DATA_GEOMETRY:
+			for (i=0; i<SYSTEM_DIMENSIONS; i++)
+				tmp_16[i] = jobinfo->geometry[i];
+			break;
+		case SELECT_DATA_ROTATE:
+			*tmp_16 = jobinfo->rotate;
+			break;
+		case SELECT_DATA_NODE_USE:
+			*tmp_16 = jobinfo->node_use;
+			break;
+		case SELECT_DATA_CONN_TYPE:
+			*tmp_16 = jobinfo->conn_type;
+			break;
+		case SELECT_DATA_PART_ID:
+			if ((jobinfo->bgl_part_id == NULL)
+			||  (jobinfo->bgl_part_id[0] == '\0'))
+				*tmp_char = NULL;
+			else
+				*tmp_char = xstrdup(jobinfo->bgl_part_id);
+			break;
+		default:
+			debug("select_g_get_jobinfo data_type %d invalid", data_type);
+	}
+
+	return rc;
+}
+
+/* copy a select job credential
+ * IN jobinfo - the select job credential to be copied
+ * RET        - the copy or NULL on failure
+ * NOTE: returned value must be freed using select_g_free_jobinfo
+ */
+extern select_jobinfo_t select_g_copy_jobinfo(select_jobinfo_t jobinfo)
+{
+	struct select_jobinfo *rc = NULL;
+
+	if (jobinfo->magic != JOBINFO_MAGIC)
+		error("select_g_copy_jobinfo: jobinfo magic bad");
+	else {
+		int i;
+		rc = xmalloc(sizeof(struct select_jobinfo));
+		rc->magic = JOBINFO_MAGIC;
+		for (i=0; i<SYSTEM_DIMENSIONS; i++)
+			rc->geometry[i] = jobinfo->geometry[i];
+		rc->rotate = jobinfo->rotate;
+		rc->node_use = jobinfo->node_use;
+		rc->conn_type = jobinfo->conn_type;
+		rc->bgl_part_id = xstrdup(jobinfo->bgl_part_id);
+	}
+
+	return rc;
+}
+
+/* free storage previously allocated for a select job credential
+ * IN jobinfo  - the select job credential to be freed
+ */
+extern int select_g_free_jobinfo  (select_jobinfo_t *jobinfo)
+{
+	int rc = SLURM_SUCCESS;
+
+	xassert(jobinfo != NULL);
+	if (*jobinfo == NULL)	/* never set, treat as not an error */
+		;
+	else if ((*jobinfo)->magic != JOBINFO_MAGIC) {
+		error("select_g_free_jobinfo: jobinfo magic bad");
+		rc = EINVAL;
+	} else {
+		(*jobinfo)->magic = 0;
+		xfree((*jobinfo)->bgl_part_id);
+		xfree(*jobinfo);
+	}
+	return rc;
+}
+
+/* pack a select job credential into a buffer in machine independent form
+ * IN jobinfo  - the select job credential to be saved
+ * OUT buffer  - buffer with select credential appended
+ * RET         - slurm error code
+ */
+extern int  select_g_pack_jobinfo  (select_jobinfo_t jobinfo, Buf buffer)
+{
+	int i;
+
+	for (i=0; i<SYSTEM_DIMENSIONS; i++)
+		pack16(jobinfo->geometry[i], buffer);		
+	pack16(jobinfo->conn_type, buffer);
+	pack16(jobinfo->rotate, buffer);
+	pack16(jobinfo->node_use, buffer);
+	packstr(jobinfo->bgl_part_id, buffer);
+
+	return SLURM_SUCCESS;
+}
+
+/* unpack a select job credential from a buffer
+ * OUT jobinfo - the select job credential read
+ * IN  buffer  - buffer with select credential read from current pointer loc
+ * RET         - slurm error code
+ * NOTE: returned value must be freed using select_g_free_jobinfo
+ */
+extern int  select_g_unpack_jobinfo(select_jobinfo_t jobinfo, Buf buffer)
+{
+	int i;
+	uint16_t uint16_tmp;
+
+	for (i=0; i<SYSTEM_DIMENSIONS; i++)
+		safe_unpack16(&(jobinfo->geometry[i]), buffer);
+	safe_unpack16(&(jobinfo->conn_type), buffer);
+	safe_unpack16(&(jobinfo->rotate), buffer);
+	safe_unpack16(&(jobinfo->node_use), buffer);
+	safe_unpackstr_xmalloc(&(jobinfo->bgl_part_id), &uint16_tmp, buffer);
+	return SLURM_SUCCESS;
+
+      unpack_error:
+	return SLURM_ERROR;
+}
+
+/* write select job credential to a string
+ * IN jobinfo - a select job credential
+ * OUT buf    - location to write job credential contents
+ * IN size    - byte size of buf
+ * IN mode    - print mode, see enum select_print_mode
+ * RET        - the string, same as buf
+ */
+extern char *select_g_sprint_jobinfo(select_jobinfo_t jobinfo,
+		char *buf, size_t size, int mode)
+{
+	uint16_t geometry[SYSTEM_DIMENSIONS];
+	int i;
+
+	if (buf == NULL) {
+		error("select_g_sprint_jobinfo: buf is null");
+		return NULL;
+	}
+
+	if ((mode != SELECT_PRINT_DATA)
+	&& jobinfo && (jobinfo->magic != JOBINFO_MAGIC)) {
+		error("select_g_sprint_jobinfo: jobinfo magic bad");
+		return NULL;
+	}
+
+	if (jobinfo == NULL) {	/* avoid dereferencing a null credential */
+		error("select_g_sprint_jobinfo: jobinfo is null");
+		buf[0] = '\0';
+		return buf;
+	}
+
+	if (jobinfo->geometry[0] == (uint16_t) NO_VAL) {
+		for (i=0; i<SYSTEM_DIMENSIONS; i++)
+			geometry[i] = 0;
+	} else {
+		for (i=0; i<SYSTEM_DIMENSIONS; i++)
+			geometry[i] = jobinfo->geometry[i];
+	}
+
+	switch (mode) {
+		case SELECT_PRINT_HEAD:
+			snprintf(buf, size,
+				"CONNECT ROTATE NODE_USE GEOMETRY PART_ID");
+			break;
+		case SELECT_PRINT_DATA:
+			snprintf(buf, size, 
+				"%7.7s %6.6s %8.8s    %ux%ux%u %7s",
+				_job_conn_type_string(jobinfo->conn_type),
+				_job_rotate_string(jobinfo->rotate),
+				_job_node_use_string(jobinfo->node_use),
+				geometry[0], geometry[1], geometry[2],
+				jobinfo->bgl_part_id);
+			break;
+		case SELECT_PRINT_MIXED:
+			snprintf(buf, size, 
+				"Connection=%s Rotate=%s NodeUse=%s "
+				"Geometry=%ux%ux%u Part_ID=%s",
+				_job_conn_type_string(jobinfo->conn_type),
+				_job_rotate_string(jobinfo->rotate),
+				_job_node_use_string(jobinfo->node_use),
+				geometry[0], geometry[1], geometry[2],
+				jobinfo->bgl_part_id);
+			break;
+		default:
+			error("select_g_sprint_jobinfo: bad mode %d", mode);
+			if (size > 0)
+				buf[0] = '\0';
+	}
+
+	return buf;
+}
+
+#else	/* !HAVE_BGL */
+
+/* allocate storage for a select job credential
+ * OUT jobinfo - storage for a select job credential
+ * RET         - slurm error code
+ * NOTE: storage must be freed using select_g_free_jobinfo
+ */
+extern int select_g_alloc_jobinfo (select_jobinfo_t *jobinfo)
+{
+	return SLURM_SUCCESS;
+}
+
+/* fill in a previously allocated select job credential
+ * IN/OUT jobinfo  - updated select job credential
+ * IN data_type - type of data to enter into job credential
+ * IN data - the data to enter into job credential
+ */
+extern int select_g_set_jobinfo (select_jobinfo_t jobinfo,
+		int data_type, void *data)
+{
+	return SLURM_SUCCESS;
+}
+
+/* get data from a select job credential
+ * IN jobinfo  - select job credential to read
+ * IN data_type - type of data to retrieve from the job credential
+ * IN/OUT data - location into which the requested data is copied
+ */
+extern int select_g_get_jobinfo (select_jobinfo_t jobinfo,
+		int data_type, void *data)
+{
+	return SLURM_SUCCESS;
+}
+
+/* copy a select job credential
+ * IN jobinfo - the select job credential to be copied
+ * RET        - the copy or NULL on failure
+ * NOTE: returned value must be freed using select_g_free_jobinfo
+ */
+extern select_jobinfo_t select_g_copy_jobinfo(select_jobinfo_t jobinfo)
+{
+	return NULL;
+}
+
+/* free storage previously allocated for a select job credential
+ * IN jobinfo  - the select job credential to be freed
+ */
+extern int select_g_free_jobinfo  (select_jobinfo_t *jobinfo)
+{
+	return SLURM_SUCCESS;
+}
+
+/* pack a select job credential into a buffer in machine independent form
+ * IN jobinfo  - the select job credential to be saved
+ * OUT buffer  - buffer with select credential appended
+ * RET         - slurm error code
+ */
+extern int  select_g_pack_jobinfo  (select_jobinfo_t jobinfo, Buf buffer)
+{
+	return SLURM_SUCCESS;
+}
+
+/* unpack a select job credential from a buffer
+ * OUT jobinfo - the select job credential read
+ * IN  buffer  - buffer with select credential read from current pointer loc
+ * RET         - slurm error code
+ * NOTE: returned value must be freed using select_g_free_jobinfo
+ */
+extern int  select_g_unpack_jobinfo(select_jobinfo_t jobinfo, Buf buffer)
+{
+	return SLURM_SUCCESS;
+}
+
+/* write select job credential to a string
+ * IN jobinfo - a select job credential
+ * OUT buf    - location to write job credential contents
+ * IN size    - byte size of buf
+ * IN mode    - print mode, see enum select_print_mode
+ * RET        - the string, same as buf
+ */
+extern char *select_g_sprint_jobinfo(select_jobinfo_t jobinfo,
+		char *buf, size_t size, int mode)
+{
+	if (buf && size) {
+		buf[0] = '\0';
+		return buf;
+	} else
+		return NULL;
+}
+
+#endif
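
The wrapper's _select_get_ops() resolves the seven select_p_* symbols named in syms[] from whichever plugin SelectType configures, so any node selection plugin must export exactly that set along with the usual SLURM plugin identity globals. A minimal skeleton is sketched below; the plugin name, type string, and version number are placeholders, and all bodies are stubs.

/* Skeleton of a node selection plugin (sketch only): exports the seven
 * symbols resolved by _select_get_ops() plus the standard plugin
 * identity strings. Real selection logic is omitted. */
#include <slurm/slurm_errno.h>

#include "src/common/bitstring.h"
#include "src/common/list.h"
#include "src/slurmctld/slurmctld.h"

const char plugin_name[]	= "Example node selection plugin";
const char plugin_type[]	= "select/example";
const uint32_t plugin_version	= 90;	/* placeholder version */

extern int select_p_state_save(char *dir_name)
{
	return SLURM_SUCCESS;	/* nothing to save in this sketch */
}

extern int select_p_state_restore(char *dir_name)
{
	return SLURM_SUCCESS;
}

extern int select_p_node_init(struct node_record *node_ptr, int node_cnt)
{
	return SLURM_SUCCESS;
}

extern int select_p_part_init(List part_list)
{
	return SLURM_SUCCESS;
}

extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
		int min_nodes, int max_nodes)
{
	/* a real plugin prunes "bitmap" down to the nodes it selects */
	return SLURM_SUCCESS;
}

extern int select_p_job_init(struct job_record *job_ptr)
{
	return SLURM_SUCCESS;
}

extern int select_p_job_fini(struct job_record *job_ptr)
{
	return SLURM_SUCCESS;
}
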
diff --git a/src/common/node_select.h b/src/common/node_select.h
new file mode 100644
index 0000000000000000000000000000000000000000..bc43c6abfcfaab6e681a1e49f762cc375a5f766a
--- /dev/null
+++ b/src/common/node_select.h
@@ -0,0 +1,157 @@
+/*****************************************************************************\
+ *  node_select.h - Define node selection plugin functions.
+ *****************************************************************************
+ *  Copyright (C) 2004 The Regents of the University of California.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov>
+ *  UCRL-CODE-2002-040.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+\*****************************************************************************/
+
+#ifndef __SELECT_PLUGIN_API_H__
+#define __SELECT_PLUGIN_API_H__
+
+#include "src/common/list.h"
+#include "src/slurmctld/slurmctld.h"
+
+/*****************************************\
+ * GLOBAL SELECT STATE MANAGEMENT FUNCTIONS *
+\*****************************************/
+
+/*
+ * Initialize context for node selection plugin
+ */
+extern int slurm_select_init(void);
+
+/*
+ * Save any global state information
+ * IN dir_name - directory into which the data can be stored
+ */
+extern int select_g_state_save(char *dir_name);
+
+/*
+ * Initialize context for node selection plugin and
+ * restore any global state information
+ * IN dir_name - directory from which the data can be restored
+ */
+extern int select_g_state_restore(char *dir_name);
+
+/*
+ * Note re/initialization of node record data structure
+ * IN node_ptr - current node data
+ * IN node_count - number of node entries
+ */
+extern int select_g_node_init(struct node_record *node_ptr, int node_cnt);
+
+/*
+ * Note re/initialization of partition record data structure
+ * IN part_list - list of partition records
+ */
+extern int select_g_part_init(List part_list);
+
+/******************************************************\
+ * JOB-SPECIFIC SELECT CREDENTIAL MANAGEMENT FUNCTIONS *
+\******************************************************/
+
+/*
+ * Select the "best" nodes for given job from those available
+ * IN job_ptr - pointer to job being considered for initiation
+ * IN/OUT bitmap - map of nodes being considered for allocation on input,
+ *                 map of nodes actually to be assigned on output
+ * IN min_nodes - minimum number of nodes to allocate to job
+ * IN max_nodes - maximum number of nodes to allocate to job 
+ */
+extern int select_g_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
+	int min_nodes, int max_nodes);
+
+/*
+ * Note initiation of job is about to begin. Called immediately 
+ * after select_g_job_test(). Executed from slurmctld.
+ * IN job_ptr - pointer to job being initiated
+ */
+extern int select_g_job_init(struct job_record *job_ptr);
+
+/*
+ * Note termination of job is starting. Executed from slurmctld.
+ * IN job_ptr - pointer to job being terminated
+ */
+extern int select_g_job_fini(struct job_record *job_ptr);
+
+/* allocate storage for a select job credential
+ * OUT jobinfo - storage for a select job credential
+ * RET         - slurm error code
+ * NOTE: storage must be freed using select_g_free_jobinfo
+ */
+extern int select_g_alloc_jobinfo (select_jobinfo_t *jobinfo);
+
+/* fill in a previously allocated select job credential
+ * IN/OUT jobinfo  - updated select job credential
+ * IN data_type - type of data to enter into job credential
+ * IN data - the data to enter into job credential
+ */
+extern int select_g_set_jobinfo (select_jobinfo_t jobinfo,
+		int data_type, void *data);
+
+/* get data from a select job credential
+ * IN jobinfo  - select job credential to read
+ * IN data_type - type of data to retrieve from the job credential
+ * IN/OUT data - location into which the requested data is copied
+ */
+extern int select_g_get_jobinfo (select_jobinfo_t jobinfo,
+		int data_type, void *data);
+
+/* copy a select job credential
+ * IN jobinfo - the select job credential to be copied
+ * RET        - the copy or NULL on failure
+ * NOTE: returned value must be freed using select_g_free_jobinfo
+ */
+extern select_jobinfo_t select_g_copy_jobinfo(select_jobinfo_t jobinfo);
+
+/* free storage previously allocated for a select job credential
+ * IN jobinfo  - the select job credential to be freed
+ * RET         - slurm error code
+ */
+extern int select_g_free_jobinfo  (select_jobinfo_t *jobinfo);
+
+/* pack a select job credential into a buffer in machine independent form
+ * IN jobinfo  - the select job credential to be saved
+ * OUT buffer  - buffer with select credential appended
+ * RET         - slurm error code
+ */
+extern int  select_g_pack_jobinfo  (select_jobinfo_t jobinfo, Buf buffer);
+
+/* unpack a select job credential from a buffer
+ * OUT jobinfo - the select job credential read
+ * IN  buffer  - buffer with select credential read from current pointer loc
+ * RET         - slurm error code
+ * NOTE: returned value must be freed using select_g_free_jobinfo
+ */
+extern int  select_g_unpack_jobinfo(select_jobinfo_t jobinfo, Buf buffer);
+
+/* write select job credential to a string
+ * IN jobinfo - a select job credential
+ * OUT buf    - location to write job credential contents
+ * IN size    - byte size of buf
+ * IN mode    - print mode, see enum select_print_mode
+ * RET        - the string, same as buf
+ */
+extern char *select_g_sprint_jobinfo(select_jobinfo_t jobinfo,
+		char *buf, size_t size, int mode);
+
+#endif /*__SELECT_PLUGIN_API_H__*/
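
The credential calls declared above are intended to be used as a set: allocate a select_jobinfo, fill it with SELECT_DATA_* keys, print or pack it as needed, and free it when done. A short sketch of that lifecycle follows; it assumes SYSTEM_DIMENSIONS is 3, as on Blue Gene, and uses placeholder values.

/* Sketch of the jobinfo lifecycle (illustrative only, assumes a
 * three-dimensional geometry): allocate, fill, print, free. */
#include <slurm/slurm_errno.h>

#include "src/common/node_select.h"

static void _jobinfo_demo(void)
{
	select_jobinfo_t jobinfo = NULL;
	uint16_t geometry[3] = {2, 2, 4};	/* assumes SYSTEM_DIMENSIONS == 3 */
	uint16_t conn_type = 0;	/* a value from enum connection_type on Blue Gene builds */
	char buf[128];

	if (select_g_alloc_jobinfo(&jobinfo) != SLURM_SUCCESS)
		return;

	select_g_set_jobinfo(jobinfo, SELECT_DATA_GEOMETRY, geometry);
	select_g_set_jobinfo(jobinfo, SELECT_DATA_CONN_TYPE, &conn_type);

	/* "Connection=... Geometry=2x2x4 ..." on a Blue Gene build,
	 * an empty string elsewhere */
	select_g_sprint_jobinfo(jobinfo, buf, sizeof(buf),
				SELECT_PRINT_MIXED);

	select_g_free_jobinfo(&jobinfo);
}
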
diff --git a/src/common/read_config.c b/src/common/read_config.c
index a73bba69f0684fc90ed84851b0c861f1a8b1515f..09b7899b4a7b00090b659a2add7cd61200d3289c 100644
--- a/src/common/read_config.c
+++ b/src/common/read_config.c
@@ -351,7 +351,7 @@ parse_config_spec (char *in_line, slurm_ctl_conf_t *ctl_conf_ptr)
 	if ( inactive_limit != -1) {
 		if ( ctl_conf_ptr->inactive_limit != (uint16_t) NO_VAL)
 			error (MULTIPLE_VALUE_MSG, "InactiveLimit");
-#ifdef HAVE_BGL
+#ifdef HAVE_BGL		/* Inactive limit must be zero on blue gene */
 		if (inactive_limit) {
 			error("InactiveLimit=%d invalid on Blue Gene/L",
 				inactive_limit);
diff --git a/src/common/read_config.h b/src/common/read_config.h
index 2e7df1275ecded2bf3a9bcbbbdc095af5e47d783..59d3ed11207f361b152f2cc274e737e04fa5eaaf 100644
--- a/src/common/read_config.h
+++ b/src/common/read_config.h
@@ -45,10 +45,10 @@
 #define DEFAULT_RETURN_TO_SERVICE   0
 #define DEFAULT_SAVE_STATE_LOC      "/tmp"
 #define DEFAULT_SCHEDTYPE           "sched/builtin"
-#ifdef HAVE_BGL
-#define DEFAULT_SELECT_TYPE         "select/bluegene"
+#ifdef HAVE_BGL		/* Blue Gene specific default configuration parameters */
+#  define DEFAULT_SELECT_TYPE         "select/bluegene"
 #else
-#define DEFAULT_SELECT_TYPE         "select/linear"
+#  define DEFAULT_SELECT_TYPE         "select/linear"
 #endif
 #define DEFAULT_SLURMCTLD_PIDFILE   "/var/run/slurmctld.pid"
 #define DEFAULT_SLURMCTLD_TIMEOUT   120
diff --git a/src/common/slurm_protocol_defs.c b/src/common/slurm_protocol_defs.c
index 779d2db39d9aba58c67050fe78ee38f3ff44848b..9aac3ab2cb0844d0083968108e3eddcb8e765fae 100644
--- a/src/common/slurm_protocol_defs.c
+++ b/src/common/slurm_protocol_defs.c
@@ -37,6 +37,7 @@
 #include <stdio.h>
 
 #include "src/common/log.h"
+#include "src/common/node_select.h"
 #include "src/common/slurm_cred.h"
 #include "src/common/slurm_protocol_defs.h"
 #include "src/common/switch.h"
@@ -118,6 +119,7 @@ void slurm_free_job_desc_msg(job_desc_msg_t * msg)
 	int i;
 
 	if (msg) {
+		select_g_free_jobinfo(&msg->select_jobinfo);
 		xfree(msg->alloc_node);
 		for (i = 0; i < msg->env_size; i++) {
 			xfree(msg->environment[i]);
@@ -165,9 +167,8 @@ void slurm_free_job_launch_msg(batch_job_launch_msg_t * msg)
 			}
 			xfree(msg->environment);
 		}
-#ifdef HAVE_BGL
-		xfree(msg->bgl_part_id);
-#endif
+
+		select_g_free_jobinfo(&msg->select_jobinfo);
 
 		xfree(msg);
 	}
@@ -184,6 +185,7 @@ void slurm_free_job_info(job_info_t * job)
 void slurm_free_job_info_members(job_info_t * job)
 {
 	if (job) {
+		select_g_free_jobinfo(&job->select_jobinfo);
 		xfree(job->account);
 		xfree(job->nodes);
 		xfree(job->partition);
@@ -463,33 +465,6 @@ char *job_state_string_compact(enum job_states inx)
 		return job_state_string[inx];
 }
 
-
-extern char *job_conn_type_string(uint16_t inx)
-{
-#ifdef HAVE_BGL
-	if (inx == RM_TORUS)
-		return "torus";
-	else if (inx == RM_MESH)
-		return "mesh";
-	else
-		return "nav";
-#else
-	return "n/a";
-#endif
-}
-
-extern char *job_node_use_string(uint16_t inx)
-{
-#ifdef HAVE_BGL
-	if (inx == RM_COPROCESSOR)
-		return "coprocessor";
-	else
-		return "virtual";
-#else
-	return "n/a";
-#endif
-}
-
 char *node_state_string(enum node_states inx)
 {
 	static char *node_state_string[] = {
@@ -560,11 +535,11 @@ void slurm_free_resource_allocation_response_msg (
 				resource_allocation_response_msg_t * msg)
 {
 	if (msg) {
+		select_g_free_jobinfo(&msg->select_jobinfo);
 		xfree(msg->node_list);
 		xfree(msg->cpus_per_node);
 		xfree(msg->cpu_count_reps);
 		xfree(msg->node_addr);
-		xfree(msg->bgl_part_id);
 		xfree(msg);
 	}
 }
@@ -699,7 +674,7 @@ static void _slurm_free_job_info_members(job_info_t * job)
 		xfree(job->req_nodes);
 		xfree(job->features);
 		xfree(job->req_node_inx);
-		xfree(job->bgl_part_id);
+		select_g_free_jobinfo(&job->select_jobinfo);
 	}
 }
 
diff --git a/src/common/slurm_protocol_defs.h b/src/common/slurm_protocol_defs.h
index 4c9cedda56e4771eec8c23d2c8a99004f0b43fb7..0539558bd8070ff6388be75814455b85a5639079 100644
--- a/src/common/slurm_protocol_defs.h
+++ b/src/common/slurm_protocol_defs.h
@@ -366,9 +366,7 @@ typedef struct batch_job_launch_msg {
 	uint16_t envc;		/* element count in environment */
 	char **environment;	/* environment variables to set for job, 
 				 *   name=value pairs, one per line */
-#ifdef HAVE_BGL
-	char *bgl_part_id;	/* Blue Gene partition ID */
-#endif
+	select_jobinfo_t select_jobinfo;	/* opaque data type */
 } batch_job_launch_msg_t;
 
 typedef struct job_id_request_msg {
@@ -503,10 +501,6 @@ void slurm_free_job_step_info_response_msg(
 void slurm_free_node_info_msg(node_info_msg_t * msg);
 void slurm_free_partition_info_msg(partition_info_msg_t * msg);
 
-
-extern char *job_conn_type_string(uint16_t inx);
-extern char *job_dist_string(uint16_t inx);
-extern char *job_node_use_string(uint16_t inx);
 extern char *job_reason_string(enum job_wait_reason inx);
 extern char *job_state_string(enum job_states inx);
 extern char *job_state_string_compact(enum job_states inx);
diff --git a/src/common/slurm_protocol_pack.c b/src/common/slurm_protocol_pack.c
index fe8956f0cd3030d5d68c9cc1d50d826f37141536..089240b20c8ae795664713152ae881b693573fc9 100644
--- a/src/common/slurm_protocol_pack.c
+++ b/src/common/slurm_protocol_pack.c
@@ -35,6 +35,7 @@
 
 #include "src/common/bitstring.h"
 #include "src/common/log.h"
+#include "src/common/node_select.h"
 #include "src/common/pack.h"
 #include "src/common/slurm_auth.h"
 #include "src/common/slurm_cred.h"
@@ -874,10 +875,7 @@ _pack_resource_allocation_response_msg(resource_allocation_response_msg_t *
 
 	pack16(msg->node_cnt, buffer);
 	_pack_slurm_addr_array(msg->node_addr, msg->node_cnt, buffer);
-
-#ifdef HAVE_BGL
-	packstr(msg->bgl_part_id, buffer);
-#endif
+	select_g_pack_jobinfo(msg->select_jobinfo, buffer);
 }
 
 static int
@@ -918,24 +916,24 @@ _unpack_resource_allocation_response_msg(resource_allocation_response_msg_t
 	safe_unpack16(&tmp_ptr->node_cnt, buffer);
 	if (tmp_ptr->node_cnt > 0) {
 		if (_unpack_slurm_addr_array(&(tmp_ptr->node_addr),
-					     &(tmp_ptr->node_cnt), buffer))
+					     &uint16_tmp, buffer))
+			goto unpack_error;
+		if (uint16_tmp != tmp_ptr->node_cnt)
 			goto unpack_error;
 	} else
 		tmp_ptr->node_addr = NULL;
 
-#ifdef HAVE_BGL
-	safe_unpackstr_xmalloc(&tmp_ptr->bgl_part_id,  &uint16_tmp, buffer);
-#endif
+	if (select_g_alloc_jobinfo (&tmp_ptr->select_jobinfo)
+	||  select_g_unpack_jobinfo(tmp_ptr->select_jobinfo, buffer))
+		goto unpack_error;
 
 	return SLURM_SUCCESS;
 
       unpack_error:
+	select_g_free_jobinfo(&tmp_ptr->select_jobinfo);
 	xfree(tmp_ptr->node_list);
 	xfree(tmp_ptr->cpus_per_node);
 	xfree(tmp_ptr->cpu_count_reps);
-#ifdef HAVE_BGL
-	xfree(tmp_ptr->bgl_part_id);
-#endif
 	xfree(tmp_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
@@ -998,7 +996,9 @@ static int
 	safe_unpack16(&tmp_ptr->node_cnt, buffer);
 	if (tmp_ptr->node_cnt > 0) {
 		if (_unpack_slurm_addr_array(&(tmp_ptr->node_addr),
-					     &(tmp_ptr->node_cnt), buffer))
+					     &uint16_tmp, buffer))
+			goto unpack_error;
+		if (uint16_tmp != tmp_ptr->node_cnt)
 			goto unpack_error;
 	} else
 		tmp_ptr->node_addr = NULL;
@@ -1643,18 +1643,10 @@ _unpack_job_info_members(job_info_t * job, Buf buffer)
 		xfree(node_inx_str);
 	}
 	safe_unpack32(&job->num_procs, buffer);
-
-#ifdef HAVE_BGL
-{
-	int i;
-	safe_unpackstr_xmalloc(&job->bgl_part_id, &uint16_tmp, buffer);
-	safe_unpack16(&job->conn_type, buffer);
-	safe_unpack16(&job->rotate, buffer);
-	safe_unpack16(&job->node_use, buffer);
-	for (i=0; i<SYSTEM_DIMENSIONS; i++)
-		safe_unpack16(&job->geometry[i], buffer);
-}
-#endif
+	
+	if (select_g_alloc_jobinfo(&job->select_jobinfo) 
+	||  select_g_unpack_jobinfo(job->select_jobinfo, buffer))
+		goto unpack_error;
 
 	safe_unpack32(&job->num_nodes, buffer);
 	safe_unpack16(&job->shared, buffer);
@@ -1691,9 +1683,7 @@ _unpack_job_info_members(job_info_t * job, Buf buffer)
 	xfree(job->req_nodes);
 	xfree(job->exc_nodes);
 	xfree(job->features);
-#ifdef HAVE_BGL
-	xfree(job->bgl_part_id);
-#endif
+	select_g_free_jobinfo(&job->select_jobinfo);
 	return SLURM_ERROR;
 }
 
@@ -1855,6 +1845,8 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **
 static void
 _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer)
 {
+	select_jobinfo_t jobinfo;
+
 	/* load the data values */
 	pack16(job_desc_ptr->contiguous, buffer);
 	pack16(job_desc_ptr->kill_on_node_fail, buffer);
@@ -1900,15 +1892,18 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer)
 	pack16(job_desc_ptr->port, buffer);
 	packstr(job_desc_ptr->host, buffer);
 
-#ifdef HAVE_BGL
-{	int i;
-	for (i=0; i<SYSTEM_DIMENSIONS; i++)
-		pack16(job_desc_ptr->geometry[i], buffer);		
-	pack16(job_desc_ptr->conn_type, buffer);
-	pack16(job_desc_ptr->rotate, buffer);
-	pack16(job_desc_ptr->node_use, buffer);
-}
-#endif
+	if (select_g_alloc_jobinfo (&jobinfo) == SLURM_SUCCESS) {
+		select_g_set_jobinfo(jobinfo, SELECT_DATA_GEOMETRY, 
+			job_desc_ptr->geometry);
+		select_g_set_jobinfo(jobinfo, SELECT_DATA_CONN_TYPE, 
+			&(job_desc_ptr->conn_type));
+		select_g_set_jobinfo(jobinfo, SELECT_DATA_ROTATE, 
+			&(job_desc_ptr->rotate));
+		select_g_set_jobinfo(jobinfo, SELECT_DATA_NODE_USE, 
+			&(job_desc_ptr->node_use));
+		select_g_pack_jobinfo(jobinfo, buffer);
+		select_g_free_jobinfo(&jobinfo);
+	}
 }
 
 /* _unpack_job_desc_msg
@@ -1972,20 +1967,22 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer)
 	safe_unpack16(&job_desc_ptr->port, buffer);
 	safe_unpackstr_xmalloc(&job_desc_ptr->host, &uint16_tmp, buffer);
 
-#ifdef HAVE_BGL
-{
-	int i;
-	for(i=0; i<SYSTEM_DIMENSIONS; i++)
-		safe_unpack16(&(job_desc_ptr->geometry[i]), buffer);
-	safe_unpack16(&job_desc_ptr->conn_type, buffer);
-	safe_unpack16(&job_desc_ptr->rotate, buffer);
-	safe_unpack16(&job_desc_ptr->node_use, buffer);
-}
-#endif
+	if (select_g_alloc_jobinfo (&job_desc_ptr->select_jobinfo)
+	||  select_g_unpack_jobinfo(job_desc_ptr->select_jobinfo, buffer))
+		goto unpack_error;
+	select_g_get_jobinfo(job_desc_ptr->select_jobinfo, 
+		SELECT_DATA_GEOMETRY, job_desc_ptr->geometry);
+	select_g_get_jobinfo(job_desc_ptr->select_jobinfo, 
+		SELECT_DATA_CONN_TYPE, &job_desc_ptr->conn_type);
+	select_g_get_jobinfo(job_desc_ptr->select_jobinfo, 
+		SELECT_DATA_ROTATE, &job_desc_ptr->rotate);
+	select_g_get_jobinfo(job_desc_ptr->select_jobinfo, 
+		SELECT_DATA_NODE_USE, &job_desc_ptr->node_use);
 
 	return SLURM_SUCCESS;
 
       unpack_error:
+	select_g_free_jobinfo(&job_desc_ptr->select_jobinfo);
 	xfree(job_desc_ptr->features);
 	xfree(job_desc_ptr->name);
 	xfree(job_desc_ptr->partition);
@@ -2686,9 +2683,7 @@ _pack_batch_job_launch_msg(batch_job_launch_msg_t * msg, Buf buffer)
 	pack16(msg->envc, buffer);
 	packstr_array(msg->environment, msg->envc, buffer);
 
-#ifdef HAVE_BGL
-	packstr(msg->bgl_part_id, buffer);
-#endif
+	select_g_pack_jobinfo(msg->select_jobinfo, buffer);
 }
 
 static int
@@ -2739,10 +2734,9 @@ _unpack_batch_job_launch_msg(batch_job_launch_msg_t ** msg, Buf buffer)
 	safe_unpackstr_array(&launch_msg_ptr->environment,
 			     &launch_msg_ptr->envc, buffer);
 
-#ifdef HAVE_BGL
-	safe_unpackstr_xmalloc(&launch_msg_ptr->bgl_part_id, 
-		&uint16_tmp, buffer);
-#endif
+	if (select_g_alloc_jobinfo (&launch_msg_ptr->select_jobinfo)
+	||  select_g_unpack_jobinfo(launch_msg_ptr->select_jobinfo, buffer))
+		goto unpack_error;
 
 	return SLURM_SUCCESS;
 
@@ -2755,9 +2749,7 @@ _unpack_batch_job_launch_msg(batch_job_launch_msg_t ** msg, Buf buffer)
 	xfree(launch_msg_ptr->out);
 	xfree(launch_msg_ptr->argv);
 	xfree(launch_msg_ptr->environment);
-#ifdef HAVE_BGL
-	xfree(launch_msg_ptr->bgl_part_id);
-#endif
+	select_g_free_jobinfo(&launch_msg_ptr->select_jobinfo);
 	xfree(launch_msg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
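
Because the pack routines above now always emit a select_jobinfo record and the unpack routines always allocate and read one back, both ends of the protocol stay in step whether or not Blue Gene support is compiled in. The round-trip sketch below assumes the Buf helpers init_buf(), set_buf_offset(), and free_buf() from src/common/pack.h and an already-filled source credential.

/* Round-trip sketch (illustrative only): pack a select job credential
 * into a buffer, then unpack it into a freshly allocated credential. */
#include <slurm/slurm_errno.h>

#include "src/common/node_select.h"
#include "src/common/pack.h"

static int _jobinfo_round_trip(select_jobinfo_t src)
{
	select_jobinfo_t copy = NULL;
	Buf buffer = init_buf(1024);	/* arbitrary size for the sketch */
	int rc = SLURM_ERROR;

	select_g_pack_jobinfo(src, buffer);
	set_buf_offset(buffer, 0);	/* rewind before reading back */

	if ((select_g_alloc_jobinfo(&copy) == SLURM_SUCCESS) &&
	    (select_g_unpack_jobinfo(copy, buffer) == SLURM_SUCCESS))
		rc = SLURM_SUCCESS;

	select_g_free_jobinfo(&copy);
	free_buf(buffer);
	return rc;
}
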
diff --git a/src/common/switch.h b/src/common/switch.h
index 47e26234a669536391c5ffd2761932bbbc57f2bb..e3619bc3b472af88b880ce51045aecdcfbaffaa1 100644
--- a/src/common/switch.h
+++ b/src/common/switch.h
@@ -42,13 +42,16 @@
 #ifndef __switch_jobinfo_t_defined
 #  define __switch_jobinfo_t_defined
    typedef struct switch_jobinfo   *switch_jobinfo_t;
+#endif
+#ifndef __switch_node_info_t_defined
+#  define __switch_node_info_t_defined
    typedef struct switch_node_info *switch_node_info_t;
 #endif
 typedef struct slurm_switch_context * slurm_switch_context_t;
 
 /*****************************************\
  * GLOBAL SWITCH STATE MANGEMENT FUNCIONS *
-\ *****************************************/
+\*****************************************/
 
 /* initialize the switch plugin */
 extern int  switch_init   (void);
diff --git a/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c b/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c
index 38c62103d2ad3045201e59165b8c6e343c180050..92be656a150cd79a7e7d153b13ee8f8e7a4a1c0f 100644
--- a/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c
+++ b/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c
@@ -52,14 +52,8 @@
 #include "src/common/xstring.h"
 #include "src/slurmctld/slurmctld.h"
 
-#ifdef HAVE_BGL
-#  define JOB_FORMAT "JobId=%lu UserId=%s(%lu) Name=%s JobState=%s Partition=%s "\
-		"TimeLimit=%s StartTime=%s EndTime=%s NodeList=%s "\
-		"Geometry=%ux%ux%u Rotate=%u ConnType=%s NodeUse=%s\n"
-#else
-#  define JOB_FORMAT "JobId=%lu UserId=%s(%lu) Name=%s JobState=%s Partition=%s "\
-		"TimeLimit=%s StartTime=%s EndTime=%s NodeList=%s\n"
-#endif
+#define JOB_FORMAT "JobId=%lu UserId=%s(%lu) Name=%s JobState=%s Partition=%s "\
+		"TimeLimit=%s StartTime=%s EndTime=%s NodeList=%s %s\n"
  
 /* Type for error string table entries */
 typedef struct {
@@ -194,6 +188,7 @@ int slurm_jobcomp_log_record ( struct job_record *job_ptr )
 	int rc = SLURM_SUCCESS;
 	char job_rec[256];
 	char usr_str[32], start_str[32], end_str[32], lim_str[32];
+	char select_buf[80];
 	size_t offset = 0, tot_size, wrote;
 	enum job_states job_state;
 
@@ -218,19 +213,16 @@ int slurm_jobcomp_log_record ( struct job_record *job_ptr )
 	_make_time_str(&(job_ptr->start_time), start_str, sizeof(start_str));
 	_make_time_str(&(job_ptr->end_time),   end_str,   sizeof(end_str));
 
+	select_g_sprint_jobinfo(job_ptr->select_jobinfo,
+		select_buf, sizeof(select_buf), SELECT_PRINT_MIXED);
+
 	snprintf(job_rec, sizeof(job_rec), JOB_FORMAT,
 			(unsigned long) job_ptr->job_id, usr_str, 
 			(unsigned long) job_ptr->user_id, job_ptr->name, 
 			job_state_string(job_state), 
 			job_ptr->partition, lim_str, start_str, 
-			end_str, job_ptr->nodes
-#ifdef HAVE_BGL
-			, job_ptr->geometry[0], job_ptr->geometry[1], 
-			job_ptr->geometry[2], job_ptr->rotate,
-			job_conn_type_string(job_ptr->conn_type),
-			job_node_use_string(job_ptr->node_use)
-#endif
-			);
+			end_str, job_ptr->nodes,
+			select_buf);
 	tot_size = strlen(job_rec);
 
 	while ( offset < tot_size ) {
diff --git a/src/plugins/select/bluegene/bluegene.c b/src/plugins/select/bluegene/bluegene.c
index 99a80d9c55cbf046b456c548c01dc2f43e8381c9..9d8a73af3432250642f936195c401ec55a5c4e98 100644
--- a/src/plugins/select/bluegene/bluegene.c
+++ b/src/plugins/select/bluegene/bluegene.c
@@ -26,6 +26,7 @@
 #include <stdlib.h>
 #include "src/slurmctld/proc_req.h"
 #include "src/common/list.h"
+#include "src/common/node_select.h"
 #include "src/common/read_config.h"
 #include "src/common/parse_spec.h"
 #include "src/common/xstring.h"
@@ -818,8 +819,19 @@ int _find_best_partition_match(struct job_record* job_ptr, bitstr_t* slurm_part_
 	bgl_record_t* record;
 	int i, num_dim_best, cur_dim_match;
 	uint16_t* geometry = NULL;
+	uint16_t req_geometry[SYSTEM_DIMENSIONS];
+	uint16_t conn_type, node_use, rotate;
 	sort_bgl_record_inc_size(bgl_list);
 
+	select_g_get_jobinfo(job_ptr->select_jobinfo,
+		SELECT_DATA_CONN_TYPE, &conn_type);
+	select_g_get_jobinfo(job_ptr->select_jobinfo,
+		SELECT_DATA_GEOMETRY, req_geometry);
+	select_g_get_jobinfo(job_ptr->select_jobinfo,
+		SELECT_DATA_NODE_USE, &node_use);
+	select_g_get_jobinfo(job_ptr->select_jobinfo,
+		SELECT_DATA_ROTATE, &rotate);
+
 	/** this is where we should have the control flow depending on
 	    the spec arguement*/
 	num_dim_best = 0;
@@ -877,21 +889,21 @@ int _find_best_partition_match(struct job_record* job_ptr, bitstr_t* slurm_part_
 			error("find_best_partition_match record->part_type is NULL"); 
 			continue;
 		}
-		debug("conn_type %d", job_ptr->conn_type);
-		if (job_ptr->conn_type != *(record->part_type) &&
-		    job_ptr->conn_type != RM_NAV){
+		debug("conn_type %d", conn_type);
+		if (conn_type != *(record->part_type) &&
+		    conn_type != RM_NAV){
 			continue;
 		} 
 		/*****************************************/
 		/** match up geometry as "best" possible */
 		/*****************************************/
-		if (job_ptr->geometry[0] == 0){
+		if (req_geometry[0] == 0){
 			debug("find_best_partitionmatch: we don't care about geometry");
 			*found_bgl_record = record;
 			break;
 		}
-		if (job_ptr->rotate)
-			rotate_part(job_ptr->geometry, &geometry); 
+		if (rotate)
+			rotate_part(req_geometry, &geometry); 
 		
 		cur_dim_match = 0;
 		for (i=0; i<SYSTEM_DIMENSIONS; i++){
@@ -905,7 +917,7 @@ int _find_best_partition_match(struct job_record* job_ptr, bitstr_t* slurm_part_
 			 * we should distinguish between an exact match and a
 			 * fuzzy match (being greater than
 			 */
-			if (record->alloc_part->dimensions[i] >= job_ptr->geometry[i]){
+			if (record->alloc_part->dimensions[i] >= req_geometry[i]){
 				cur_dim_match++;
 			}
 		}
@@ -989,6 +1001,7 @@ int submit_job(struct job_record *job_ptr, bitstr_t *slurm_part_bitmap,
 {
 	int spec = 1; // this will be like, keep TYPE a priority, etc, blah blah.
 	bgl_record_t* record;
+	char buf[100];
 
 	debug("bluegene::submit_job");
 	/*
@@ -998,9 +1011,9 @@ int submit_job(struct job_record *job_ptr, bitstr_t *slurm_part_bitmap,
 	}
 	*/
 	debug("******** job request ********");
-	debug("geometry:\t%d %d %d", job_ptr->geometry[0], job_ptr->geometry[1], job_ptr->geometry[2]);
-	debug("conn_type:\t%s", convert_part_type(&job_ptr->conn_type));
-	debug("rotate:\t%d", job_ptr->rotate);
+	select_g_sprint_jobinfo(job_ptr->select_jobinfo, buf, sizeof(buf), 
+		SELECT_PRINT_MIXED);
+	debug("%s", buf);
 	debug("min_nodes:\t%d", min_nodes);
 	debug("max_nodes:\t%d", max_nodes);
 	_print_bitmap(slurm_part_bitmap);
@@ -1014,15 +1027,12 @@ int submit_job(struct job_record *job_ptr, bitstr_t *slurm_part_bitmap,
 		/* since the bgl_part_id is a number, (most likely single digit), 
 		 * we'll create an LLNL_#, i.e. LLNL_4 = 6 chars + 1 for NULL
 		 */
-		job_ptr->bgl_part_id = (char*) xmalloc(sizeof(char)*7);
-		if (!(job_ptr->bgl_part_id)){
-			error("submit_job: not enough memory for fake bgl_part_id");
-			return SLURM_ERROR;
-		}
-
-		xstrfmtcat(job_ptr->bgl_part_id, "LLNL_%i", *(record->bgl_part_id));
-		debug("found fake bgl_part_id %s", job_ptr->bgl_part_id);
-		/* calling function must free the bgl_part_id */
+		char *bgl_part_id = NULL;
+		xstrfmtcat(bgl_part_id, "LLNL_%i", *(record->bgl_part_id));
+		debug("found fake bgl_part_id %s", bgl_part_id);
+		select_g_set_jobinfo(job_ptr->select_jobinfo,
+			SELECT_DATA_PART_ID, bgl_part_id);
+		xfree(bgl_part_id);
 	}
 
 	/** we should do the BGL stuff here like, init BGL job stuff... */
diff --git a/src/plugins/select/linear/select_linear.c b/src/plugins/select/linear/select_linear.c
index bfde18849d174e59bbf2697c17929e894e855b78..f721a7f4c5f29777b31f6ecd3f301ed36fee06e1 100644
--- a/src/plugins/select/linear/select_linear.c
+++ b/src/plugins/select/linear/select_linear.c
@@ -43,6 +43,7 @@
 
 #include "src/common/list.h"
 #include "src/common/log.h"
+#include "src/common/node_select.h"
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/xassert.h"
 #include "src/common/xmalloc.h"
@@ -387,6 +388,10 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 
 extern int select_p_job_init(struct job_record *job_ptr)
 {
+	/* FIXME: Remove this statement after Blue Gene testing is complete */
+	select_g_set_jobinfo(job_ptr->select_jobinfo, SELECT_DATA_PART_ID, 
+		"TESTING");
+
 	return SLURM_SUCCESS;
 }
 
diff --git a/src/scontrol/scontrol.c b/src/scontrol/scontrol.c
index dbf544d419ccaeb89bcc557cddbbd167a6e779ff..10e9092e1daa31d1845e8f852ff7f3e4c69bde4d 100644
--- a/src/scontrol/scontrol.c
+++ b/src/scontrol/scontrol.c
@@ -1518,7 +1518,6 @@ _update_job (int argc, char *argv[])
 			job_msg.dependency =
 				(uint32_t) strtol(&argv[i][11],
 					(char **) NULL, 10);
-#ifdef HAVE_BGL
 		else if (strncasecmp(argv[i], "Geometry=", 9) == 0) {
 			char* token, *delimiter = ",x", *next_ptr;
 			int j, rc = 0;
@@ -1585,7 +1584,6 @@ _update_job (int argc, char *argv[])
 					(uint16_t) strtol(&argv[i][8], 
 							(char **) NULL, 10);
 		}
-#endif
 		else {
 			exit_code = 1;
 			fprintf (stderr, "Invalid input: %s\n", argv[i]);
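
The Geometry= handling above, now compiled unconditionally, splits a specification such as 2x2x4 on the ",x" delimiters into per-dimension node counts, which is the shape SELECT_DATA_GEOMETRY expects (compare _pack_job_desc_msg earlier in this patch). A standalone sketch of that parsing, assuming three dimensions, follows; it is not scontrol's actual code.

/* Standalone sketch: split a geometry spec such as "2x2x4" or "2,2,4"
 * into per-dimension node counts. Assumes three dimensions. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static int _parse_geometry(const char *spec, uint16_t geometry[3])
{
	char *tmp = strdup(spec), *save = NULL, *tok;
	int i = 0;

	for (tok = strtok_r(tmp, ",x", &save); tok && (i < 3);
	     tok = strtok_r(NULL, ",x", &save))
		geometry[i++] = (uint16_t) strtol(tok, NULL, 10);

	free(tmp);
	return (i == 3) ? 0 : -1;	/* require all three dimensions */
}
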
diff --git a/src/slurmctld/Makefile.am b/src/slurmctld/Makefile.am
index cfbe4e06478abb46c78c814fecb44c07511207d2..d6135dae81da67f7f2744a7fe79246f32cca0e7d 100644
--- a/src/slurmctld/Makefile.am
+++ b/src/slurmctld/Makefile.am
@@ -24,7 +24,7 @@ slurmctld_SOURCES = 	\
 	locks.c   	\
 	locks.h  	\
 	node_mgr.c 	\
-	node_scheduler.c\
+	node_scheduler.c node_scheduler.h \
 	partition_mgr.c \
 	ping_nodes.c	\
 	ping_nodes.h	\
@@ -35,8 +35,6 @@ slurmctld_SOURCES = 	\
 	sched_plugin.c	\
 	sched_plugin.h	\
 	sched_upcalls.c	\
-	select_plugin.c	\
-	select_plugin.h	\
 	slurmctld.h	\
 	srun_comm.c	\
 	srun_comm.h	\
diff --git a/src/slurmctld/agent.c b/src/slurmctld/agent.c
index e0bf9359c2e544f20dd60aac81194dcc652c9dbb..b164630c0bc75574eee5e3126fb9beeabc508cf6 100644
--- a/src/slurmctld/agent.c
+++ b/src/slurmctld/agent.c
@@ -61,6 +61,7 @@
 #include "src/common/list.h"
 #include "src/common/log.h"
 #include "src/common/macros.h"
+#include "src/common/node_select.h"
 #include "src/common/xsignal.h"
 #include "src/common/xassert.h"
 #include "src/common/xmalloc.h"
@@ -905,9 +906,6 @@ static void _slurmctld_free_job_launch_msg(batch_job_launch_msg_t * msg)
 			xfree(msg->environment[0]);
 			xfree(msg->environment);
 		}
-#ifdef HAVE_BGL
-		xfree(msg->bgl_part_id);
-#endif
 		slurm_free_job_launch_msg(msg);
 	}
 }
diff --git a/src/slurmctld/controller.c b/src/slurmctld/controller.c
index 8a9a1fc4358e484b0991f606df0bc2d412e24854..f7a22037db2ac539b060877c2d8a6b7de8160f75 100644
--- a/src/slurmctld/controller.c
+++ b/src/slurmctld/controller.c
@@ -52,6 +52,7 @@
 #include "src/common/hostlist.h"
 #include "src/common/log.h"
 #include "src/common/macros.h"
+#include "src/common/node_select.h"
 #include "src/common/pack.h"
 #include "src/common/read_config.h"
 #include "src/common/slurm_auth.h"
@@ -68,7 +69,6 @@
 #include "src/slurmctld/read_config.h"
 #include "src/slurmctld/slurmctld.h"
 #include "src/slurmctld/sched_plugin.h"
-#include "src/slurmctld/select_plugin.h"
 #include "src/slurmctld/srun_comm.h"
 #include "src/slurmctld/state_save.h"
 
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index 9796a2211e6b31eaaa25007e56a10d57f8846f41..e2c27e6d69209d4eee1ba6e9ac02dfe46f09f224 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -44,12 +44,15 @@
 
 #include "src/common/bitstring.h"
 #include "src/common/hostlist.h"
+#include "src/common/node_select.h"
 #include "src/common/slurm_jobcomp.h"
 #include "src/common/switch.h"
 #include "src/common/xassert.h"
 #include "src/common/xstring.h"
+
 #include "src/slurmctld/agent.h"
 #include "src/slurmctld/locks.h"
+#include "src/slurmctld/node_scheduler.h"
 #include "src/slurmctld/proc_req.h"
 #include "src/slurmctld/slurmctld.h"
 #include "src/slurmctld/sched_plugin.h"
@@ -418,18 +421,8 @@ static void _dump_job_state(struct job_record *dump_job_ptr, Buf buffer)
 	packstr(dump_job_ptr->name, buffer);
 	packstr(dump_job_ptr->alloc_node, buffer);
 	packstr(dump_job_ptr->account, buffer);
-
-#ifdef HAVE_BGL
-{
-	int i;
-	packstr(dump_job_ptr->bgl_part_id, buffer);
-	pack16(dump_job_ptr->conn_type, buffer);
-	pack16(dump_job_ptr->rotate, buffer);
-	pack16(dump_job_ptr->node_use, buffer);
-	for (i=0; i<SYSTEM_DIMENSIONS; i++)
-		pack16(dump_job_ptr->geometry[i], buffer);
-}
-#endif
+	select_g_pack_jobinfo(dump_job_ptr->select_jobinfo,
+		buffer);
 
 	/* Dump job details, if available */
 	detail_ptr = dump_job_ptr->details;
@@ -461,14 +454,10 @@ static int _load_job_state(Buf buffer)
 	uint16_t kill_on_node_fail, kill_on_step_done, name_len, port;
 	char *nodes = NULL, *partition = NULL, *name = NULL;
 	char *alloc_node = NULL, *host = NULL, *account = NULL;
-#ifdef HAVE_BGL
-	int i;
-	uint16_t conn_type, node_use, rotate, geometry[SYSTEM_DIMENSIONS];
-	char *bgl_part_id;
-#endif
 	struct job_record *job_ptr;
 	struct part_record *part_ptr;
 	int error_code;
+	select_jobinfo_t select_jobinfo = NULL;
 
 	safe_unpack32(&job_id, buffer);
 	safe_unpack32(&user_id, buffer);
@@ -496,39 +485,11 @@ static int _load_job_state(Buf buffer)
 	safe_unpackstr_xmalloc(&alloc_node, &name_len, buffer);
 	safe_unpackstr_xmalloc(&account, &name_len, buffer);
 
-#ifdef HAVE_BGL
-	safe_unpackstr_xmalloc(&bgl_part_id, &name_len, buffer);
-	safe_unpack16(&conn_type, buffer);
-	safe_unpack16(&rotate, buffer);
-	safe_unpack16(&node_use, buffer);
-	for (i=0; i<SYSTEM_DIMENSIONS; i++)
-		safe_unpack16(&geometry[i], buffer);
-#endif
-
-	/* validity test as possible */
-#ifdef HAVE_BGL
-	if ((conn_type != RM_MESH)
-	&&  (conn_type != RM_TORUS)
-	&&  (conn_type != RM_NAV)) {
-		error("Invalid data for job %u: conn_type=%u",
-			job_id, conn_type);
-		goto unpack_error;
-	}
-
-	if (rotate > 1) {
-		error("Invalid data for job %u: rotate=%u",
-			job_id, rotate);
+	if (select_g_alloc_jobinfo(&select_jobinfo)
+	||  select_g_unpack_jobinfo(select_jobinfo, buffer))
 		goto unpack_error;
-	}
-
-	if ((node_use != RM_VIRTUAL)
-	&&  (node_use != RM_COPROCESSOR)) {
-		error("Invalid data for job %u: node_use=%u",
-			job_id, node_use);
-		goto unpack_error;
-	}
-#endif
 
+	/* validity test as possible */
 	if (((job_state & (~JOB_COMPLETING)) >= JOB_END) || 
 	    (batch_flag > 1)) {
 		error("Invalid data for job %u: job_state=%u batch_flag=%u",
@@ -608,14 +569,8 @@ static int _load_job_state(Buf buffer)
 	job_ptr->batch_flag        = batch_flag;
 	job_ptr->port              = port;
 	job_ptr->host              = host;
-#ifdef HAVE_BGL
-	job_ptr->bgl_part_id       = bgl_part_id;
-	job_ptr->conn_type         = conn_type;
-	job_ptr->rotate            = rotate;
-	job_ptr->node_use          = node_use;
-	for (i=0; i<SYSTEM_DIMENSIONS; i++)
-		job_ptr->geometry[i] = geometry[i];
-#endif
+	job_ptr->select_jobinfo = select_jobinfo;
+
 	build_node_details(job_ptr);	/* set: num_cpu_groups, cpus_per_node, 
 					 *	cpu_count_reps, node_cnt, and
 					 *	node_addr */
@@ -638,6 +593,7 @@ static int _load_job_state(Buf buffer)
 	xfree(name);
 	xfree(alloc_node);
 	xfree(account);
+	select_g_free_jobinfo(&select_jobinfo);
 	return SLURM_FAILURE;
 }
 
@@ -1055,6 +1011,7 @@ void dump_job_desc(job_desc_msg_t * job_specs)
 	long job_id, min_procs, min_memory, min_tmp_disk, num_procs;
 	long min_nodes, max_nodes, time_limit, priority, contiguous;
 	long kill_on_node_fail, shared, task_dist, immediate, dependency;
+	char buf[100];
 
 	if (job_specs == NULL)
 		return;
@@ -1149,43 +1106,10 @@ void dump_job_desc(job_desc_msg_t * job_specs)
 	       job_specs->host, job_specs->port,
 	       dependency, job_specs->account);
 
-#ifdef HAVE_BGL
-{
-	char *conn_type, *rotate, *node_use;
-	int geometry[SYSTEM_DIMENSIONS];
-
-	if (job_specs->conn_type == RM_MESH)
-		conn_type = "MESH";
-	else if (job_specs->conn_type == RM_TORUS)
-		conn_type = "TORUS";
-	else 
-		conn_type = "NAV";
-
-	if (job_specs->rotate == 0)
-		rotate = "NO";
-	else
-		rotate = "YES";
-
-	if (job_specs->node_use == RM_VIRTUAL)
-		node_use = "VIRTUAL";
-	else
-		node_use = "COPROCESSOR";
-
-	if (job_specs->geometry[0] == (uint16_t) NO_VAL) {
-		geometry[0] = -1;
-		geometry[1] = -1;
-		geometry[2] = -1;
-	} else {
-		geometry[0] = job_specs->geometry[0];
-		geometry[1] = job_specs->geometry[1];
-		geometry[2] = job_specs->geometry[2];
-	}
-
-	debug3("   conn_type=%s rotate=%s node_use=%s geometry=%d,%d,%d",
-		conn_type, rotate, node_use,
-		geometry[0], geometry[1], geometry[2]);
-}
-#endif
+	select_g_sprint_jobinfo(job_specs->select_jobinfo, 
+		buf, sizeof(buf), SELECT_PRINT_MIXED);
+	if (buf[0] != '\0')
+		debug3("   %s", buf);
 }
 
 
@@ -1643,7 +1567,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 
 	if (job_desc->min_nodes == NO_VAL)
 		job_desc->min_nodes = 1;
-#ifdef SYSTEM_DIMENSIONS
+#if SYSTEM_DIMENSIONS
 	if ((job_desc->geometry[0] != (uint16_t) NO_VAL)
 	&&  (job_desc->geometry[0] != 0)) {
 		int i, tot = 1;
@@ -2154,20 +2078,21 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 		detail_ptr->out = xstrdup(job_desc->out);
 	if (job_desc->work_dir)
 		detail_ptr->work_dir = xstrdup(job_desc->work_dir);
-#ifdef HAVE_BGL
-	if (SYSTEM_DIMENSIONS
-	&&  (job_desc->geometry[0] != (uint16_t) NO_VAL)) {
-		int i;
-		for (i=0; i<SYSTEM_DIMENSIONS; i++)
-			job_ptr->geometry[i] = job_desc->geometry[i];
-	}
-	if (job_desc->conn_type != (uint16_t) NO_VAL)
-		job_ptr->conn_type = job_desc->conn_type;
-	if (job_desc->rotate != (uint16_t) NO_VAL)
-		job_ptr->rotate = job_desc->rotate;
-	if (job_desc->node_use != (uint16_t) NO_VAL)
-		job_ptr->node_use = job_desc->node_use;
-#endif
+
+	if (select_g_alloc_jobinfo(&job_ptr->select_jobinfo))
+		return SLURM_ERROR;
+	select_g_set_jobinfo(job_ptr->select_jobinfo,
+		SELECT_DATA_GEOMETRY, 
+		job_desc->geometry);
+	select_g_set_jobinfo(job_ptr->select_jobinfo,
+		SELECT_DATA_CONN_TYPE, 
+		&job_desc->conn_type);
+	select_g_set_jobinfo(job_ptr->select_jobinfo,
+		SELECT_DATA_ROTATE, 
+		&job_desc->rotate);
+	select_g_set_jobinfo(job_ptr->select_jobinfo,
+		SELECT_DATA_NODE_USE, 
+		&job_desc->node_use);
 
 	*job_rec_ptr = job_ptr;
 	return SLURM_SUCCESS;
@@ -2353,17 +2278,20 @@ static int _validate_job_desc(job_desc_msg_t * job_desc_msg, int allocate,
 		job_desc_msg->shared = 0;	/* default not shared nodes */
 	if (job_desc_msg->min_procs == NO_VAL)
 		job_desc_msg->min_procs = 1;	/* default 1 cpu per node */
-#ifdef HAVE_BGL 
+
+#if SYSTEM_DIMENSIONS
 	if (job_desc_msg->geometry[0] == (uint16_t) NO_VAL) {
-		int i;	/* geometry doesn't matter */
+		int i;
 		for (i=0; i<SYSTEM_DIMENSIONS; i++)
 			job_desc_msg->geometry[i] = 0;
 	}
+#endif
 	if (job_desc_msg->conn_type == (uint16_t) NO_VAL)
 		job_desc_msg->conn_type = RM_NAV;  /* try TORUS, then MESH */
+	if (job_desc_msg->node_use == (uint16_t) NO_VAL)
+		job_desc_msg->node_use = RM_COPROCESSOR;
 	if (job_desc_msg->rotate == (uint16_t) NO_VAL)
 		job_desc_msg->rotate = true;    /* default to allow rotate */
-#endif
 
 	return SLURM_SUCCESS;
 }
@@ -2403,9 +2331,7 @@ static void _list_delete_job(void *job_entry)
 	xfree(job_ptr->node_addr);
 	xfree(job_ptr->host);
 	xfree(job_ptr->account);
-#ifdef HAVE_BGL
-	xfree(job_ptr->bgl_part_id);
-#endif
+	select_g_free_jobinfo(&job_ptr->select_jobinfo);
 	if (job_ptr->step_list) {
 		delete_all_step_records(job_ptr);
 		list_destroy(job_ptr->step_list);
@@ -2556,18 +2482,9 @@ void pack_job(struct job_record *dump_job_ptr, Buf buffer)
 	packstr(dump_job_ptr->alloc_node, buffer);
 	pack_bit_fmt(dump_job_ptr->node_bitmap, buffer);
 	pack32(dump_job_ptr->num_procs, buffer);
+	
+	select_g_pack_jobinfo(dump_job_ptr->select_jobinfo, buffer);
 
-#ifdef HAVE_BGL
-{
-	int i;
-	packstr(dump_job_ptr->bgl_part_id, buffer);
-	pack16(dump_job_ptr->conn_type, buffer);
-	pack16(dump_job_ptr->rotate, buffer);
-	pack16(dump_job_ptr->node_use, buffer);
-	for (i=0; i<SYSTEM_DIMENSIONS; i++)
-		pack16(dump_job_ptr->geometry[i], buffer);
-}
-#endif
 	detail_ptr = dump_job_ptr->details;
 	if (detail_ptr && dump_job_ptr->job_state == JOB_PENDING)
 		_pack_job_details(detail_ptr, buffer);
@@ -3142,26 +3059,33 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 			job_ptr->dependency = job_specs->dependency;
 	}
 
-#ifdef HAVE_BGL
+#if SYSTEM_DIMENSIONS
 	if (job_specs->geometry[0] != (uint16_t) NO_VAL) {
 		int i, tot = 1;
-		for (i=0; i<SYSTEM_DIMENSIONS; i++) {
+		for (i=0; i<SYSTEM_DIMENSIONS; i++)
 			tot *= job_specs->geometry[i];
-			job_ptr->geometry[i] = job_specs->geometry[i];
-		}
 		detail_ptr->min_nodes = tot;
 		detail_ptr->max_nodes = tot;
+		select_g_set_jobinfo(job_ptr->select_jobinfo,
+			SELECT_DATA_GEOMETRY,
+			job_specs->geometry);
 	}
+#endif
 
 	if (job_specs->conn_type != (uint16_t) NO_VAL)
-		job_ptr->conn_type = job_specs->conn_type;
+		select_g_set_jobinfo(job_ptr->select_jobinfo,
+			SELECT_DATA_CONN_TYPE,
+			&job_specs->conn_type);
 
 	if (job_specs->rotate != (uint16_t) NO_VAL)
-		job_ptr->rotate = job_specs->rotate;
+		select_g_set_jobinfo(job_ptr->select_jobinfo,
+			SELECT_DATA_ROTATE,
+			&job_specs->rotate);
 
 	if (job_specs->node_use != (uint16_t) NO_VAL)
-		job_ptr->node_use = job_specs->node_use;
-#endif
+		select_g_set_jobinfo(job_ptr->select_jobinfo,
+			SELECT_DATA_NODE_USE,
+			&job_specs->node_use);
 
 	return error_code;
 }
@@ -3508,8 +3432,8 @@ _xmit_new_end_time(struct job_record *job_ptr)
 			node_names[MAX_NAME_LEN * agent_args->node_count],
 			node_record_table_ptr[i].name, MAX_NAME_LEN);
 		agent_args->node_count++;
-#ifdef HAVE_BGL
-		break;	/* only do one front-end node */
+#ifdef HAVE_BGL		/* operation only on front-end node */
+		break;
 #endif
 	}
 
@@ -3531,15 +3455,15 @@ bool job_epilog_complete(uint32_t job_id, char *node_name,
 		uint32_t return_code)
 {
 	struct job_record  *job_ptr = find_job_record(job_id);
-#ifdef HAVE_BGL
-	int i;
-	struct node_record *node_ptr;
-#endif
 
 	if (job_ptr == NULL)
 		return true;
 
-#ifdef HAVE_BGL
+#ifdef HAVE_BGL		/* only front-end node */
+{
+	int i;
+	struct node_record *node_ptr;
+
 	if (return_code)
 		error("Epilog error on %s, setting DOWN", 
 			job_ptr->nodes);
@@ -3552,6 +3476,7 @@ bool job_epilog_complete(uint32_t job_id, char *node_name,
 		else
 			make_node_idle(node_ptr, job_ptr);
 	}
+}
 #else
 	if (return_code) {
 		error("Epilog error on %s, setting DOWN", node_name);
diff --git a/src/slurmctld/job_scheduler.c b/src/slurmctld/job_scheduler.c
index c6402168a6ff2862ce4fd05278bfccd721fdbe20..8257fe1f06b6b79deaa2b3d143380e39f8190de3 100644
--- a/src/slurmctld/job_scheduler.c
+++ b/src/slurmctld/job_scheduler.c
@@ -37,10 +37,13 @@
 
 #include "src/common/list.h"
 #include "src/common/macros.h"
+#include "src/common/node_select.h"
 #include "src/common/xassert.h"
 #include "src/common/xstring.h"
+
 #include "src/slurmctld/agent.h"
 #include "src/slurmctld/locks.h"
+#include "src/slurmctld/node_scheduler.h"
 #include "src/slurmctld/slurmctld.h"
 #include "src/slurmctld/srun_comm.h"
 
@@ -281,9 +284,7 @@ static void _launch_job(struct job_record *job_ptr)
 	memcpy(launch_msg_ptr->cpu_count_reps, job_ptr->cpu_count_reps,
 			(sizeof(uint32_t) * job_ptr->num_cpu_groups));
 
-#ifdef HAVE_BGL
-	launch_msg_ptr->bgl_part_id = xstrdup(job_ptr->bgl_part_id);
-#endif
+	launch_msg_ptr->select_jobinfo = select_g_copy_jobinfo(job_ptr->select_jobinfo);
 
 	agent_arg_ptr = (agent_arg_t *) xmalloc(sizeof(agent_arg_t));
 	agent_arg_ptr->node_count = 1;
diff --git a/src/slurmctld/node_mgr.c b/src/slurmctld/node_mgr.c
index de974696ecd68ee92ba685ce6b48a48302dd299a..ba373c6ea61c4d4c58b3179fbd3b1791912ad65e 100644
--- a/src/slurmctld/node_mgr.c
+++ b/src/slurmctld/node_mgr.c
@@ -1317,7 +1317,7 @@ static void _sync_bitmaps(struct node_record *node_ptr, int job_count)
 void node_did_resp (char *name)
 {
 	struct node_record *node_ptr;
-#ifdef HAVE_BGL
+#ifdef HAVE_BGL		/* only front-end node */
 	int i;
 
 	for (i=0; i<node_record_count; i++) {
@@ -1383,7 +1383,7 @@ static void _node_did_resp(struct node_record *node_ptr)
 void node_not_resp (char *name, time_t msg_time)
 {
 	struct node_record *node_ptr;
-#ifdef HAVE_BGL
+#ifdef HAVE_BGL		/* only front-end node */
 	int i;
 
 	for (i=0; i<node_record_count; i++) {
@@ -1572,8 +1572,8 @@ void msg_to_slurmd (slurm_msg_type_t msg_type)
 		strncpy (&kill_agent_args->node_names[pos],
 		         node_record_table_ptr[i].name, MAX_NAME_LEN);
 		kill_agent_args->node_count++;
-#ifdef HAVE_BGL
-		break;	/* only done one front-end node */
+#ifdef HAVE_BGL		/* only do one front-end node */
+		break;
 #endif
 	}
 
diff --git a/src/slurmctld/node_scheduler.c b/src/slurmctld/node_scheduler.c
index 7ed2b54312eec4892d347edf7532d9a7f9abc579..f0c9c46d08063e1da3e8750c72c13080dc20ca0e 100644
--- a/src/slurmctld/node_scheduler.c
+++ b/src/slurmctld/node_scheduler.c
@@ -40,13 +40,14 @@
 #include <slurm/slurm_errno.h>
 
 #include "src/common/hostlist.h"
+#include "src/common/node_select.h"
 #include "src/common/xassert.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
 
 #include "src/slurmctld/agent.h"
+#include "src/slurmctld/node_scheduler.h"
 #include "src/slurmctld/sched_plugin.h"
-#include "src/slurmctld/select_plugin.h"
 #include "src/slurmctld/slurmctld.h"
 
 #define BUF_SIZE 1024
@@ -92,7 +93,7 @@ static int _valid_features(char *requested, char *available);
  *	node_record_table_ptr - pointer to global node table
  *	last_node_update - last update time of node table
  */
-void allocate_nodes(struct job_record *job_ptr)
+extern void allocate_nodes(struct job_record *job_ptr)
 {
 	int i;
 
@@ -113,7 +114,7 @@ void allocate_nodes(struct job_record *job_ptr)
  * globals: node_record_count - number of nodes configured
  *	node_record_table_ptr - pointer to global node table
  */
-int count_cpus(unsigned *bitmap)
+extern int count_cpus(unsigned *bitmap)
 {
 	int i, sum;
 
@@ -139,7 +140,7 @@ int count_cpus(unsigned *bitmap)
  * globals: node_record_count - number of nodes in the system
  *	node_record_table_ptr - pointer to global node table
  */
-void deallocate_nodes(struct job_record *job_ptr, bool timeout)
+extern void deallocate_nodes(struct job_record *job_ptr, bool timeout)
 {
 	int i;
 	kill_job_msg_t *kill_job;
@@ -177,7 +178,7 @@ void deallocate_nodes(struct job_record *job_ptr, bool timeout)
 			job_ptr->node_cnt--;
 		}
 		make_node_comp(node_ptr, job_ptr);
-#ifdef HAVE_BGL
+#ifdef HAVE_BGL		/* Only operate on front-end node */
 		if (agent_args->node_count > 0)
 			continue;
 #endif
@@ -385,7 +386,7 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 	bool runable_ever  = false;	/* Job can ever run */
 	bool runable_avail = false;	/* Job can run with available nodes */
 
-#ifdef HAVE_BGL
+#ifdef HAVE_BGL		/* no sharing Blue Gene nodes */
 	if (shared) {
 		error("attempt to share Blue Gene nodes ignored");
 		shared = 0;
@@ -599,7 +600,7 @@ _add_node_set_info(struct node_set *node_set_ptr,
  *	   the request, (e.g. best-fit or other criterion)
  *	3) Call allocate_nodes() to perform the actual allocation
  */
-int select_nodes(struct job_record *job_ptr, bool test_only)
+extern int select_nodes(struct job_record *job_ptr, bool test_only)
 {
 	int error_code = SLURM_SUCCESS, i, shared, node_set_size = 0;
 	bitstr_t *select_bitmap = NULL;
@@ -932,7 +933,7 @@ static int _nodes_in_sets(bitstr_t *req_bitmap,
  *	cpu_count_reps, cpus_per_node, node_addr, node_cnt, num_cpu_groups
  * IN job_ptr - pointer to a job record
  */
-void build_node_details(struct job_record *job_ptr)
+extern void build_node_details(struct job_record *job_ptr)
 {
 	hostlist_t host_list = NULL;
 	struct node_record *node_ptr;
@@ -1158,9 +1159,9 @@ extern void re_kill_job(struct job_record *job_ptr)
 		if (node_ptr->node_state & NODE_STATE_NO_RESPOND)
 			continue;
 		(void) hostlist_push_host(kill_hostlist, node_ptr->name);
-#ifdef HAVE_BGL
+#ifdef HAVE_BGL			/* only do one front-end node */
 		if (agent_args->node_count > 0)
-			continue;	/* only do one front-end node */
+			continue;
 #endif
 		if ((agent_args->node_count + 1) > buf_rec_size) {
 			buf_rec_size += 128;
diff --git a/src/slurmctld/node_scheduler.h b/src/slurmctld/node_scheduler.h
new file mode 100644
index 0000000000000000000000000000000000000000..4429adf9de3397dcbea20a86e04ae2a22a332e6b
--- /dev/null
+++ b/src/slurmctld/node_scheduler.h
@@ -0,0 +1,94 @@
+/*****************************************************************************\
+ *  node_scheduler.h - definitions of functions in node_scheduler.c
+ *****************************************************************************
+ *  Copyright (C) 2004 The Regents of the University of California.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette@llnl.gov> et. al.
+ *  UCRL-CODE-2002-040.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+\*****************************************************************************/
+
+#ifndef _HAVE_NODE_SCHEDULER_H
+#define _HAVE_NODE_SCHEDULER_H
+
+/*
+ * allocate_nodes - change state of specified nodes to NODE_STATE_ALLOCATED
+ * IN job_ptr - job being allocated resources
+ * globals: node_record_count - number of nodes in the system
+ *	node_record_table_ptr - pointer to global node table
+ *	last_node_update - last update time of node table
+ */
+extern void allocate_nodes(struct job_record *job_ptr);
+
+/*
+ * build_node_details - set cpu counts and addresses for allocated nodes:
+ *	cpu_count_reps, cpus_per_node, node_addr, node_cnt, num_cpu_groups
+ * IN job_ptr - pointer to a job record
+ */
+extern void build_node_details(struct job_record *job_ptr);
+
+/*
+ * count_cpus - report how many cpus are associated with the identified nodes 
+ * IN bitmap - map of nodes to tally
+ * RET cpu count
+ * globals: node_record_count - number of nodes configured
+ *	node_record_table_ptr - pointer to global node table
+ */
+extern int count_cpus(unsigned *bitmap);
+
+/*
+ * deallocate_nodes - for a given job, deallocate its nodes and make 
+ *	their state NODE_STATE_COMPLETING
+ * IN job_ptr - pointer to terminating job (already in some COMPLETING state)
+ * IN timeout - true if job exhausted time limit, send REQUEST_KILL_TIMELIMIT
+ *	RPC instead of REQUEST_KILL_JOB
+ * globals: node_record_count - number of nodes in the system
+ *	node_record_table_ptr - pointer to global node table
+ */
+extern void deallocate_nodes(struct job_record *job_ptr, bool timeout);
+
+/*
+ * re_kill_job - for a given job, deallocate its nodes for a second time, 
+ *	basically a cleanup for failed deallocate() calls
+ * IN job_ptr - pointer to terminating job (already in some COMPLETING state)
+ * globals: node_record_count - number of nodes in the system
+ *	node_record_table_ptr - pointer to global node table
+ */
+extern void re_kill_job(struct job_record *job_ptr);
+
+/*
+ * select_nodes - select and allocate nodes to a specific job
+ * IN job_ptr - pointer to the job record
+ * IN test_only - if set do not allocate nodes, just confirm they  
+ *	could be allocated now
+ * RET 0 on success, ESLURM code from slurm_errno.h otherwise
+ * globals: list_part - global list of partition info
+ *	default_part_loc - pointer to default partition 
+ *	config_list - global list of node configuration info
+ * Notes: The algorithm is
+ *	1) Build a table (node_set_ptr) of nodes with the requisite 
+ *	   configuration. Each table entry includes their weight, 
+ *	   node_list, features, etc.
+ *	2) Call _pick_best_nodes() to select those nodes best satisfying 
+ *	   the request, (e.g. best-fit or other criterion)
+ *	3) Call allocate_nodes() to perform the actual allocation
+ */
+extern int select_nodes(struct job_record *job_ptr, bool test_only);
+
+#endif /* !_HAVE_NODE_SCHEDULER_H */
diff --git a/src/slurmctld/partition_mgr.c b/src/slurmctld/partition_mgr.c
index 477637052194e22b1925b11fa51da39baec7ab6c..ab597641df369f1bb52bc1dda530f20fce9b4d93 100644
--- a/src/slurmctld/partition_mgr.c
+++ b/src/slurmctld/partition_mgr.c
@@ -44,11 +44,12 @@
 
 #include "src/common/hostlist.h"
 #include "src/common/list.h"
+#include "src/common/node_select.h"
 #include "src/common/pack.h"
 #include "src/common/xstring.h"
+
 #include "src/slurmctld/locks.h"
 #include "src/slurmctld/proc_req.h"
-#include "src/slurmctld/select_plugin.h"
 #include "src/slurmctld/slurmctld.h"
 
 #define BUF_SIZE 1024
diff --git a/src/slurmctld/ping_nodes.c b/src/slurmctld/ping_nodes.c
index 04256e48f0210de47bd48ec8c61cef03d7cd5d7e..f6df1da012acaa01fd2ff7a2ebaefe9e0e7a5462 100644
--- a/src/slurmctld/ping_nodes.c
+++ b/src/slurmctld/ping_nodes.c
@@ -175,8 +175,8 @@ void ping_nodes (void)
 			continue;
 		}
 
-#ifdef HAVE_BGL
-		if (i > 0)	/* Only one front-end node to ping/register */
+#ifdef HAVE_BGL			/* Only one front-end node */
+		if (i > 0)
 			continue;
 #endif
 
diff --git a/src/slurmctld/proc_req.c b/src/slurmctld/proc_req.c
index a3f0a411b717531a651f417a2bf8e14223e46fe0..e4ef2fb4491b5bc5ac840b7b9c3b01940ea084f3 100644
--- a/src/slurmctld/proc_req.c
+++ b/src/slurmctld/proc_req.c
@@ -46,6 +46,7 @@
 #include "src/common/hostlist.h"
 #include "src/common/log.h"
 #include "src/common/macros.h"
+#include "src/common/node_select.h"
 #include "src/common/pack.h"
 #include "src/common/read_config.h"
 #include "src/common/slurm_auth.h"
@@ -414,9 +415,7 @@ static void _slurm_rpc_allocate_resources(slurm_msg_t * msg)
 		alloc_msg.node_cnt       = job_ptr->node_cnt;
 		alloc_msg.node_list      = xstrdup(job_ptr->nodes);
 		alloc_msg.num_cpu_groups = job_ptr->num_cpu_groups;
-#ifdef HAVE_BGL
-		alloc_msg.bgl_part_id    = xstrdup(job_ptr->bgl_part_id);
-#endif
+		alloc_msg.select_jobinfo = select_g_copy_jobinfo(job_ptr->select_jobinfo);
 		unlock_slurmctld(job_write_lock);
 
 		response_msg.msg_type = RESPONSE_RESOURCE_ALLOCATION;
@@ -428,9 +427,7 @@ static void _slurm_rpc_allocate_resources(slurm_msg_t * msg)
 		xfree(alloc_msg.cpus_per_node);
 		xfree(alloc_msg.node_addr);
 		xfree(alloc_msg.node_list);
-#ifdef HAVE_BGL
-		xfree(alloc_msg.bgl_part_id);
-#endif
+		select_g_free_jobinfo(&alloc_msg.select_jobinfo);
 		schedule_job_save();	/* has own locks */
 		schedule_node_save();	/* has own locks */
 	} else {	/* allocate error */
@@ -499,7 +496,7 @@ static void _slurm_rpc_allocate_and_run(slurm_msg_t * msg)
 
 	req_step_msg.job_id     = job_ptr->job_id;
 	req_step_msg.user_id    = job_desc_msg->user_id;
-#ifdef HAVE_BGL
+#ifdef HAVE_BGL		/* Only run on front-end node */
 	req_step_msg.node_count = 1;
 	req_step_msg.cpu_count  = NO_VAL;
 #else
@@ -962,7 +959,7 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg)
 		return;
 	}
 
-#ifdef HAVE_BGL
+#ifdef HAVE_BGL		/* only root allowed to run job steps */
 	/* non-super users not permitted to run job steps on BGL */
 	if (!_is_super_user(uid)) {
 		info("Attempt to execute job step by uid=%u",
@@ -1137,7 +1134,7 @@ static void _slurm_rpc_node_registration(slurm_msg_t * msg)
 	if (error_code == SLURM_SUCCESS) {
 		/* do RPC call */
 		lock_slurmctld(job_write_lock);
-#ifdef HAVE_BGL
+#ifdef HAVE_BGL		/* operate only on front-end */
 		error_code = validate_nodes_via_front_end(
 					node_reg_stat_msg->job_count,
 					node_reg_stat_msg->job_id,
@@ -1240,15 +1237,14 @@ static void _slurm_rpc_old_job_alloc(slurm_msg_t * msg)
 		alloc_msg.node_cnt       = job_ptr->node_cnt;
 		alloc_msg.node_list      = xstrdup(job_ptr->nodes);
 		alloc_msg.num_cpu_groups = job_ptr->num_cpu_groups;
-#ifdef HAVE_BGL
-		alloc_msg.bgl_part_id    = xstrdup(job_ptr->bgl_part_id);
-#endif
+		alloc_msg.select_jobinfo = select_g_copy_jobinfo(job_ptr->select_jobinfo);
 		unlock_slurmctld(job_read_lock);
 
 		response_msg.msg_type    = RESPONSE_RESOURCE_ALLOCATION;
 		response_msg.data        = &alloc_msg;
 
 		slurm_send_node_msg(msg->conn_fd, &response_msg);
+		select_g_free_jobinfo(&alloc_msg.select_jobinfo);
 		xfree(alloc_msg.cpu_count_reps);
 		xfree(alloc_msg.cpus_per_node);
 		xfree(alloc_msg.node_addr);
diff --git a/src/slurmctld/read_config.c b/src/slurmctld/read_config.c
index 5c14570886c40320df482c25e82990ad3aa0c78c..7e56b5c400d408d9843bb959d84dc2caf15d2848 100644
--- a/src/slurmctld/read_config.c
+++ b/src/slurmctld/read_config.c
@@ -43,6 +43,7 @@
 #include "src/common/hostlist.h"
 #include "src/common/list.h"
 #include "src/common/macros.h"
+#include "src/common/node_select.h"
 #include "src/common/parse_spec.h"
 #include "src/common/read_config.h"
 #include "src/common/slurm_jobcomp.h"
@@ -50,10 +51,10 @@
 #include "src/common/xstring.h"
 
 #include "src/slurmctld/locks.h"
+#include "src/slurmctld/node_scheduler.h"
 #include "src/slurmctld/proc_req.h"
 #include "src/slurmctld/read_config.h"
 #include "src/slurmctld/sched_plugin.h"
-#include "src/slurmctld/select_plugin.h"
 #include "src/slurmctld/slurmctld.h"
 
 #define BUF_SIZE 1024
@@ -309,7 +310,7 @@ static int _parse_node_spec(char *in_line)
 	struct config_record *config_ptr = NULL;
 	hostlist_t addr_list = NULL, host_list = NULL;
 	char *this_node_name;
-#ifndef HAVE_BGL
+#ifndef HAVE_BGL	/* Fake node addresses for front-end */
 	char *this_node_addr;
 #endif
 
@@ -354,7 +355,7 @@ static int _parse_node_spec(char *in_line)
 		xfree(state);
 	}
 
-#ifndef HAVE_BGL
+#ifndef HAVE_BGL	/* Fake node addresses for front-end */
 	if (node_addr &&
 	    ((addr_list = hostlist_create(node_addr)) == NULL)) {
 		error("hostlist_create error for %s: %m", node_addr);
@@ -436,7 +437,7 @@ static int _parse_node_spec(char *in_line)
 			    (state_val != NODE_STATE_UNKNOWN))
 				node_ptr->node_state = state_val;
 			node_ptr->last_response = (time_t) 0;
-#ifdef HAVE_BGL
+#ifdef HAVE_BGL		/* Fake node addresses for front-end */
 			if (node_addr)
 				strncpy(node_ptr->comm_name,
 					node_addr, MAX_NAME_LEN);
@@ -614,7 +615,7 @@ static int _parse_part_spec(char *in_line)
 			goto cleanup;
 		}
 		xfree(shared_str);
-#ifdef HAVE_BGL
+#ifdef HAVE_BGL		/* No shared nodes on Blue Gene */
 		if (shared_val != SHARED_NO) {
 			error("Illegal Shared parameter value for partition %s",
 				partition_name);
diff --git a/src/slurmctld/select_plugin.c b/src/slurmctld/select_plugin.c
deleted file mode 100644
index f87a27373d08c484bab66cb2b39b1c791f5ea1fb..0000000000000000000000000000000000000000
--- a/src/slurmctld/select_plugin.c
+++ /dev/null
@@ -1,287 +0,0 @@
-/*****************************************************************************\
- *  select_plugin.c - node selection plugin wrapper.
- *****************************************************************************
- *  Copyright (C) 2002 The Regents of the University of California.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Morris Jette <jette@llnl.gov>.
- *  UCRL-CODE-2002-040.
- *  
- *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
- *  
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *  
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *  
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
-\*****************************************************************************/
-
-#ifdef HAVE_CONFIG_H
-#  include "config.h"
-#endif
-
-#include <pthread.h>
-
-#include "src/common/list.h"
-#include "src/common/plugin.h"
-#include "src/common/plugrack.h"
-#include "src/common/slurm_protocol_api.h"
-#include "src/common/xstring.h"
-#include "src/slurmctld/slurmctld.h"
-
-/*
- * Local data
- */
-
-typedef struct slurm_select_ops {
-	int		(*state_save)		( char *dir_name );
-	int	       	(*state_restore)	( char *dir_name );
-	int 		(*node_init)		( struct node_record *node_ptr,
-						  int node_cnt);
-	int 		(*part_init)		( List part_list );
-	int		(*job_test)		( struct job_record *job_ptr,
-						  bitstr_t *bitmap, int min_nodes, 
-						  int max_nodes );
-	int		(*job_init)		( struct job_record *job_ptr );
-	int		(*job_fini)		( struct job_record *job_ptr );
-} slurm_select_ops_t;
-
-typedef struct slurm_select_context {
-	char	       	*select_type;
-	plugrack_t     	plugin_list;
-	plugin_handle_t	cur_plugin;
-	int		select_errno;
-	slurm_select_ops_t ops;
-} slurm_select_context_t;
-
-static slurm_select_context_t * g_select_context = NULL;
-static pthread_mutex_t		g_select_context_lock = 
-					PTHREAD_MUTEX_INITIALIZER;
-
-/*
- * Local functions
- */
-static slurm_select_context_t *	_select_context_create(const char *select_type);
-static int 			_select_context_destroy(slurm_select_context_t *c);
-static slurm_select_ops_t *	_select_get_ops(slurm_select_context_t *c);
-
-/*
- * Locate and load the appropriate plugin
- */
-static slurm_select_ops_t * _select_get_ops(slurm_select_context_t *c)
-{
-	/*
-	 * Must be synchronized with slurm_select_ops_t above.
-	 */
-	static const char *syms[] = {
-		"select_p_state_save",
-		"select_p_state_restore",
-		"select_p_node_init",
-		"select_p_part_init",
-		"select_p_job_test",
-		"select_p_job_init",
-		"select_p_job_fini"
-	};
-	int n_syms = sizeof( syms ) / sizeof( char * );
-
-	/* Get plugin list. */
-	if ( c->plugin_list == NULL ) {
-		char *plugin_dir;
-		c->plugin_list = plugrack_create();
-		if ( c->plugin_list == NULL ) {
-			error( "cannot create plugin manager" );
-			return NULL;
-		}
-		plugrack_set_major_type( c->plugin_list, "select" );
-		plugrack_set_paranoia( c->plugin_list,
-				       PLUGRACK_PARANOIA_NONE,
-				       0 );
-		plugin_dir = slurm_get_plugin_dir();
-		plugrack_read_dir( c->plugin_list, plugin_dir );
-		xfree(plugin_dir);
-	}
-
-	c->cur_plugin = plugrack_use_by_type( c->plugin_list, c->select_type );
-	if ( c->cur_plugin == PLUGIN_INVALID_HANDLE ) {
-		error( "cannot find node selection plugin for %s", 
-			c->select_type );
-		return NULL;
-	}
-
-	/* Dereference the API. */
-	if ( plugin_get_syms( c->cur_plugin,
-			      n_syms,
-			      syms,
-			      (void **) &c->ops ) < n_syms ) {
-		error( "incomplete node selection plugin detected" );
-		return NULL;
-	}
-
-	return &c->ops;
-}
-
-/*
- * Create a node selection context
- */
-static slurm_select_context_t *_select_context_create(const char *select_type)
-{
-	slurm_select_context_t *c;
-
-	if ( select_type == NULL ) {
-		debug3( "_select_context_create: no uler type" );
-		return NULL;
-	}
-
-	c = xmalloc( sizeof( slurm_select_context_t ) );
-	c->select_type	= xstrdup( select_type );
-	c->plugin_list	= NULL;
-	c->cur_plugin	= PLUGIN_INVALID_HANDLE;
-	c->select_errno	= SLURM_SUCCESS;
-
-	return c;
-}
-
-/*
- * Destroy a node selection context
- */
-static int _select_context_destroy( slurm_select_context_t *c )
-{
-	/*
-	 * Must check return code here because plugins might still
-	 * be loaded and active.
-	 */
-	if ( c->plugin_list ) {
-		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
-			return SLURM_ERROR;
-		}
-	}
-
-	xfree( c->select_type );
-	xfree( c );
-
-	return SLURM_SUCCESS;
-}
-
-/*
- * Initialize context for node selection plugin
- */
-extern int slurm_select_init(void)
-{
-	int retval = SLURM_SUCCESS;
-	char *select_type = NULL;
-	
-	slurm_mutex_lock( &g_select_context_lock );
-
-	if ( g_select_context ) goto done;
-
-	select_type = slurm_get_select_type();
-	g_select_context = _select_context_create(select_type);
-	if ( g_select_context == NULL ) {
-		error( "cannot create node selection context for %s",
-			 select_type );
-		retval = SLURM_ERROR;
-		goto done;
-	}
-
-	if ( _select_get_ops( g_select_context ) == NULL ) {
-		error( "cannot resolve node selection plugin operations" );
-		_select_context_destroy( g_select_context );
-		g_select_context = NULL;
-		retval = SLURM_ERROR;
-	}
-
- done:
-	slurm_mutex_unlock( &g_select_context_lock );
-	xfree(select_type);
-	return retval;
-}
-
-/*
- * Save any global state information
- */
-extern int select_g_state_save(char *dir_name)
-{
-	if (slurm_select_init() < 0)
-		return SLURM_ERROR;
-
-	return (*(g_select_context->ops.state_save))(dir_name);
-}
-
-/*
- * Initialize context for node selection plugin and
- * restore any global state information
- */
-extern int select_g_state_restore(char *dir_name)
-{
-	if (slurm_select_init() < 0)
-		return SLURM_ERROR;
-
-	return (*(g_select_context->ops.state_restore))(dir_name);
-}
-
-/*
- * Note re/initialization of node record data structure
- */
-extern int select_g_node_init(struct node_record *node_ptr, int node_cnt)
-{
-	if (slurm_select_init() < 0)
-		return SLURM_ERROR;
-
-	return (*(g_select_context->ops.node_init))(node_ptr, node_cnt);
-}
-
-
-/*
- * Note re/initialization of partition record data structure
- */
-extern int select_g_part_init(List part_list)
-{
-	if (slurm_select_init() < 0)
-		return SLURM_ERROR;
-
-	return (*(g_select_context->ops.part_init))(part_list);
-}
-
-/*
- * Select the "best" nodes for given job from those available
- */
-extern int select_g_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
-        int min_nodes, int max_nodes)
-{
-	if (slurm_select_init() < 0)
-		return SLURM_ERROR;
-
-	return (*(g_select_context->ops.job_test))(job_ptr, bitmap, 
-		min_nodes, max_nodes);
-}
-
-/*
- * Note initiation of job is about to begin
- */
-extern int select_g_job_init(struct job_record *job_ptr)
-{
-	if (slurm_select_init() < 0)
-		return SLURM_ERROR;
-
-	return (*(g_select_context->ops.job_init))(job_ptr);
-}
-
-/*
- * Note termination of job is starting
- */
-extern int select_g_job_fini(struct job_record *job_ptr)
-{
-	if (slurm_select_init() < 0)
-		return SLURM_ERROR;
-
-	return (*(g_select_context->ops.job_fini))(job_ptr);
-}
-
diff --git a/src/slurmctld/select_plugin.h b/src/slurmctld/select_plugin.h
deleted file mode 100644
index d5360048f34270ccc593d32d060f4d6379327a61..0000000000000000000000000000000000000000
--- a/src/slurmctld/select_plugin.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*****************************************************************************\
- *  select_plugin.h - Define node selection plugin functions.
- *****************************************************************************
- *  Copyright (C) 2004 The Regents of the University of California.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Morris Jette <jette1@llnl.gov>
- *  UCRL-CODE-2002-040.
- *  
- *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
- *  
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *  
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *  
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
-\*****************************************************************************/
-
-#ifndef __SELECT_PLUGIN_API_H__
-#define __SELECT_PLUGIN_API_H__
-
-#include "src/common/list.h"
-#include "src/slurmctld/slurmctld.h"
-
-/*
- * Initialize context for node selection plugin
- */
-extern int slurm_select_init(void);
-
-/*
- * Save any global state information
- */
-extern int select_g_state_save(char *dir_name);
-
-/*
- * Initialize context for node selection plugin and
- * restore any global state information
- */
-extern int select_g_state_restore(char *dir_name);
-
-/*
- * Note re/initialization of node record data structure
- */
-extern int select_g_node_init(struct node_record *node_ptr, int node_cnt);
-
-/*
- * Note re/initialization of partition record data structure
- */
-extern int select_g_part_init(List part_list);
-
-/*
- * Select the "best" nodes for given job from those available
- */
-extern int select_g_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
-	int min_nodes, int max_nodes);
-
-/*
- * Note initiation of job is about to begin
- */
-extern int select_g_job_init(struct job_record *job_ptr);
-
-/*
- * Note termination of job is starting
- */
-extern int select_g_job_fini(struct job_record *job_ptr);
-
-#endif /*__SELECT_PLUGIN_API_H__*/
diff --git a/src/slurmctld/slurmctld.h b/src/slurmctld/slurmctld.h
index 145bb1837f4689827a35ce8c7e8e9cf587a3ed04..4204db440260d3f8462fa7d6d5f6caf6b6ec9a27 100644
--- a/src/slurmctld/slurmctld.h
+++ b/src/slurmctld/slurmctld.h
@@ -275,13 +275,7 @@ struct job_record {
 	uint16_t kill_on_step_done;	/* 1 if job should be killed when 
 					 * the job step completes, 2 if kill
 					 * in progress */
-#ifdef HAVE_BGL
-	uint16_t geometry[SYSTEM_DIMENSIONS]; /* geometry of the requested job */
-	uint16_t conn_type;		/* type of connection for request */
-	uint16_t node_use;		/* node usage for Blue Gene */ 
-	uint16_t rotate;		/* allow rotation of request?	  */
-	char *bgl_part_id;		/* BGL partition ID */
-#endif
+	select_jobinfo_t select_jobinfo;	/* opaque data */
 	char *nodes;			/* list of nodes allocated to job */
 	bitstr_t *node_bitmap;		/* bitmap of nodes allocated to job */
 	uint32_t num_procs;		/* count of required/allocated processors */
@@ -339,15 +333,6 @@ extern List job_list;			/* list of job_record entries */
  *  Global slurmctld functions
 \*****************************************************************************/
 
-/*
- * allocate_nodes - change state of specified nodes to NODE_STATE_ALLOCATED
- * IN job_ptr - job being allocated resources
- * globals: node_record_count - number of nodes in the system
- *	node_record_table_ptr - pointer to global node table
- *	last_node_update - last update time of node table
- */
-extern void  allocate_nodes (struct job_record *job_ptr);
-
 /*
  * bitmap2node_name - given a bitmap, build a list of comma separated node 
  *	names. names may include regular expressions (e.g. "lx[01-10]")
@@ -358,22 +343,6 @@ extern void  allocate_nodes (struct job_record *job_ptr);
  */
 extern char * bitmap2node_name (bitstr_t *bitmap) ;
 
-/*
- * build_node_details - set cpu counts and addresses for allocated nodes:
- *	cpu_count_reps, cpus_per_node, node_addr, node_cnt, num_cpu_groups
- * IN job_ptr - pointer to a job record
- */
-extern void build_node_details (struct job_record *job_ptr);
-
-/*
- * count_cpus - report how many cpus are associated with the identified nodes 
- * IN bitmap - map of nodes to tally
- * RET cpu count
- * globals: node_record_count - number of nodes configured
- *	node_record_table_ptr - pointer to global node table
- */
-extern int  count_cpus (unsigned *bitmap);
-
 /*
  * create_config_record - create a config_record entry and set is values to 
  *	the defaults. each config record corresponds to a line in the  
@@ -431,17 +400,6 @@ extern struct part_record *create_part_record (void);
  */
 extern struct step_record * create_step_record (struct job_record *job_ptr);
 
-/*
- * deallocate_nodes - for a given job, deallocate its nodes and make 
- *	their state NODE_STATE_COMPLETING
- * IN job_ptr - pointer to terminating job
- * IN timeout - true of job exhausted time limit, send REQUEST_KILL_TIMELIMIT
- *	RPC instead of REQUEST_JOB_KILL
- * globals: node_record_count - number of nodes in the system
- *	node_record_table_ptr - pointer to global node table
- */
-extern void deallocate_nodes (struct job_record  * job_ptr, bool timeout);
-
 /* 
  * delete_all_step_records - delete all step record for specified job_ptr
  * IN job_ptr - pointer to job table entry to have step record added
@@ -979,15 +937,6 @@ void part_fini (void);
  */
 extern void purge_old_job (void);
 
-/*
- * re_kill_job - for a given job, deallocate its nodes for a second time, 
- *      basically a cleanup for failed deallocate() calls
- * IN job_ptr - pointer to terminating job (already in some COMPLETING state)
- * globals: node_record_count - number of nodes in the system
- *      node_record_table_ptr - pointer to global node table
- */
-extern void re_kill_job(struct job_record *job_ptr);
-
 /*
  * rehash_jobs - Create or rebuild the job hash table.
  * NOTE: run lock_slurmctld before entry: Read config, write job
@@ -1039,25 +988,6 @@ extern void save_all_state(void);
  */
 extern int schedule (void);
 
-/*
- * select_nodes - select and allocate nodes to a specific job
- * IN job_ptr - pointer to the job record
- * IN test_only - if set do not allocate nodes, just confirm they  
- *	could be allocated now
- * RET 0 on success, ESLURM code from slurm_errno.h otherwise
- * globals: list_part - global list of partition info
- *	default_part_loc - pointer to default partition 
- *	config_list - global list of node configuration info
- * Notes: The algorithm is
- *	1) Build a table (node_set_ptr) of nodes with the requisite 
- *	   configuration. Each table entry includes their weight, 
- *	   node_list, features, etc.
- *	2) Call _pick_best_nodes() to select those nodes best satisfying 
- *	   the request, (e.g. best-fit or other criterion)
- *	3) Call allocate_nodes() to perform the actual allocation
- */
-extern int select_nodes (struct job_record *job_ptr, bool test_only);
-
 /*
  * set_node_down - make the specified node's state DOWN if possible
  *	(not in a DRAIN state), kill jobs as needed 
diff --git a/src/slurmctld/srun_comm.c b/src/slurmctld/srun_comm.c
index fe02f9435c3e6614199f72950463f2b12ffc89ff..963aa184a614f97c0c840aec6bc61218c51b933f 100644
--- a/src/slurmctld/srun_comm.c
+++ b/src/slurmctld/srun_comm.c
@@ -3,7 +3,7 @@
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Moe Jette <jette1@llnl.gov>
+ *  Written by Morris Jette <jette1@llnl.gov>
  *  UCRL-CODE-2002-040.
  *  
  *  This file is part of SLURM, a resource management program.
@@ -30,6 +30,7 @@
 
 #include <string.h>
 
+#include "src/common/node_select.h"
 #include "src/common/xassert.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
@@ -89,9 +90,8 @@ extern void srun_allocate (uint32_t job_id)
 				job_ptr->node_cnt);
 		memcpy(msg_arg->node_addr, job_ptr->node_addr,
 				(sizeof(slurm_addr) * job_ptr->node_cnt));
-#ifdef HAVE_BGL
-		msg_arg->bgl_part_id    = xstrdup(job_ptr->bgl_part_id);
-#endif
+		msg_arg->select_jobinfo = select_g_copy_jobinfo(
+				job_ptr->select_jobinfo);
 		msg_arg->error_code	= SLURM_SUCCESS;
 		_srun_agent_launch(addr, job_ptr->host, 
 				RESPONSE_RESOURCE_ALLOCATION, msg_arg);
diff --git a/src/slurmctld/step_mgr.c b/src/slurmctld/step_mgr.c
index ae777d5fc4b4f27136f5f1ed3874c8db9f27c35d..ca81a4a056a15e13f0c7d5c092a5a480a0b9d0d1 100644
--- a/src/slurmctld/step_mgr.c
+++ b/src/slurmctld/step_mgr.c
@@ -46,6 +46,7 @@
 #include "src/common/xstring.h"
 #include "src/slurmctld/agent.h"
 #include "src/slurmctld/locks.h"
+#include "src/slurmctld/node_scheduler.h"
 #include "src/slurmctld/slurmctld.h"
 
 #define MAX_RETRIES 10
@@ -276,8 +277,8 @@ void signal_step_tasks(struct step_record *step_ptr, uint16_t signal)
 			node_names[MAX_NAME_LEN * agent_args->node_count],
 			node_record_table_ptr[i].name, MAX_NAME_LEN);
 		agent_args->node_count++;
-#ifdef HAVE_BGL
-		break;	/* only do the one front-end node */
+#ifdef HAVE_BGL			/* only do the one front-end node */
+		break;
 #endif
 	}
 
diff --git a/src/slurmd/mgr.c b/src/slurmd/mgr.c
index 04b30890f8b8ff85444ca319da7e278889a8075c..bd15d6b184510e939d9cc02d682caf6959b129ce 100644
--- a/src/slurmd/mgr.c
+++ b/src/slurmd/mgr.c
@@ -57,6 +57,7 @@
 #include "src/common/cbuf.h"
 #include "src/common/hostlist.h"
 #include "src/common/log.h"
+#include "src/common/node_select.h"
 #include "src/common/fd.h"
 #include "src/common/safeopen.h"
 #include "src/common/setenvpf.h"
@@ -959,7 +960,7 @@ _make_batch_script(batch_job_launch_msg_t *msg, char *path)
 static int
 _setup_batch_env(slurmd_job_t *job, batch_job_launch_msg_t *msg)
 {
-	char       buf[1024], *task_buf;
+	char       buf[1024], *task_buf, *bgl_part_id = NULL;
 	hostlist_t hl = hostlist_create(msg->nodes);
 
 	if (!hl)
@@ -975,9 +976,12 @@ _setup_batch_env(slurmd_job_t *job, batch_job_launch_msg_t *msg)
 	setenvpf(&job->env, "SLURM_TASKS_PER_NODE", "%s", task_buf);
 	xfree(task_buf); 
 
-#ifdef HAVE_BGL
-	setenvpf(&job->env, "BGL_PARTITION_ID", "%s", msg->bgl_part_id);
-#endif
+	select_g_get_jobinfo(msg->select_jobinfo, 
+		SELECT_DATA_PART_ID, &bgl_part_id);
+	if (bgl_part_id) {
+		setenvpf(&job->env, "BGL_PARTITION_ID", "%s", bgl_part_id);
+		xfree(bgl_part_id);
+	}
 
 	return 0;
 }
diff --git a/src/squeue/opts.c b/src/squeue/opts.c
index 4bd141fff1e25c9e4f2d405ce67d310e8d240991..d5c822f3b21d72ebaefd63b52286d5ac6d597fde 100644
--- a/src/squeue/opts.c
+++ b/src/squeue/opts.c
@@ -492,6 +492,11 @@ extern int parse_format( char* format )
 							field_size,
 							right_justify,
 							suffix );
+			else if (field[0] == 's')
+				job_format_add_select_jobinfo( params.format_list, 
+				                         field_size, 
+				                         right_justify, 
+				                         suffix );
 			else if (field[0] == 'S')
 				job_format_add_time_start( params.format_list, 
 				                           field_size, 
@@ -523,26 +528,6 @@ extern int parse_format( char* format )
 				                          field_size, 
 				                          right_justify, 
 				                          suffix );
-			else if (field[0] == 'y')
-				job_format_add_geometry( params.format_list, 
-				                         field_size, 
-				                         right_justify, 
-				                         suffix );
-			else if (field[0] == 'Y')
-				job_format_add_conn_type( params.format_list, 
-				                         field_size, 
-				                         right_justify, 
-				                         suffix );
-			else if (field[0] == 'z')
-				job_format_add_rotate( params.format_list, 
-				                         field_size, 
-				                         right_justify, 
-				                         suffix );
-			else if (field[0] == 'Z')
-				job_format_add_node_use( params.format_list, 
-				                         field_size, 
-				                         right_justify, 
-				                         suffix );
 			else 
 				error( "Invalid job format specification: %c", 
 				       field[0] );
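The hunk above routes the single "s" field character to job_format_add_select_jobinfo(), replacing the removed y/Y/z/Z handlers; as an illustrative (hypothetical) invocation, a format string such as "%.7i %.9P %.8u %s" passed to squeue's -o option would print one node-selection plugin column after the job id, partition, and user fields.
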
diff --git a/src/squeue/print.c b/src/squeue/print.c
index 3aeb9024dda48166e224a2b8589a89075983096b..397456b6284a027abf7a1435b88e0c89dcef5a22 100644
--- a/src/squeue/print.c
+++ b/src/squeue/print.c
@@ -4,7 +4,7 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Joey Ekstrom <ekstrom1@llnl.gov>, 
- *             Moe Jette <jette1@llnl.gov>, et. al.
+ *             Morris Jette <jette1@llnl.gov>, et. al.
  *  UCRL-CODE-2002-040.
  *  
  *  This file is part of SLURM, a resource management program.
@@ -35,6 +35,7 @@
 #include "src/common/hostlist.h"
 #include "src/common/list.h"
 #include "src/common/macros.h"
+#include "src/common/node_select.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
 #include "src/squeue/print.h"
@@ -792,69 +793,19 @@ int _print_job_dependency(job_info_t * job, int width, bool right_justify,
 	return SLURM_SUCCESS;
 }
 
-int _print_job_geometry(job_info_t * job, int width, bool right_justify,
+int _print_job_select_jobinfo(job_info_t * job, int width, bool right_justify,
 			char* suffix) 
 {
-	if (job == NULL)	/* Print the Header instead */
-		_print_str("GEOMETRY", width, right_justify, true);
-	else {
-#ifdef HAVE_BGL
-		char id[FORMAT_STRING_SIZE];
-		snprintf(id, FORMAT_STRING_SIZE, "%u,%u,%u", 
-			job->geometry[0], job->geometry[1], job->geometry[2]);
-		_print_str(id, width, right_justify, true);
-#else
-		_print_str("n/a", width, right_justify, true);
-#endif
-	}
-	if (suffix)
-		printf("%s", suffix); 
-	return SLURM_SUCCESS;
-}
+	char select_buf[100];
 
-int _print_job_conn_type(job_info_t * job, int width, bool right_justify,
-			char* suffix) 
-{
 	if (job == NULL)	/* Print the Header instead */
-		_print_str("CONN_TYPE", width, right_justify, true);
+		select_g_sprint_jobinfo(NULL,
+			select_buf, sizeof(select_buf), SELECT_PRINT_HEAD);
 	else
-		_print_str(job_conn_type_string(job->conn_type), 
-			width, right_justify, true);
-	if (suffix)
-		printf("%s", suffix); 
-	return SLURM_SUCCESS;
-}
+		select_g_sprint_jobinfo(job->select_jobinfo,
+			select_buf, sizeof(select_buf), SELECT_PRINT_DATA);
+	_print_str(select_buf, width, right_justify, true);
 
-int _print_job_node_use(job_info_t * job, int width, bool right_justify,
-			char* suffix) 
-{
-	if (job == NULL)	/* Print the Header instead */
-		_print_str("NODE_USE", width, right_justify, true);
-	else
-		_print_str(job_node_use_string(job->node_use), 
-			width, right_justify, true);
-	if (suffix)
-		printf("%s", suffix); 
-	return SLURM_SUCCESS;
-}
-
-int _print_job_rotate(job_info_t * job, int width, bool right_justify,
-			char* suffix) 
-{
-	if (job == NULL)	/* Print the Header instead */
-		_print_str("ROTATE", width, right_justify, true);
-	else {
-		char *id;
-#ifdef HAVE_BGL
-		if (job->rotate == 0)
-			id = "no";
-		else
-			id = "yes";
-#else
-		id = "n/a";
-#endif
-		_print_str(id, width, right_justify, true);
-	}
 	if (suffix)
 		printf("%s", suffix); 
 	return SLURM_SUCCESS;
diff --git a/src/squeue/print.h b/src/squeue/print.h
index 6728541f22bc03f5904398268200fcac1fa79416..7c879fddea31f7c1deeda8f39355454768347d6b 100644
--- a/src/squeue/print.h
+++ b/src/squeue/print.h
@@ -136,14 +136,8 @@ int job_format_add_function(List list, int width, bool right_justify,
 	job_format_add_function(list,wid,right,suffix,_print_job_account)
 #define job_format_add_dependency(list,wid,right,suffix) \
 	job_format_add_function(list,wid,right,suffix,_print_job_dependency)
-#define job_format_add_geometry(list,wid,right,suffix) \
-	job_format_add_function(list,wid,right,suffix,_print_job_geometry)
-#define job_format_add_rotate(list,wid,right,suffix) \
-	job_format_add_function(list,wid,right,suffix,_print_job_rotate)
-#define job_format_add_conn_type(list,wid,right,suffix) \
-	job_format_add_function(list,wid,right,suffix,_print_job_conn_type)
-#define job_format_add_node_use(list,wid,right,suffix) \
-	job_format_add_function(list,wid,right,suffix,_print_job_node_use)
+#define job_format_add_select_jobinfo(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_job_select_jobinfo)
 
 /*****************************************************************************
  * Job Line Print Functions
@@ -216,13 +210,7 @@ int _print_job_account(job_info_t * job, int width, bool right_justify,
 			char* suffix);
 int _print_job_dependency(job_info_t * job, int width, bool right_justify,
 			char* suffix);
-int _print_job_geometry(job_info_t * job, int width, bool right_justify,
-			char* suffix);
-int _print_job_rotate(job_info_t * job, int width, bool right_justify,
-			char* suffix);
-int _print_job_conn_type(job_info_t * job, int width, bool right_justify,
-			char* suffix);
-int _print_job_node_use(job_info_t * job, int width, bool right_justify,
+int _print_job_select_jobinfo(job_info_t * job, int width, bool right_justify,
 			char* suffix);
 
 /*****************************************************************************
diff --git a/src/srun/job.c b/src/srun/job.c
index fdb6199a312e6ffaadabef1fb3d7110171667835..d2a754c4a8580fd462a6760801ad5ccc2cc7bcb1 100644
--- a/src/srun/job.c
+++ b/src/srun/job.c
@@ -64,9 +64,7 @@ typedef struct allocation_info {
 	int                     num_cpu_groups;
 	int                    *cpus_per_node;
 	int                    *cpu_count_reps;
-#ifdef HAVE_BGL
-	char                   *bgl_part_id;
-#endif
+	select_jobinfo_t select_jobinfo;
 } allocation_info_t;
 
 
@@ -182,9 +180,7 @@ job_create_allocation(resource_allocation_response_msg_t *resp)
 	i->cpu_count_reps = resp->cpu_count_reps;
 	i->addrs          = resp->node_addr;
 
-#ifdef HAVE_BGL
-	i->bgl_part_id    = resp->bgl_part_id;
-#endif
+	i->select_jobinfo = select_g_copy_jobinfo(resp->select_jobinfo);
 
 	job = _job_create_internal(i);
 
@@ -480,18 +476,18 @@ _job_create_internal(allocation_info_t *info)
 
 	job->nodelist = xstrdup(info->nodelist);
 	hl = hostlist_create(job->nodelist);
-#ifdef HAVE_BGL
+#ifdef HAVE_BGL		/* Run only on front-end */
 	/* All jobs execute through front-end on Blue Gene/L.
 	 * Normally we would not permit execution of job steps, 
 	 * but can fake it by just allocating all tasks to 
 	 * one of the allocated nodes. */
 	job->nhosts    = 1;
 	opt.overcommit = true;
-	job->bgl_part_id = xstrdup(info->bgl_part_id);
 #else
 	job->nhosts = hostlist_count(hl);
 #endif
 
+	job->select_jobinfo = info->select_jobinfo;
 	job->jobid   = info->jobid;
 	job->stepid  = info->stepid;
 	job->old_job = false;
diff --git a/src/srun/job.h b/src/srun/job.h
index f6fd665ff04a31f0d9bf54c10f9e6660ab2d7421..a790b68e8e8aacea5073cff44b6777fe44e7bad7 100644
--- a/src/srun/job.h
+++ b/src/srun/job.h
@@ -37,6 +37,7 @@
 
 #include "src/common/cbuf.h"
 #include "src/common/macros.h"
+#include "src/common/node_select.h"
 #include "src/common/slurm_protocol_defs.h"
 #include "src/srun/fname.h"
 
@@ -139,9 +140,7 @@ typedef struct srun_job {
 	int   stdinfd;
 	bool *stdin_eof;  /* true if task i processed stdin eof */
 
-#ifdef HAVE_BGL
-	char *bgl_part_id;
-#endif
+	select_jobinfo_t select_jobinfo;
 } job_t;
 
 void    update_job_state(job_t *job, job_state_t newstate);
diff --git a/src/srun/opt.c b/src/srun/opt.c
index 2fc9faee868bbe7480bbd56f5fd479641eb01415..5a4f614ad0c6951a01dc2f89399a251a594ba56d 100644
--- a/src/srun/opt.c
+++ b/src/srun/opt.c
@@ -1402,7 +1402,7 @@ static void _usage(void)
 "            [--core=type] [-T threads] [-W sec] [--attach] [--join] \n"
 "            [--contiguous] [--mincpus=n] [--mem=MB] [--tmp=MB] [-C list]\n"
 "            [--mpi=type] [--account=name] [--dependency=jobid]\n"
-#ifdef HAVE_BGL
+#ifdef HAVE_BGL		/* Blue Gene specific options */
 "            [--geometry=XxYxZ] [--conn-type=type] [--no-rotate]\n"
 "            [--node-use=type]\n"
 #endif
@@ -1469,7 +1469,7 @@ static void _help(void)
 "  -x, --exclude=hosts...      exclude a specific list of hosts\n"
 "  -Z, --no-allocate           don't allocate nodes (must supply -w)\n"
 "\n"
-#ifdef HAVE_BGL
+#ifdef HAVE_BGL				/* Blue Gene specific options */
   "Blue Gene related options:\n"
   "  -g, --geometry=XxYxZ        geometry constraints of the job\n"
   "  -R, --no-rotate             disable geometry rotation\n"
diff --git a/src/srun/reattach.c b/src/srun/reattach.c
index 3f85f0e56b034dbb91688b824d5fe265a99db34c..d8d7c0454f5e8c8357644a35b2c54fd575766500 100644
--- a/src/srun/reattach.c
+++ b/src/srun/reattach.c
@@ -193,7 +193,7 @@ _get_job_info(srun_step_t *s)
 	job_info_msg_t *resp = NULL;
 	job_info_t     *job  = NULL;
 	hostlist_t      hl;
-#ifdef HAVE_BGL
+#ifdef HAVE_BGL		/* Fake address for front-end node */
 	old_job_alloc_msg_t                 alloc_req;
 	resource_allocation_response_msg_t *alloc_resp = NULL;
 #endif
@@ -239,7 +239,7 @@ _get_job_info(srun_step_t *s)
 
 	hostlist_destroy(hl);
 
-#ifdef HAVE_BGL
+#ifdef HAVE_BGL		/* Fake address for front-end node */
 	/* now get actual node name for systems using front-end node */
 	alloc_req.job_id = s->jobid;
 	alloc_req.uid    = getuid();
diff --git a/src/srun/srun.c b/src/srun/srun.c
index c8d5c21cc816c4e7b7e9d49c98225357d263f6ee..6f09ed4fe400122b3e9224e9b8d70c9eebad3384 100644
--- a/src/srun/srun.c
+++ b/src/srun/srun.c
@@ -104,7 +104,7 @@ int srun(int ac, char **av)
 {
 	allocation_resp *resp;
 	job_t *job;
-	char *task_cnt;
+	char *task_cnt, *bgl_part_id = NULL;
 
 	log_options_t logopt = LOG_OPTS_STDERR_ONLY;
 
@@ -216,9 +216,12 @@ int srun(int ac, char **av)
 	setenvf("SLURM_TASKS_PER_NODE=%s", task_cnt = _task_count_string (job));
 	setenvf("SLURM_DISTRIBUTION=%s",
 		format_distribution_t (opt.distribution));
-#ifdef HAVE_BGL
-	setenvf("BGL_PARTITION_ID=%s",    job->bgl_part_id);
-#endif
+
+	if (job->select_jobinfo)
+		select_g_get_jobinfo(job->select_jobinfo, 
+				SELECT_DATA_PART_ID, &bgl_part_id);
+	if (bgl_part_id)
+		setenvf("BGL_PARTITION_ID=%s", bgl_part_id);
 
 	xfree(task_cnt);
 
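The BGL_PARTITION_ID export above, and the batch-script variant in the next hunk, both retrieve the partition ID through SELECT_DATA_PART_ID instead of reading a bgl_part_id field directly. A small sketch of that step as a standalone helper, under the assumption that the select plugin hands back a freshly allocated (xstrdup'd) string the caller may xfree():

/* Sketch only: export the job's Blue Gene partition ID, if any.
 * Assumes the string returned via SELECT_DATA_PART_ID is caller-owned. */
static void _env_set_bgl_part_id(job_t *job)
{
	char *bgl_part_id = NULL;

	if (job->select_jobinfo == NULL)
		return;
	select_g_get_jobinfo(job->select_jobinfo,
			SELECT_DATA_PART_ID, &bgl_part_id);
	if (bgl_part_id) {
		setenvf("BGL_PARTITION_ID=%s", bgl_part_id);
		xfree(bgl_part_id);
	}
}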
@@ -611,12 +614,16 @@ _set_batch_script_env(job_t *job)
 		rc = SLURM_FAILURE;
 	}
 
-#ifdef HAVE_BGL
-	if (job && setenvf("BGL_PARTITION_ID=%s", job->bgl_part_id)) {
-		error("Can't set BGL_PARTITION_ID environment variable");
-		rc = SLURM_FAILURE;
+	if (job) {
+		char *bgl_part_id = NULL;
+		select_g_get_jobinfo(job->select_jobinfo, 
+				SELECT_DATA_PART_ID, &bgl_part_id);
+		if (bgl_part_id
+		    && setenvf("BGL_PARTITION_ID=%s", bgl_part_id)) {
+			error("Can't set BGL_PARTITION_ID environment variable");
+			rc = SLURM_FAILURE;
+		}
 	}
-#endif
 
 	/*
 	 * If no job has been allocated yet, just return. We are