From d9322510958c0645f18caee5fc5dbc6908390d70 Mon Sep 17 00:00:00 2001
From: Danny Auble <da@llnl.gov>
Date: Fri, 3 Mar 2006 21:11:07 +0000
Subject: [PATCH] merged with bg-remodel branch.

---
 NEWS                                          |    8 +-
 doc/html/bluegene.shtml                       |  323 ++--
 doc/man/man1/smap.1                           |  147 +-
 etc/bluegene.conf.example                     |   49 +-
 slurm/slurm.h.in                              |   27 +-
 src/api/init_msg.c                            |   11 +-
 src/api/job_info.c                            |   65 +-
 src/api/node_info.c                           |    6 +-
 src/api/node_select_info.c                    |    4 +-
 src/api/node_select_info.h                    |    2 +
 src/api/partition_info.c                      |   25 +-
 src/api/signal.c                              |    6 +-
 src/common/forward.c                          |    2 +-
 src/common/node_select.c                      |  229 ++-
 src/common/node_select.h                      |   29 +-
 src/common/slurm_auth.c                       |    2 +-
 src/common/slurm_protocol_api.c               |   48 +-
 src/common/slurm_protocol_api.h               |    1 +
 src/common/slurm_protocol_pack.c              |  495 +++--
 .../block_allocator/block_allocator.c         |  184 +-
 .../block_allocator/block_allocator.h         |   11 +-
 .../select/bluegene/plugin/bg_block_info.c    |    8 +-
 .../select/bluegene/plugin/bg_job_place.c     |  365 +++-
 .../select/bluegene/plugin/bg_job_place.h     |    7 +-
 .../select/bluegene/plugin/bg_job_run.c       |  270 ++-
 .../bluegene/plugin/bg_switch_connections.c   |   20 +-
 .../select/bluegene/plugin/block_sys.c        |  198 +-
 src/plugins/select/bluegene/plugin/bluegene.c | 1631 ++++++++++++-----
 src/plugins/select/bluegene/plugin/bluegene.h |   52 +-
 .../select/bluegene/plugin/select_bluegene.c  |  158 +-
 src/plugins/select/bluegene/plugin/sfree.c    |   26 +-
 src/plugins/select/cons_res/select_cons_res.c |    7 +
 src/plugins/select/linear/select_linear.c     |    7 +-
 src/scontrol/scontrol.c                       |   40 +-
 src/sinfo/print.c                             |   67 +-
 src/sinfo/sinfo.c                             |   55 +-
 src/slurmctld/job_mgr.c                       |  133 +-
 src/slurmctld/node_scheduler.c                |   84 +-
 src/slurmctld/partition_mgr.c                 |   49 +-
 src/slurmctld/proc_req.c                      |    8 +-
 src/slurmctld/slurmctld.h                     |    2 +
 src/slurmctld/step_mgr.c                      |    4 +-
 src/slurmd/common/stepd_api.c                 |    2 +-
 src/slurmd/slurmstepd/mgr.c                   |    3 +-
 src/slurmd/slurmstepd/req.c                   |    4 +-
 src/smap/configure_functions.c                |  300 ++-
 src/smap/grid_functions.c                     |   22 +-
 src/smap/job_functions.c                      |   95 +-
 src/smap/partition_functions.c                |  138 +-
 src/smap/smap.c                               |    7 +-
 src/squeue/print.c                            |  109 +-
 src/srun/allocate.c                           |   27 +-
 src/srun/opt.c                                |  111 +-
 src/srun/opt.h                                |    4 +-
 testsuite/expect/globals.example              |   30 +
 testsuite/expect/test1.13                     |    2 +-
 testsuite/expect/test1.19                     |   12 +-
 testsuite/expect/test1.22                     |   10 +-
 testsuite/expect/test1.25                     |    8 +-
 testsuite/expect/test1.31                     |   14 +-
 testsuite/expect/test1.35                     |    9 +-
 testsuite/expect/test1.36                     |   11 +-
 testsuite/expect/test1.41                     |    9 +-
 testsuite/expect/test1.42                     |   10 +-
 testsuite/expect/test1.45                     |    8 +-
 testsuite/expect/test1.47                     |    2 +-
 testsuite/expect/test1.49                     |    8 +-
 testsuite/expect/test1.81                     |    5 +-
 testsuite/expect/test1.82                     |    9 +-
 testsuite/expect/test1.84                     |    6 +-
 testsuite/expect/test10.12                    |    2 +-
 testsuite/expect/test10.13                    |   23 +-
 testsuite/expect/test10.3                     |    7 +-
 testsuite/expect/test10.4                     |    6 +-
 testsuite/expect/test10.5                     |    7 +-
 testsuite/expect/test10.6                     |    6 +-
 testsuite/expect/test10.7                     |    2 +-
 testsuite/expect/test10.8                     |   23 +-
 testsuite/expect/test10.9                     |    2 +-
 testsuite/expect/test3.7                      |    6 +-
 testsuite/expect/test4.11                     |    6 +-
 testsuite/expect/test4.3                      |   14 +-
 testsuite/expect/test4.4                      |   10 +-
 testsuite/expect/test7.2                      |    8 +-
 testsuite/expect/test8.3                      |   10 +-
 testsuite/expect/test9.1                      |    9 +-
 testsuite/expect/test9.2                      |    6 +-
 testsuite/expect/test9.3                      |    6 +-
 testsuite/expect/test9.4                      |    7 +-
 testsuite/expect/test9.5                      |    6 +-
 testsuite/expect/test9.6                      |    7 +-
 testsuite/expect/test9.7                      |    2 +-
 testsuite/expect/test9.7.bash                 |    8 +-
 .../slurm_unit/slurmctld/security_2_2.bash    |   16 +
 94 files changed, 4000 insertions(+), 2039 deletions(-)
 create mode 100755 testsuite/slurm_unit/slurmctld/security_2_2.bash

diff --git a/NEWS b/NEWS
index db0d6157f81..a8a365e91c3 100644
--- a/NEWS
+++ b/NEWS
@@ -4,7 +4,13 @@ documents those changes that are of interest to users and admins.
 * Changes in SLURM 1.1.0-pre2
 =============================
  -- Added basic "sbcast" support, still needs message fanout logic.
-
+ -- Bluegene specific - Added support for overlapping partitions and 
+    dynamic partitioning. 
+ -- Bluegene specific - Added support for nodecard sized blocks.
+ -- Added logic to accept 1k for 1024 and so on for the --nodes option of srun. 
+    This notation is also used by display tools such as smap, sinfo, scontrol, and 
+    squeue.
+ 
 * Changes in SLURM 1.1.0-pre1
 =============================
  -- New --enable-multiple-slurmd configure parameter to allow running
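For illustration of the new "k" suffix handling described in the entry above (the
script name and node count here are arbitrary examples, not part of this change),
a request such as

    srun --nodes=2k --batch myscript.sh

is treated as a request for 2048 nodes, and the display tools listed above
(smap, sinfo, scontrol, squeue) likewise show such counts in "2k" form.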
diff --git a/doc/html/bluegene.shtml b/doc/html/bluegene.shtml
index c7a9a68d3ab..56a23539744 100644
--- a/doc/html/bluegene.shtml
+++ b/doc/html/bluegene.shtml
@@ -1,46 +1,50 @@
 <!--#include virtual="header.txt"-->
 
-<h1>Blue Gene User and Administrator Guide</h1>
+<h1>BlueGene User and Administrator Guide</h1>
 
 <h2>Overview</h2>
 
 <p>This document describes the unique features of SLURM on the
-<a href="http://www.research.ibm.com/bluegene">IBM Blue Gene</a> systems. 
+<a href="http://www.research.ibm.com/bluegene">IBM BlueGene</a> systems. 
 You should be familiar with the SLURM's mode of operation on Linux clusters 
-before studying the relatively few differences in Blue Gene operation 
+before studying the relatively few differences in BlueGene operation 
 described in this document.</p>
 
-<p>Blue Gene systems have several unique features making for a few 
+<p>BlueGene systems have several unique features making for a few 
 differences in how SLURM operates there. 
-The basic unit of resource allocation is a <i>base partition</i> or <i>midplane</i>.
-The <i>base partitions</i> are connected in a three-dimensional torus. 
-Each <i>base partition</i> includes 512 <i>c-nodes</i> each containing two processors; 
+The BlueGene system consists of one or more <i>base partitions</i> or 
+<i>midplanes</i> connected in a three-dimensional torus. 
+Each <i>base partition</i> consists of 512 <i>c-nodes</i> each containing two processors; 
 one designed primarily for computations and the other primarily for managing communications. 
-SLURM considers each <i>base partition</i> as one node with 1024 processors. 
 The <i>c-nodes</i> can execute only one process and thus are unable to execute both 
 the user's jobs and SLURM's <i>slurmd</i> daemon.
-Thus the <i>slurmd</i> daemon executes on one of the Blue Gene <i>Front End Nodes</i>. 
+Thus the <i>slurmd</i> daemon executes on one of the BlueGene <i>Front End Nodes</i>. 
 This single <i>slurmd</i> daemon provides (almost) all of the normal SLURM services 
 for every <i>base partition</i> on the system. </p>
 
+<p>Internally SLURM treats each <i>base partition</i> as one node with 
+1024 processors, which keeps the number of entities being managed reasonable.
+Since the current BlueGene software can sub-allocate a <i>base partition</i>
+into blocks of 32 and/or 128 <i>c-nodes</i>, more than one user job can execute 
+on each <i>base partition</i> (subject to system administrator configuration).
+To effectively utilize this environment, SLURM tools present the user with 
+the view that each <i>c-node</i> is a separate node, so allocation requests 
+and status information use <i>c-node</i> counts (this is a new feature in 
+SLURM version 1.1).</p>
+
 <h2>User Tools</h2>
 
 <p>The normal set of SLURM user tools: srun, scancel, sinfo, squeue and scontrol 
 provide all of the expected services except support for job steps. 
 SLURM performs resource allocation for the job, but initiation of tasks is performed 
-using the <i>mpirun</i> command. SLURM has no concept of a job step on Blue Gene.
-Four new srun options are available: 
+using the <i>mpirun</i> command. SLURM has no concept of a job step on BlueGene.
+Three new srun options are available: 
 <i>--geometry</i> (specify job size in each dimension),
 <i>--no-rotate</i> (disable rotation of geometry), 
-<i>--conn-type</i> (specify interconnect type between base partitions, mesh or torus), and 
-You can also continue to use the <i>--nodes</i> option with a minimum and (optionally) 
-maximum node count. 
-The <i>--ntasks</i> option continues to be supported and may be required to 
-control resource allocations less than a full <i>base partition</i> in size, 
-if the system is configured to support them.  
-For example "srun --nntasks 256 ..." indicates that the job requires a minimum 
-of 256 processors or one quarter of a midplane to execute. 
-See the srun man pages for details. </p>
+<i>--conn-type</i> (specify interconnect type between base partitions, mesh or torus).
+The <i>--nodes</i> option with a minimum and (optionally) maximum node count continues 
+to be available.  
+Note that this is a c-node count.</p>
 
 <p>To reiterate: srun is used to submit a job script, but mpirun is used to launch the parallel tasks. 
 <b>It is highly recommended that the srun <i>--batch</i> option be used to submit a script.</b> 
@@ -56,7 +60,7 @@ is ready for use before initiating any mpirun commands.
 SLURM will assume this responsibility for batch jobs. 
 The script that you submit to SLURM can contain multiple invocations of mpirun as
 well as any desired commands for pre- and post-processing.
-The mpirun command will get its <i>bgblock</i> or BG block information from the
+The mpirun command will get its <i>bgblock</i> information from the
 <i>MPIRUN_PARTITION</i> as set by SLURM. A sample script is shown below.
 <pre>
 #!/bin/bash
@@ -70,22 +74,27 @@ date
 </pre></p>
  
 <a name="naming">
-<p>The naming of nodes includes a three-digit suffix representing the base partition's 
+<p>The naming of base partitions includes a three-digit suffix representing its 
 coordinates in the X, Y and Z dimensions with a zero origin.
-For example, "bg012" represents the base partition whose coordinate is at X=0, Y=1 and Z=2. 
-Since jobs must be allocated consecutive nodes in all three dimensions, we have developed 
-an abbreviated format for describing the nodes in one of these three-dimensional blocks. 
-The node's prefix of "bg" is followed by the end-points of the block enclosed in square-brackets. 
-For example, " bg[620x731]" is used to represent the eight nodes enclosed in a block 
+For example, "bg012" represents the base partition whose coordinate is at X=0, Y=1 and Z=2.  In a system 
+configured with <i>small blocks</i> (any block less than a full base partition) there will be divisions
+into the base partition notation.  For example, bg012.0 represents the first quarter of a midplane in 
+bluegene/L this would be 128 c-node block, and bg012.1.0 would represent the first nodecard in the second
+quarter of the base partition, or a 32 c-node block.
+Since jobs must allocate consecutive base partitions in all three dimensions, we have developed 
+an abbreviated format for describing the base partitions in one of these three-dimensional blocks. 
+The base partition name has a prefix determined by the system, followed by the end-points 
+of the block enclosed in square brackets. 
+For example, "bg[620x731]" is used to represent the eight base partitions enclosed in a block 
 with endpoints bg620 and bg731 (bg620, bg621, bg630, bg631, bg720, bg721, 
 bg730 and bg731).</p></a>
 
 <p>One new tool provided is <i>smap</i>.
-Smap is aware of system topography and provides a map of what nodes are allocated 
+Smap is aware of system topography and provides a map of what base partitions are allocated 
 to jobs, partitions, etc. 
 See the smap man page for details.
 A sample of smap output is provided below showing the location of five jobs. 
-Note the format of the list of nodes allocated to each job.
+Note the format of the list of base partitions allocated to each job.
 Also note that idle (unassigned) base partitions are indicated by a period.
 Down and drained base partitions (those not available for use) are 
 indicated by a number sign (bg703 in the display below).
@@ -96,12 +105,12 @@ Values in the X dimension increase to the right.
 Values in the Z dimension increase down and toward the left.</p>
 
 <pre>
-   a a a a b b d d    ID JOBID PARTITION BG_BLOCK USER   NAME ST TIME NODES NODELIST
-  a a a a b b d d     a  12345 batch     RMP0     joseph tst1 R  43:12   64 bg[000x333]
- a a a a b b c c      b  12346 debug     RMP1     chris  sim3 R  12:34   16 bg[420x533]
-a a a a b b c c       c  12350 debug     RMP2     danny  job3 R   0:12    8 bg[622x733]
-                      d  12356 debug     RMP3     dan    colu R  18:05   16 bg[600x731]
-   a a a a b b d d    e  12378 debug     RMP4     joseph asx4 R   0:34    4 bg[612x713]
+   a a a a b b d d    ID JOBID PARTITION BG_BLOCK USER   NAME ST TIME NODES BP_LIST
+  a a a a b b d d     a  12345 batch     RMP0     joseph tst1 R  43:12  32k bg[000x333]
+ a a a a b b c c      b  12346 debug     RMP1     chris  sim3 R  12:34   8k bg[420x533]
+a a a a b b c c       c  12350 debug     RMP2     danny  job3 R   0:12   4k bg[622x733]
+                      d  12356 debug     RMP3     dan    colu R  18:05   8k bg[600x731]
+   a a a a b b d d    e  12378 debug     RMP4     joseph asx4 R   0:34   2k bg[612x713]
   a a a a b b d d
  a a a a b b c c
 a a a a b b c c
@@ -128,12 +137,12 @@ You can identify the bgblock associated with your job using the command
 <i>smap -Dj -c</i> and the state of the bgblock with the command 
 <i>smap -Db -c</i>.
 The time to boot a bgblock is related to its size, but should range from 
-from a few minutes to about 15 minutes for a bgblock containing 64 
+a few minutes to about 15 minutes for a bgblock containing 128 
 base partitions. 
 Only after the bgblock is READY will your job's output file be created 
 and the script execution begin. 
 If the bgblock boot fails, SLURM will attempt to reboot several times 
-before draining the associated nodes and aborting the job.</p>
+before draining the associated base partitions and aborting the job.</p>
 
 <p>The job will continue to be in a RUNNING state until the bgjob has 
 completed and the bgblock ownership is changed. 
@@ -141,11 +150,12 @@ The time for completing a bgjob has frequently been on the order of
 five minutes.
 In summary, your job may appear in SLURM as RUNNING for 15 minutes 
 before the script actually begins to 5 minutes after it completes.
-These delays are the result of BG infrastructure issues and are 
+These delays are the result of BlueGene infrastructure issues and are 
 not due to anything in SLURM.</p>
 
-<p>When using smap in curses mode you can scroll through the different windows
-using the arrow keys.  The <b>up</b> and <b>down</b> arrow keys scroll 
+<p>When using smap in default output mode, you can scroll through 
+the different windows using the arrow keys.  
+The <b>up</b> and <b>down</b> arrow keys scroll 
 the window containing the grid, and the <b>left</b> and <b>right</b> arrow 
 keys scroll the window containing the text information.</p>
  
@@ -153,10 +163,11 @@ keys scroll the window containing the text information.</p>
 
 <h2>System Administration</h2>
 
-<p>As of IBM's REV 2 driver SLURM must be built in 64bit mod.  
+<p>As of IBM's REV 2 driver SLURM must be built in 64-bit mode.  
 This can be done by specifying <b>CFLAGS=-m64 CXX="g++ -m64"</b>.  
-Both CFLAGS and CXX must be set for SLURM to compile correctly.
-<p>Building a Blue Gene compatible system is dependent upon the 
+Both CFLAGS and CXX must be set for SLURM to compile correctly.</p>
+
+<p>Building a BlueGene compatible system is dependent upon the 
 <i>configure</i> program locating some expected files. 
 In particular, the configure script searches for <i>libdb2.so</i> in the 
 directories <i>/home/bgdb2cli/sqllib</i> and <i>/u/bgdb2cli/sqllib</i>.
@@ -185,7 +196,7 @@ row/rack/midplane data.</p>
 to configure and build two sets of files for installation. 
 One set will be for the Service Node (SN), which has direct access to the BG Bridge APIs. 
 The second set will be for the Front End Nodes (FEN), which lack access to the
-Bridge APIs and interact with using Remote Procedure Calls to the slurmctld daemon.
+BG Bridge APIs and interact using Remote Procedure Calls to the slurmctld daemon.
 You should see "#define HAVE_BG 1" and "#define HAVE_FRONT_END 1" in the "config.h" 
 file for both the SN and FEN builds. 
 You should also see "#define HAVE_BG_FILES 1" in config.h on the SN before 
@@ -196,12 +207,10 @@ If an optional backup daemon is used, it must be in some location where
 it is capable of executing BG Bridge APIs.
 One slurmd daemon should be configured to execute on one of the front end nodes. 
 That one slurmd daemon represents the communications channel for every base partition. 
-A future release of SLURM will support multiple slurmd daemons on multiple
-front end nodes.
 You can use the scontrol command to drain individual nodes as desired and 
 return them to service. </p>
 
-<p>The slurm.conf (configuration) file needs to have the value of <i>InactiveLimit</i>
+<p>The <i>slurm.conf</i> (configuration) file needs to have the value of <i>InactiveLimit</i>
 set to zero or not specified (it defaults to a value of zero). 
 This is because there are no job steps and we don't want to purge jobs prematurely.
 The value of <i>SelectType</i> must be set to "select/bluegene" in order to have 
@@ -224,14 +233,14 @@ etc.).  Sample prolog and epilog scripts follow. </p>
 
 <pre>
 #!/bin/bash
-# Sample Blue Gene Prolog script
+# Sample BlueGene Prolog script
 #
 # Wait for bgblock to be ready for this job's use
 /usr/sbin/slurm_prolog
 
 
 #!/bin/bash
-# Sample Blue Gene Epilog script
+# Sample BlueGene Epilog script
 #
 # Cancel job to start the termination process for this job
 # and release the bgblock
@@ -243,7 +252,7 @@ etc.).  Sample prolog and epilog scripts follow. </p>
 
 <p>Since jobs with different geometries or other characteristics do not interfere 
 with each other's scheduling, backfill scheduling is not presently meaningful.
-SLURM's builtin scheduler on Blue Gene will sort pending jobs and then attempt 
+SLURM's builtin scheduler on BlueGene will sort pending jobs and then attempt 
 to schedule all of them in priority order. 
 This essentially functions as if there is a separate queue for each job size.
 Note that SLURM does support different partitions with an assortment of 
@@ -253,8 +262,8 @@ is enabled to execute jobs only at certain times; while a default partition
 could be configured to execute jobs at other times. 
 Jobs could still be queued in a partition that is configured in a DOWN 
 state and scheduled to execute when changed to an UP state. 
-Nodes can also be moved between slurm partitions either by changing 
-the slurm.conf file and restarting the slurmctld daemon or by using 
+Base partitions can also be moved between slurm partitions either by changing 
+the <i>slurm.conf</i> file and restarting the slurmctld daemon or by using 
 the scontrol reconfig command. </p>
 
 <p>SLURM node and partition descriptions should make use of the 
@@ -270,8 +279,8 @@ the slurmd daemon.
 No computer is actually expected to have a hostname of "bg000" and no
 attempt will be made to route message traffic to this address. </p>
 
-<p>While users are unable to initiate SLURM job steps on Blue Gene systems, 
-this restriction does not apply to user root or SlurmUser. 
+<p>While users are unable to initiate SLURM job steps on BlueGene systems, 
+this restriction does not apply to user root or <i>SlurmUser</i>. 
 Be advised that the one slurmd supporting all nodes is unable to manage a 
 large number of job steps, so this ability should be used only to verify normal 
 SLURM operation. 
@@ -280,17 +289,9 @@ fail due to lack of memory or other resources.
 It is best to minimize other work on the front-end node executing slurmd
 so as to maximize its performance and minimize other risk factors.</p>
 
-<p>Presently the system administrator must explicitly define each of the 
-Blue Gene partitions (or bgblocks) available to execute jobs. 
-(<b>NOTE:</b> Blue Gene partitions are unrelated to SLURM partitions.)
-Jobs must then execute in one of these pre-defined bgblocks. 
-This is known as <i>static partitioning</i>. 
-Each of these bgblocks are explicitly configured with either a mesh or 
-torus interconnect.
-They must also not overlap, except for the implicitly defined full-system 
-bgblock.
-In addition to the normal <i>slurm.conf</i> file, a new 
-<i>bluegene.conf</i> configuration file is required with this information.
+<p>In addition to the normal <i>slurm.conf</i> file, a new 
+<i>bluegene.conf</i> configuration file is required with information pertinent 
+to the system.
 Put <i>bluegene.conf</i> into the SLURM configuration directory with
 <i>slurm.conf</i>.
 A sample file is installed in <i>bluegene.conf.example</i>. 
@@ -300,75 +301,131 @@ Note that <i>smap -Dc</i> can be run without the SLURM daemons
 active to establish the initial configuration.
 Note that the defined bgblocks may not overlap (except for the 
 full-system bgblock, which is implicitly created).
-See the smap man page for more information.
-You must insure that the nodes defined in <i>bluegene.conf</i> are 
-consistent with those defined in <i>slurm.conf</i> although the 
-<i>bluegene.conf</i> file contains only the numeric coordinates of 
-nodes while <i>slurm.conf</i> contains the name prefix in addition 
-to the numeric coordinates.
-Note that the Image and Numpsets values defined in <i>bluegene.conf</i>
-are used only when SLURM creates bgblocks.
+See the smap man page for more information.</p>
+
+<p>There are three different modes in which the system administrator can define  
+the BlueGene partitions (or bgblocks) available to execute jobs: static, 
+overlap, and dynamic. 
+Jobs must then execute in one of the created bgblocks.
+(<b>NOTE:</b> bgblocks are unrelated to SLURM partitions.)</p>
+
+<p>The default mode of partitioning is <i>static</i>.
+In this mode, the system administrator must explicitly define each 
+of the bgblocks in the <i>bluegene.conf</i> file.  
+Each of these bgblocks is explicitly configured with either a 
+mesh or torus interconnect.
+They must also not overlap, except for the implicitly defined full-system 
+bgblock. 
+Note that bgblocks are not rebooted between jobs in this mode 
+except when going to/from full-system jobs. 
+Avoiding bgblock reboots can significantly improve system 
+utilization (by eliminating boot time) and reliability.</p>
+
+<p>The second mode is <i>overlap</i> partitioning.  
+Overlap partitioning is very similar to static partitioning in that 
+each bgblock must be explicitly defined in the <i>bluegene.conf</i> 
+file, but these partitions can overlap each other.  
+In this mode <b>it is highly recommended that none of the bgblocks 
+have any passthroughs in the X-dimension associated with them</b>.
+Usually this is only an issue on larger BlueGene systems.  
+<b>It is advisable to use this mode with extreme caution.</b>
+Make sure you know what you are doing to ensure the bgblocks will 
+boot without dependency on the state of any base partition 
+not included in the bgblock.</p>  
+
+<p>In the two previous modes you must ensure that the base 
+partitions defined in <i>bluegene.conf</i> are consistent with 
+those defined in <i>slurm.conf</i>.
+Note the <i>bluegene.conf</i> file contains only the numeric 
+coordinates of base partitions while <i>slurm.conf</i> contains 
+the name prefix in addition to the numeric coordinates.</p>
+
+<p>The final mode is <i>dynamic</i> partitioning.  
+Dynamic partitioning was developed primarily for smaller BlueGene systems, 
+but can be used on larger systems.
+Dynamic partitioning may introduce fragmentation of resources.
+This fragmentation may be severe since SLURM will run a job anywhere 
+resources are available with little thought of the future.  
+As with overlap partitioning, <b>use dynamic partitioning with 
+caution!</b>  
+This mode can result in job starvation since smaller jobs will run 
+if resources are available and prevent larger jobs from running.
+Bgblocks need not be assigned in the <i>bluegene.conf</i> file 
+for this mode.</p>
+
+<p>One of these modes must be defined in the <i>bluegene.conf</i> file 
+with the option <i>LayoutMode=MODE</i> (where MODE=STATIC, DYNAMIC or OVERLAP).</p>
+
+<p>The number of c-nodes in a base partition and in a node card must 
+be defined.  
+This is done using the keywords <i>BasePartitionNodeCnt=NODE_COUNT</i> 
+and <i>NodeCardNodeCnt=NODE_COUNT</i> respectively in the <i>bluegene.conf</i>
+file (i.e. <i>BasePartitionNodeCnt=512</i> and <i>NodeCardNodeCnt=32</i>).</p>
+
+<p>Note that the <i>Image</i> and <i>Numpsets</i> values defined in 
+<i>bluegene.conf</i> are used only when SLURM creates bgblocks.
 If previously defined bgblocks are used by SLURM, their configurations 
 are not altered.
 If you change the bgblock layout, then slurmctld and slurmd should 
 both be cold-started (e.g. <b>/etc/init.d/slurm startclean</b>).
-If you which to modify the Image and Numpsets values for existing
-bgblocks, either modify them manually or destroy the bgblocks
+If you wish to modify the <i>Image</i> and <i>Numpsets</i> values 
+for existing bgblocks, either modify them manually or destroy the bgblocks
 and let SLURM recreate them. 
-Note that in addition to the bgblocks defined in bluegene.conf, an 
+Note that in addition to the bgblocks defined in <i>bluegene.conf</i>, an 
 additional bgblock is created containing all resources defined in 
 all of the other defined bgblocks. 
-If you modify the bgblocks, it is recommended that you restart 
-both slurmctld and slurmd without preserving state 
-(<i>/etc/init.d/slurm startclean</i>).
-Note that SLURM wiring decisions are based upon the link-cards 
-being interconnected in a specific fashion. 
-If your BlueGene system is wired in an unconventional fashion, 
-modifications to the file <i>src/partition_allocator/partition_allocator.c</i> 
-may be required.
 Make use of the SLURM partition mechanism to control access to these 
-bgblocks. A sample <i>bluegene.conf</i> file is shown below.
+bgblocks. 
+A sample <i>bluegene.conf</i> file is shown below.
 <pre>
 ###############################################################################
-# Global specifications for Blue Gene system
+# Global specifications for BlueGene system
 #
-# BlrtsImage:     BlrtsImage used for creation of all bgblocks.
-# LinuxImage:     LinuxImage used for creation of all bgblocks.
-# MloaderImage:   MloaderImage used for creation of all bgblocks.
-# RamDiskImage:   RamDiskImage used for creation of all bgblocks.
-# Numpsets:       The Numpsets used for creation of all bgblocks 
-#                 equals this value multiplied by the number of 
-#                 base partitions in the bgblock.
+# BlrtsImage:           BlrtsImage used for creation of all bgblocks.
+# LinuxImage:           LinuxImage used for creation of all bgblocks.
+# MloaderImage:         MloaderImage used for creation of all bgblocks.
+# RamDiskImage:         RamDiskImage used for creation of all bgblocks.
+# LayoutMode:           Mode in which slurm will create blocks:
+#                       STATIC:  Use defined non-overlapping bgblocks
+#                       OVERLAP: Use defined bgblocks, which may overlap
+#                       DYNAMIC: Create bgblocks as needed for each job
+# BasePartitionNodeCnt: Number of c-nodes per base partition  
+# NodeCardNodeCnt:      Number of c-nodes per node card.
+# Numpsets:             The Numpsets used for creation of all bgblocks 
+#                       equals this value multiplied by the number of 
+#                       base partitions in the bgblock.
 #
-# BridgeAPILogFile : Pathname of file in which to write the BG 
-#                    Bridge API logs.
+# BridgeAPILogFile:  Pathname of file in which to write the 
+#                    BG Bridge API logs.
 # BridgeAPIVerbose:  How verbose the BG Bridge API logs should be
 #                    0: Log only error and warning messages
 #                    1: Log level 0 and information messages
 #                    2: Log level 1 and basic debug messages
 #                    3: Log level 2 and more debug message
 #                    4: Log all messages
-# 
-# NOTE: The bg_serial value is set at configuration time using the 
-#       "--with-bg-serial=" option. Its default value is "BGL".
+#
+# NOTE: The bgl_serial value is set at configuration time using the 
+#       "--with-bgl-serial=" option. Its default value is "BGL".
 ###############################################################################
 BlrtsImage=/bgl/BlueLight/ppcfloor/bglsys/bin/rts_hw.rts
 LinuxImage=/bgl/BlueLight/ppcfloor/bglsys/bin/zImage.elf
 MloaderImage=/bgl/BlueLight/ppcfloor/bglsys/bin/mmcs-mloader.rts
 RamDiskImage=/bgl/BlueLight/ppcfloor/bglsys/bin/ramdisk.elf
+LayoutMode=STATIC
+BasePartitionNodeCnt=512
+NodeCardNodeCnt=32
 Numpsets=8
-#
 BridgeAPILogFile=/var/log/slurm/bridgeapi.log
 BridgeAPIVerbose=0
 
 ###############################################################################
-# Define the static partitions (bgblocks)
+# Define the static/overlap partitions (bgblocks)
 #
-# Nodes: The base partitions (midplanes) in the bgblock using XYZ coordinates
+# BPs: The base partitions (midplanes) in the bgblock using XYZ coordinates
 # Type:  Connection type "MESH" or "TORUS" or "SMALL", default is "TORUS" 
 #        Type SMALL will divide a midplane into multiple bgblocks
-#        4 bgblocks each containing 128 c-nodes
-#        (smaller bgblocks are presently not supported)
+#        based on the NodeCards and Quarters options to determine the type of 
+#        small blocks.
 #
 # IMPORTANT NOTES:
 # * Ordering is very important for laying out switch wires.  Please create
@@ -377,25 +434,26 @@ BridgeAPIVerbose=0
 # * A bgblock is implicitly created containing all resources on the system
 # * Bgblocks must not overlap (except for implicitly created bgblock)
 #   This will be the case when smap is used to create a configuration file
-# * All Nodes defined here must also be defined in the slurm.conf file
-# * Define only the numeric coordinates of the bglblocks here. The prefix
+# * All Base partitions defined here must also be defined in the slurm.conf file
+# * Define only the numeric coordinates of the bgblocks here. The prefix
 #   will be based upon the name defined in slurm.conf
 ###############################################################################
 # LEAVE NEXT LINE AS A COMMENT, Full-system bgblock, implicitly created
-# Nodes=[000x001] Type=TORUS       # 1x1x2 = 2 midplanes
+# BPs=[000x001] Type=TORUS       # 1x1x2 = 2 midplanes
 ###############################################################################
 # volume = 1x1x1 = 1
-Nodes=[000x000] Type=TORUS         # 1x1x1 =  1 midplane
-Nodes=[001x001] Type=SMALL         # 1x1x1 =  1 midplane, in four bgblocks
+BPs=[000x000] Type=TORUS                         # 1x1x1 =  1 midplane
+BPs=[001x001] Type=SMALL NodeCards=4 Quarters=3  # 1x1x1 = 4 nodecard-sized 
+                                                 # (32 c-node) blocks and 
+                                                 # 3 quarter-sized 
+                                                 # (128 c-node) blocks
 
 </pre></p>
 
 <p>The above <i>bluegene.conf</i> file defines multiple bgblocks to be 
 created in a single midplane (see the "SMALL" option). 
-Note that you can not presently control how many bgblocks are created 
-in a midplane, it will be four. 
-Using this mechanism, up to eight independent jobs can be executed 
-simultaneously on a one-rack Blue Gene system.
+Using this mechanism, up to 32 independent jobs can be executed 
+simultaneously on a one-rack BlueGene system.
 If defining bgblocks of <i>Type=SMALL</i>, the SLURM partition 
 containing them as defined in <i>slurm.conf</i> must have the 
 parameter <i>Shared=force</i> to enable scheduling of multiple 
@@ -412,7 +470,7 @@ DB2 database access is required by the slurmctld daemon only.
 All other SLURM daemons and commands interact with DB2 using 
 remote procedure calls, which are processed by slurmctld.
 DB2 access is dependent upon the environment variable
-<b>BRIDGE_CONFIG_FILE</b>. 
+<i>BRIDGE_CONFIG_FILE</i>. 
 Make sure this is set appropriately before initiating the 
 slurmctld daemon. 
 If desired, this environment variable and any other logic 
@@ -420,22 +478,15 @@ can be executed through the script <i>/etc/sysconfig/slurm</i>,
 which is automatically executed by <i>/etc/init.d/slurm</i> 
 prior to initiating the SLURM daemons.</p>
 
-<p>At some time in the future, we expect SLURM to support <i>dynamic 
-partitioning</i> in which Blue Gene job partitions are created and destroyed 
-as needed to accommodate the workload.
-At that time the <i>bluegene.conf</i> configuration file will become obsolete.
-Dynamic partition does involve substantial overhead including the 
-rebooting of c-nodes and I/O nodes.</p>
-
 <p>When slurmctld is initially started on an idle system, the bgblocks 
 already defined in MMCS are read using the BG Bridge APIs. 
-If these bgblocks do not correspond to those defined in the bluegene.conf 
+If these bgblocks do not correspond to those defined in the <i>bluegene.conf</i> 
 file, the old bgblocks with a prefix of "RMP" are destroyed and new ones 
 created. 
 When a job is scheduled, the appropriate bgblock is identified, 
 its user set, and it is booted. 
 Node use (virtual or coprocessor) is set from the mpirun command line now;
-Slurm has nothing to do with setting the node use.
+SLURM has nothing to do with setting the node use.
 Subsequent jobs use this same bgblock without rebooting by changing 
 the associated user field.
 The only time bgblocks should be freed and rebooted, in normal operation,
@@ -453,10 +504,10 @@ or rebooting of bgblocks.  </p>
 <p>Be aware that SLURM will issue multiple bgblock boot requests as 
 needed (e.g. when the boot fails). 
 If the bgblock boot requests repeatedly fail, SLURM will configure 
-the failing nodes to a DRAINED state so as to avoid continuing 
+the failing base partitions to a DRAINED state so as to avoid continuing 
 repeated reboots and the likely failure of user jobs. 
 A system administrator should address the problem before returning 
-the nodes to service.</p>
+the base partitions to service.</p>
 
 <p>If you cold-start slurmctld (<b>/etc/init.d/slurm startclean</b> 
 or <b>slurmctld -c</b>) it is recommended that you also cold-start 
@@ -464,31 +515,31 @@ the slurmd at the same time.
 Failure to do so may result in errors being reported by both slurmd 
 and slurmctld due to bgblocks that previously existed being deleted.</p>
 
-<p>A new tool <b>sfree</b> has also been added to help admins free a BG 
-block on request.  
+<p>A new tool <i>sfree</i> has also been added to help admins free a  
+bgblock on request.  
 <br>For usage use <b>sfree -u</b> and for help <b>-h</b>.</p>
 
 <h4>Debugging</h4>
 
 <p>All of the testing and debugging guidance provided in 
 <a href="quickstart_admin.html"> Quick Start Administrator Guide</a>
-apply to Blue Gene systems.
-One can start the <b>slurmctld</b> and <b>slurmd</b> in the foreground 
+apply to BlueGene systems.
+One can start the <i>slurmctld</i> and <i>slurmd</i> in the foreground 
 with extensive debugging to establish basic functionality. 
-Once running in production, the configured <b>SlurmctldLog</b> and 
-<b>SlurmdLog</b> files will provide historical system information.
-On Blue Gene systems, there is also a <b>BridgeAPILogFile</b> defined 
-in <b>bluegene.conf</b> which can be configured to contain detailed 
-information about every Bridge API call issued.</p>
+Once running in production, the configured <i>SlurmctldLog</i> and 
+<i>SlurmdLog</i> files will provide historical system information.
+On BlueGene systems, there is also a <i>BridgeAPILogFile</i> defined 
+in <i>bluegene.conf</i> which can be configured to contain detailed 
+information about every BG Bridge API call issued.</p>
 
 <p>Note that slurmctld log messages of the sort 
 <i>Nodes bg[000x133] not responding</i> indicate that the slurmd 
-daemon serving as a front-end to those nodes is not responding (on 
-non-Blue Gene systems, the slurmd actually does run on the compute 
+daemon serving as a front-end to those base partitions is not responding (on 
+non-BlueGene systems, the slurmd actually does run on the compute 
 nodes, so the message is more meaningful there). </p>
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 16 January 2006</p>
+<p style="text-align:center;">Last modified 2 March 2006</p>
 
 <!--#include virtual="footer.txt"-->
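To tie together the srun options and batch workflow described in the guide above,
a BlueGene job submission might look like the following sketch (the script name,
node count, and geometry are illustrative values only, and the geometry is assumed
here to be expressed in base partitions):

    # Request 2048 c-nodes (four base partitions) wired as a torus, with a
    # fixed 1x2x2 geometry; the script itself invokes mpirun one or more times.
    srun --batch --nodes=2048 --conn-type=torus --geometry=1x2x2 --no-rotate myscript.sh

SLURM allocates and boots the bgblock, and the script's mpirun invocations pick up
the block name from the MPIRUN_PARTITION environment variable set by SLURM.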
diff --git a/doc/man/man1/smap.1 b/doc/man/man1/smap.1
index 8c021422941..9bd184fd061 100644
--- a/doc/man/man1/smap.1
+++ b/doc/man/man1/smap.1
@@ -30,14 +30,6 @@ all modes except for 'configure mode' user can type 'quit' to exit just
 configure mode.  Typing 'exit' will end the configuration mode and exit smap.
 Note that unallocated nodes are indicated by a '.' and DOWN or DRAINED 
 nodes by a '#'.
-\fB\-R <RACK_MIDPLANE_ID/XYZ>\fR, \fB\-\-resolve=<RACK_MIDPLANE_ID/XYZ>\fR
-Returns the XYZ coords for a Rack/Midplane id or vice\-versa.
-
-To get the XYZ coord for a Rack/Midplane id input \-R R101 where 10 is the rack
-and 1 is the midplane.  
-
-To get the Rack/Midplane id from a XYZ coord input \-R 101 where X=1 Y=1 Z=1 with
-no leading 'R'.  
 .RS
 .TP 15
 .I "j"
@@ -52,6 +44,17 @@ Displays information about BG partitions on the system
 .I "c"
 Displays current node states and allows users to configure the system.
 .RE
+
+.TP
+\fB\-R <RACK_MIDPLANE_ID/XYZ>\fR, \fB\-\-resolve=<RACK_MIDPLANE_ID/XYZ>\fR
+Returns the XYZ coords for a Rack/Midplane id or vice\-versa.
+
+To get the XYZ coord for a Rack/Midplane id input \-R R101 where 10 is the rack
+and 1 is the midplane.
+
+To get the Rack/Midplane id from a XYZ coord input \-R 101 where X=1 Y=1 Z=1 with
+no leading 'R'.
+
 .TP
 \fB\-h\fR, \fB\-\-noheader\fR
 Do not print a header on the output.
@@ -99,11 +102,11 @@ Mode Type: \fBCOPROCESS\fR or \fBVIRTUAL\fR.
 \fBNAME\fR
 Name of the job.
 .TP
-\fBNODELIST\fR
-Names of nodes associated with this configuration/partition.
+\fBNODELIST\fR or \fBBP_LIST\fR
+Names of nodes or base partitions associated with this configuration/partition.
 .TP
 \fBNODES\fR
-Count of nodes with this particular configuration.
+Count of nodes or base partitions with this particular configuration.
 .TP
 \fBPARTITION\fR
 Name of a partition.  Note that the suffix "*" identifies the
@@ -140,7 +143,7 @@ with right side of one line being logically followed by the
 left side of the next line.
 .PP
 .nf
-On Blue Gene systems, the node chart will indicate the three 
+On BlueGene systems, the node chart will indicate the three 
 dimensional topography of the system.
 The X dimension will increase from left to right on a given line.
 The Y dimension will increase in planes from bottom to top.
@@ -168,12 +171,12 @@ a a a a . . e e               |
  a a a a . . . .            /
 a a a a . . . #            Z
 
-ID JOBID PARTITION USER   NAME ST TIME NODES NODELIST
-a  12345 batch    joseph tst1 R  43:12   64 bgl[000x333]
-b  12346 debug    chris  sim3 R  12:34   16 bgl[420x533]
-c  12350 debug    danny  job3 R   0:12    8 bgl[622x733]
-d  12356 debug    dan    colu R  18:05   16 bgl[600x731]
-e  12378 debug    joseph asx4 R   0:34    4 bgl[612x713]
+ID JOBID PARTITION BG_BLOCK USER   NAME ST  TIME NODES BP_LIST
+a  12345 batch     RMP0     joseph tst1 R  43:12   32k bgl[000x333]
+b  12346 debug     RMP1     chris  sim3 R  12:34    8k bgl[420x533]
+c  12350 debug     RMP2     danny  job3 R   0:12    4k bgl[622x733]
+d  12356 debug     RMP3     dan    colu R  18:05    8k bgl[600x731]
+e  12378 debug     RMP4     joseph asx4 R   0:34    2k bgl[612x713]
 
 .fi
 
@@ -186,22 +189,26 @@ blocks.
 .TP
 \fBOUTPUT\fR
 
+.RS
 .TP
-.I "BG_BLOCK" BlueGene Block Name.
+\fBBG_BLOCK\fR
+BlueGene Block Name.
 .TP
-.I "CONN"
+\fBCONN\fR
 Connection Type: \fBTORUS\fR or \fBMESH\fR or \fBSMALL\fR (for small blocks).
 .TP
-.I "ID"
+\fBID\fR
 Key to identify the nodes associated with this entity in the node chart.
 .TP
-.I "MODE"
+\fBMODE\fR
 Mode Type: \fBCOPROCESS\fR or \fBVIRTUAL\fR.
-	
+.RE
+
 .TP
 \fBINPUT COMMANDS\fR
+.RS
 .TP
-.I resolve <RACK_MIDPLANE_ID/XYZ>\fR
+\fBresolve <RACK_MIDPLANE_ID/XYZ>\fR
 Returns the XYZ coords for a Rack/Midplane id or vice\-versa.
 
 To get the XYZ coord for a Rack/Midplane id input \-R R101 where 10 is the rack
@@ -210,62 +217,66 @@ and 1 is the midplane.
 To get the Rack/Midplane id from a XYZ coord input \-R 101 where X=1 Y=1 Z=1 with
 no leading 'R'.  
 
-.RS
 .TP
-.I \fBload <bluegene.conf file>\fR
+\fBload <bluegene.conf file>\fR
 Load an existing bluegene.conf file. This will verify and map out the
 bluegene.conf file.  Once loaded, the configuration may be edited and 
 saved as a new file.
-.RS
-.TP .I \fBcreate <size> <options>\fR
+
+.TP 
+\fBcreate <size> <options>\fR
 Submit request for partition creation. The size may be specified either 
 as a count of base partitions or specific dimensions in the X, Y and Z 
 directions separated by "x", for example "2x3x4". A variety of options 
 may be specified. Valid options are listed below. Note that the options
 and their values are case insensitive (e.g. "MESH" and "mesh" are equivalent).
-.RS
 .TP
-.I \fBStart = XxYxZ\fR
+\fBStart = XxYxZ\fR
 Identify where to start the partition.  This is primarily for testing 
 purposes.  For convenience one may specify only the X coordinate, or XxY will also work.
 The default value is 0x0x0.
 .TP
-.I \fBConnection = MESH | TORUS | SMALL\fR
+\fBConnection = MESH | TORUS | SMALL\fR
 Identify how the nodes should be connected in network. 
 The default value is TORUS.
+.RS
 .TP
-.I \fBSmall\fR
+\fBSmall\fR
 Equivalent to "Connection=Small".
-If a small connection is specified the midplanes chosen will create 4
-smaller partitions within the midplane each consisting of 128 c\-nodes.
+If a small connection is specified, the base partition chosen will be divided 
+into smaller partitions based on the \fBNodeCards\fR and \fBQuarters\fR options
+within the base partition.  These numbers will be adjusted to fill the 
+entire base partition. Size does not need to be specified with a small 
+request; the allocation always defaults to one base partition.
 .TP
-.I \fBMesh\fR
+\fBMesh\fR
 Equivalent to "Connection=Mesh".
 .TP
-.I \fBTorus\fR
+\fBTorus\fR
 Equivalent to "Connection=Torus".
+.RE
+
 .TP
-.I \fBRotation = TRUE | FALSE\fR
+\fBRotation = TRUE | FALSE\fR
 Specifies that the geometry specified in the size parameter may 
 be rotated in space (e.g. the Y and Z dimensions may be switched).
 The default value is FALSE.
 .TP
-.I \fBRotate\fR
+\fBRotate\fR
 Equivalent to "Rotation=true".
 .TP
-.I \fBElongation = TRUE | FALSE\fR
+\fBElongation = TRUE | FALSE\fR
 If TRUE, permit the geometry specified in the size parameter to be altered as 
 needed to fit available resources. 
 For example, an allocation of "4x2x1" might be used to satisfy a size specification 
 of "2x2x2". 
 The default value is FALSE.
 .TP
-.I \fBElongate\fR
+\fBElongate\fR
 Equivalent to "Elongation=true".
-.RE
 
 .TP
-.I \fBcopy <id> <count>\fR
+\fBcopy <id> <count>\fR
 Submit request for partition to be copied. 
 You may copy a specific partition by specifying its id; by default the 
 last configured partition is copied. 
@@ -273,33 +284,34 @@ You may also specify a number of copies to be made.
 By default, one copy is made.
 
 .TP
-.I \fBdelete <id>\fR
+\fBdelete <id>\fR
 Delete the specified block. 
 
 .TP
-.I \fBdown <node_range>\fR
+\fBdown <node_range>\fR
 Down a specific node or range of nodes. 
 i.e. 000, 000\-111 [000x111]
 .TP
-.I \fBup <node_range>\fR
+\fBup <node_range>\fR
 Bring a specific node or range of nodes up. 
 i.e. 000, 000\-111 [000x111]
 .TP
-.I \fBalldown\fR
+\fBalldown\fR
 Set all nodes to down state.
 .TP
-.I \fBallup\fR
+\fBallup\fR
 Set all nodes to up state.
 
 .TP
-.I \fBsave <file_name>\fR
+\fBsave <file_name>\fR
 Save the current configuration to a file. 
 If no file_name is specified, the configuration is written to a 
 file named "bluegene.conf" in the current working directory.
 
 .TP
-.I \fBclear\fR
+\fBclear\fR
 Clear all partitions created.
+.RE
 
 .SH "NODE STATE CODES"
 .PP
@@ -310,14 +322,14 @@ any new work.  If the node remains non\-responsive, it will
 be placed in the \fBDOWN\fR state (except in the case of
 \fBDRAINED\fR, \fBDRAINING\fR, or \fBCOMPLETING\fR nodes).
 .TP 12
-ALLOCATED
+\fBALLOCATED\fR
 The node has been allocated to one or more jobs.
 .TP
-ALLOCATED+
+\fBALLOCATED+\fR
 The node is allocated to one or more active jobs plus
 one or more jobs are in the process of COMPLETING.
 .TP
-COMPLETING
+\fBCOMPLETING\fR
 All jobs associated with this node are in the process of 
 COMPLETING.  This node state will be removed when
 all of the job's processes have terminated and the SLURM
@@ -325,7 +337,7 @@ epilog program (if any) has terminated. See the \fBEpilog\fR
 parameter description in the \fBslurm.conf\fR man page for
 more information.
 .TP
-DOWN
+\fBDOWN\fR
 The node is unavailable for use. SLURM can automatically
 place nodes in this state if some failure occurs. System
 administrators may also explicitly place nodes in this state. If
@@ -334,13 +346,13 @@ return it to service. See the \fBReturnToService\fR
 and \fBSlurmdTimeout\fR parameter descriptions in the
 \fBslurm.conf\fR(5) man page for more information.
 .TP
-DRAINED
+\fBDRAINED\fR
 The node is unavailable for use per system administrator
 request.  See the \fBupdate node\fR command in the
 \fBscontrol\fR(1) man page or the \fBslurm.conf\fR(5) man page
 for more information.
 .TP
-DRAINING
+\fBDRAINING\fR
 The node is currently executing a job, but will not be allocated
 to additional jobs. The node state will be changed to state
 \fBDRAINED\fR when the last job on it completes. Nodes enter
@@ -348,45 +360,46 @@ this state per system administrator request. See the \fBupdate
 node\fR command in the \fBscontrol\fR(1) man page or the
 \fBslurm.conf\fR(5) man page for more information.
 .TP
-IDLE
+\fBIDLE\fR
 The node is not allocated to any jobs and is available for use.
 .TP
-UNKNOWN
+\fBUNKNOWN\fR
 The SLURM controller has just started and the node's state
 has not yet been determined.
 
 .SH "JOB STATE CODES"
 Jobs typically pass through several states in the course of their
 execution.
-The typical states are PENDING, RUNNING, SUSPENDED, COMPLETING, and COMPLETED.
+The typical states are \fBPENDING\fR, \fBRUNNING\fR, \fBSUSPENDED\fR, 
+\fBCOMPLETING\fR, and \fBCOMPLETED\fR.
 An explanation of each state follows.
 .TP 20
-CA  CANCELLED
+\fBCA  CANCELLED\fR
 Job was explicitly cancelled by the user or system administrator.
 The job may or may not have been initiated.
 .TP
-CD  COMPLETED
+\fBCD  COMPLETED\fR
 Job has terminated all processes on all nodes.
 .TP
-CG  COMPLETING
+\fBCG  COMPLETING\fR
 Job is in the process of completing. Some processes on some nodes may still be active.
 .TP
-F   FAILED
+\fBF   FAILED\fR
 Job terminated with non\-zero exit code or other failure condition.
 .TP
-NF  NODE_FAIL
+\fBNF  NODE_FAIL\fR
 Job terminated due to failure of one or more allocated nodes.
 .TP
-PD  PENDING
+\fBPD  PENDING\fR
 Job is awaiting resource allocation.
 .TP
-R   RUNNING
+\fBR   RUNNING\fR
 Job currently has an allocation.
 .TP
-S   SUSPENDED
+\fBS   SUSPENDED\fR
 Job has an allocation, but execution has been suspended.
 .TP
-TO  TIMEOUT
+\fBTO  TIMEOUT\fR
 Job terminated upon reaching its time limit.
 
 .SH "ENVIRONMENT VARIABLES"
diff --git a/etc/bluegene.conf.example b/etc/bluegene.conf.example
index d77180a7720..fda38f4a535 100644
--- a/etc/bluegene.conf.example
+++ b/etc/bluegene.conf.example
@@ -1,13 +1,19 @@
 ###############################################################################
 # Global specifications for BlueGene system
 #
-# BlrtsImage:     BlrtsImage used for creation of all bglblocks.
-# LinuxImage:     LinuxImage used for creation of all bglblocks.
-# MloaderImage:   MloaderImage used for creation of all bglblocks.
-# RamDiskImage:   RamDiskImage used for creation of all bglblocks.
-# Numpsets:       The Numpsets used for creation of all bglblocks 
-#                 equals this value multiplied by the number of 
-#                 base partitions in the bglblock.
+# BlrtsImage:           BlrtsImage used for creation of all bgblocks.
+# LinuxImage:           LinuxImage used for creation of all bgblocks.
+# MloaderImage:         MloaderImage used for creation of all bgblocks.
+# RamDiskImage:         RamDiskImage used for creation of all bgblocks.
+# LayoutMode:           Mode in which slurm will create blocks:
+#                       STATIC:  Use defined non-overlapping bgblocks
+#                       OVERLAP: Use defined bgblocks, which may overlap
+#                       DYNAMIC: Create bgblocks as needed for each job
+# BasePartitionNodeCnt: Number of c-nodes per base partition.  
+# NodeCardNodeCnt:      Number of c-nodes per node card.
+# Numpsets:             The Numpsets used for creation of all bgblocks 
+#                       equals this value multiplied by the number of 
+#                       base partitions in the bgblock.
 #
 # BridgeAPILogFile : Pathname of file in which to write the BGL 
 #                    Bridge API logs.
@@ -25,27 +31,30 @@ BlrtsImage=/bgl/BlueLight/ppcfloor/bglsys/bin/rts_hw.rts
 LinuxImage=/bgl/BlueLight/ppcfloor/bglsys/bin/zImage.elf
 MloaderImage=/bgl/BlueLight/ppcfloor/bglsys/bin/mmcs-mloader.rts
 RamDiskImage=/bgl/BlueLight/ppcfloor/bglsys/bin/ramdisk.elf
+LayoutMode=STATIC
+BasePartitionNodeCnt=512
+NodeCardNodeCnt=32
 Numpsets=8
 
 BridgeAPILogFile=/var/log/slurm/bridgeapi.log
 BridgeAPIVerbose=0
 
 ###############################################################################
-# Define the static partitions (bglblocks)
+# Define the static/overlap partitions (bglblocks)
 #
 # Nodes: The base partitions (midplanes) in the bglblock using XYZ coordinates
 # Type:  Connection type "MESH" or "TORUS" or "SMALL", default is "TORUS" 
-#        Type SMALL will divide a midplane into multiple bglblocks
-#        4 bglblocks each containing 128 c-nodes
-#        (smaller bglblocks are presently not supported)
+#        Type SMALL will divide a midplane into multiple bgblocks
+#        based on the num32 and num128 options to determine the type of small blocks.
 # 
 # IMPORTANT NOTES: 
 # * Ordering is very important for laying out switch wires.  Please create
 #   blocks with smap, and once done don't move the order of blocks
 #   created.
 # * A bglblock is implicitly created containing all resources on the system
-# * Bglblocks must not overlap (except for implicitly created bglblock)
-#   This will be the case when smap is used to create a configuration file
+# * Bglblocks must not overlap in static mode (except for implicitly 
+#   created bglblock). This will be the case when smap is used to create 
+#   a configuration file
 # * All Nodes defined here must also be defined in the slurm.conf file
 # * Define only the numeric coordinates of the bglblocks here. The prefix
 #   will be based upon the NodeName defined in slurm.conf
@@ -54,10 +63,10 @@ BridgeAPIVerbose=0
 # Nodes=[000x333] Type=TORUS        # 4x4x4 = 64 midplanes
 ###############################################################################
 # smap bglblock layout here:
-Nodes=[000x133] Type=TORUS          # 2x4x4 = 32
-Nodes=[200x233] Type=TORUS          # 1x4x4 = 16
-Nodes=[300x313] Type=TORUS          # 1x2x4 =  8
-Nodes=[320x323] Type=TORUS          # 1x1x4 =  4
-Nodes=[330x331] Type=TORUS          # 1x1x2 =  2
-Nodes=[332x332] Type=TORUS          # 1x1x1 =  1
-Nodes=[333x333] Type=SMALL          # 1x1x1 =  1, in four bglblocks
+BPs=[000x133] Type=TORUS          # 2x4x4 = 32
+BPs=[200x233] Type=TORUS          # 1x4x4 = 16
+BPs=[300x313] Type=TORUS          # 1x2x4 =  8
+BPs=[320x323] Type=TORUS          # 1x1x4 =  4
+BPs=[330x331] Type=TORUS          # 1x1x2 =  2
+BPs=[332x332] Type=TORUS          # 1x1x1 =  1
+BPs=[333x333] Type=SMALL num32=4 num128=3 # 1x1x1 = four 32 c-node blocks and three 128 c-node blocks
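The example above uses LayoutMode=STATIC with an explicit block layout.  Under
dynamic partitioning the block definitions can be omitted entirely; a minimal
sketch of such a file (reusing the image paths from the example above) might be:

    BlrtsImage=/bgl/BlueLight/ppcfloor/bglsys/bin/rts_hw.rts
    LinuxImage=/bgl/BlueLight/ppcfloor/bglsys/bin/zImage.elf
    MloaderImage=/bgl/BlueLight/ppcfloor/bglsys/bin/mmcs-mloader.rts
    RamDiskImage=/bgl/BlueLight/ppcfloor/bglsys/bin/ramdisk.elf
    LayoutMode=DYNAMIC
    BasePartitionNodeCnt=512
    NodeCardNodeCnt=32
    Numpsets=8
    # No BPs= lines are required; bgblocks are created as each job needs them.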
diff --git a/slurm/slurm.h.in b/slurm/slurm.h.in
index 8cfa63e3eaf..f19560841bc 100644
--- a/slurm/slurm.h.in
+++ b/slurm/slurm.h.in
@@ -193,7 +193,11 @@ enum select_data_type {
 	SELECT_DATA_NODE_USE,	/* data-> uint16_t node_use */
 	SELECT_DATA_CONN_TYPE,	/* data-> uint16_t connection_type */
 	SELECT_DATA_BLOCK_ID,	/* data-> char bg_block_id */
-	SELECT_DATA_QUARTER	/* data-> uint32_t quarter */
+	SELECT_DATA_QUARTER,	/* data-> uint16_t quarter */
+	SELECT_DATA_SEGMENT,	/* data-> uint16_t segment */
+	SELECT_DATA_NODE_CNT,	/* data-> uint32_t node_cnt */
+	SELECT_DATA_ALTERED,    /* data-> uint16_t altered */
+	SELECT_DATA_MAX_PROCS	/* data-> uint32_t max_procs */
 };
 
 enum select_print_mode {
@@ -203,6 +207,14 @@ enum select_print_mode {
 	SELECT_PRINT_BG_ID	/* Print just the BG_ID */
 };
 
+enum select_node_cnt {
+	SELECT_GET_NODE_MIN_OFFSET,   /* Give Min offset */ 
+	SELECT_GET_NODE_MAX_OFFSET,   /* Give Max offset */
+	SELECT_APPLY_NODE_MIN_OFFSET, /* Apply min offset to variable */
+	SELECT_APPLY_NODE_MAX_OFFSET, /* Apply max offset to variable */
+	SELECT_SET_NODE_CNT	      /* Set altered node cnt */
+};
+
 /* Possible task distributions across the nodes */
 enum task_dist_states {
 	SLURM_DIST_CYCLIC,	/* distribute tasks 1 per node, round robin */
@@ -344,17 +356,6 @@ typedef struct job_descriptor {	/* For submit, allocate, and update requests */
 	time_t begin_time;	/* delay initiation until this time */
 	uint16_t mail_type;	/* see MAIL_JOB_ definitions above */
 	char *mail_user;	/* user to receive notification */
-/*
- * The following parameters are only meaningful on a Blue Gene
- * system at present. Some will be of value on other system.
- */
-#if SYSTEM_DIMENSIONS
-	uint16_t geometry[SYSTEM_DIMENSIONS];	/* node count in various 
-				 * dimensions, e.g. X, Y, and Z */
-#endif
-	uint16_t conn_type;	/* see enum connection_type */
-	uint16_t rotate;	/* permit geometry rotation if set */
-/* End of Blue Gene specific values */
 	select_jobinfo_t select_jobinfo; /* opaque data type,
 			* SLURM internal use only */
 } job_desc_msg_t;
@@ -494,6 +495,8 @@ typedef struct partition_info {
 	uint32_t min_nodes;	/* per job */
 	uint32_t total_nodes;	/* total number of nodes in the partition */
 	uint32_t total_cpus;	/* total number of cpus in the partition */
+	uint32_t min_offset;	/* select plugin min offset */
+	uint32_t max_offset;	/* select plugin max offset */
 	uint16_t default_part;	/* 1 if this is default partition */
 	uint16_t hidden;	/* 1 if partition is hidden by default */
 	uint16_t root_only;	/* 1 if allocate must come for user root */
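
NOTE (hedged sketch, not taken from the patch): the new SELECT_DATA_* values are
read back through the existing select_g_get_jobinfo() call. The include path,
printf reporting, and omitted error handling below are illustrative assumptions:

	#include <stdio.h>
	#include "src/common/node_select.h"	/* select_g_get_jobinfo(), assumed path */

	static void show_bg_jobinfo(select_jobinfo_t jobinfo)
	{
		uint16_t quarter   = (uint16_t) NO_VAL;
		uint16_t segment   = (uint16_t) NO_VAL;
		uint32_t node_cnt  = NO_VAL;
		uint32_t max_procs = NO_VAL;

		select_g_get_jobinfo(jobinfo, SELECT_DATA_QUARTER,   &quarter);
		select_g_get_jobinfo(jobinfo, SELECT_DATA_SEGMENT,   &segment);
		select_g_get_jobinfo(jobinfo, SELECT_DATA_NODE_CNT,  &node_cnt);
		select_g_get_jobinfo(jobinfo, SELECT_DATA_MAX_PROCS, &max_procs);

		printf("quarter=%u segment=%u node_cnt=%u max_procs=%u\n",
		       quarter, segment, node_cnt, max_procs);
	}
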
diff --git a/src/api/init_msg.c b/src/api/init_msg.c
index 0acb13d0734..daa7c0eefc9 100644
--- a/src/api/init_msg.c
+++ b/src/api/init_msg.c
@@ -84,16 +84,7 @@ void slurm_init_job_desc_msg(job_desc_msg_t * job_desc_msg)
 	job_desc_msg->mail_user   = NULL;
 	job_desc_msg->port        = 0;
 	job_desc_msg->begin_time  = 0;
-#if SYSTEM_DIMENSIONS
-{
-	int i;
-	for (i=0; i<SYSTEM_DIMENSIONS; i++)
-		job_desc_msg->geometry[i] = (uint16_t) NO_VAL;
-}
-#endif
-	job_desc_msg->conn_type   = (uint16_t) NO_VAL;
-	job_desc_msg->rotate      = (uint16_t) NO_VAL;
-	job_desc_msg->exclusive   = (uint16_t) NO_VAL;
+	job_desc_msg->select_jobinfo = NULL;
 }
 
 /*
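
NOTE (hedged sketch): with the Blue Gene fields dropped from job_desc_msg_t, a
submitting client would carry geometry, connection type, and rotate inside the
opaque select_jobinfo instead. The example shape, SELECT_TORUS value, and exact
setup flow below are assumptions for illustration, not lifted from this patch:

	static int fill_bg_job_desc(job_desc_msg_t *desc)
	{
		uint16_t geometry[SYSTEM_DIMENSIONS] = {2, 4, 4};	/* example shape */
		uint16_t conn_type = SELECT_TORUS;
		uint16_t rotate = 1;

		slurm_init_job_desc_msg(desc);
		if (select_g_alloc_jobinfo(&desc->select_jobinfo) != SLURM_SUCCESS)
			return SLURM_ERROR;

		select_g_set_jobinfo(desc->select_jobinfo, SELECT_DATA_GEOMETRY,  geometry);
		select_g_set_jobinfo(desc->select_jobinfo, SELECT_DATA_CONN_TYPE, &conn_type);
		select_g_set_jobinfo(desc->select_jobinfo, SELECT_DATA_ROTATE,    &rotate);
		return SLURM_SUCCESS;
	}
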
diff --git a/src/api/job_info.c b/src/api/job_info.c
index ec472e3b1cd..0b289d197dd 100644
--- a/src/api/job_info.c
+++ b/src/api/job_info.c
@@ -81,6 +81,21 @@ slurm_print_job_info ( FILE* out, job_info_t * job_ptr, int one_liner )
 	int j;
 	char time_str[16], select_buf[128];
 	struct group *group_info = NULL;
+	char tmp1[7], tmp2[7];
+	uint16_t quarter = (uint16_t) NO_VAL;
+	uint16_t segment = (uint16_t) NO_VAL;
+	
+#ifdef HAVE_BG
+	char *nodelist = "BP_List";
+	select_g_get_jobinfo(job_ptr->select_jobinfo, 
+			     SELECT_DATA_QUARTER, 
+			     &quarter);
+	select_g_get_jobinfo(job_ptr->select_jobinfo, 
+			     SELECT_DATA_SEGMENT, 
+			     &segment);
+#else
+	char *nodelist = "NodeList";
+#endif	
 
 	/****** Line 1 ******/
 	fprintf ( out, "JobId=%u ", job_ptr->job_id);
@@ -144,8 +159,18 @@ slurm_print_job_info ( FILE* out, job_info_t * job_ptr, int one_liner )
 		fprintf ( out, "\n   ");
 
 	/****** Line 6 ******/
-	fprintf ( out, "NodeList=%s ", job_ptr->nodes);
-	fprintf ( out, "NodeListIndices=");
+	fprintf ( out, "%s=%s", nodelist, job_ptr->nodes);
+	if(job_ptr->nodes) {
+		if(quarter != (uint16_t) NO_VAL) {
+			if(segment != (uint16_t) NO_VAL) 
+				fprintf( out, ".%d.%d", quarter, segment);
+			else
+				fprintf( out, ".%d", quarter);
+		} 
+	}
+	fprintf ( out, " ");
+		
+	fprintf ( out, "%sIndices=", nodelist);
 	for (j = 0; job_ptr->node_inx; j++) {
 		if (j > 0)
 			fprintf( out, ",%d", job_ptr->node_inx[j]);
@@ -160,21 +185,31 @@ slurm_print_job_info ( FILE* out, job_info_t * job_ptr, int one_liner )
 		fprintf ( out, "\n   ");
 
 	/****** Line 7 ******/
-	fprintf ( out, "ReqProcs=%u MinNodes=%u ", 
-		job_ptr->num_procs, job_ptr->num_nodes);
-	fprintf ( out, "Shared=%u Contiguous=%u ",  
-		job_ptr->shared, job_ptr->contiguous);
-	fprintf ( out, "CPUs/task=%u", job_ptr->cpus_per_task);
+	convert_to_kilo(job_ptr->num_procs, tmp1);
+	convert_to_kilo(job_ptr->num_nodes, tmp2);
+#ifdef HAVE_BG
+	fprintf ( out, "ReqProcs=%s MinBPs=%s ", tmp1, tmp2);
+#else
+	fprintf ( out, "ReqProcs=%s MinNodes=%s ", tmp1, tmp2);
+#endif
+	convert_to_kilo(job_ptr->shared, tmp1);
+	convert_to_kilo(job_ptr->contiguous, tmp2);
+	fprintf ( out, "Shared=%s Contiguous=%s ", tmp1, tmp2);
+	
+	convert_to_kilo(job_ptr->cpus_per_task, tmp1);
+	fprintf ( out, "CPUs/task=%s", tmp1);
 	if (one_liner)
 		fprintf ( out, " ");
 	else
 		fprintf ( out, "\n   ");
 
 	/****** Line 8 ******/
-	fprintf ( out, "MinProcs=%u MinMemory=%u ",  
-		job_ptr->min_procs, job_ptr->min_memory);
-	fprintf ( out, "Features=%s MinTmpDisk=%u", 
-		job_ptr->features, job_ptr->min_tmp_disk);
+	convert_to_kilo(job_ptr->min_procs, tmp1);
+	convert_to_kilo(job_ptr->min_memory, tmp2);
+	fprintf ( out, "MinProcs=%s MinMemory=%s ", tmp1, tmp2);
+
+	convert_to_kilo(job_ptr->min_tmp_disk, tmp1);
+	fprintf ( out, "Features=%s MinTmpDisk=%s", job_ptr->features, tmp1);
 	if (one_liner)
 		fprintf ( out, " ");
 	else
@@ -191,8 +226,8 @@ slurm_print_job_info ( FILE* out, job_info_t * job_ptr, int one_liner )
 
 
 	/****** Line 10 ******/
-	fprintf ( out, "ReqNodeList=%s ", job_ptr->req_nodes);
-	fprintf ( out, "ReqNodeListIndices=");
+	fprintf ( out, "Req%s=%s ", nodelist, job_ptr->req_nodes);
+	fprintf ( out, "Req%sIndices=", nodelist);
 	for (j = 0; job_ptr->req_node_inx; j++) {
 		if (j > 0)
 			fprintf( out, ",%d", job_ptr->req_node_inx[j]);
@@ -207,8 +242,8 @@ slurm_print_job_info ( FILE* out, job_info_t * job_ptr, int one_liner )
 		fprintf ( out, "\n   ");
 
 	/****** Line 11 ******/
-	fprintf ( out, "ExcNodeList=%s ", job_ptr->exc_nodes);
-	fprintf ( out, "ExcNodeListIndices=");
+	fprintf ( out, "Exc%s=%s ", nodelist, job_ptr->exc_nodes);
+	fprintf ( out, "Exc%sIndices=", nodelist);
 	for (j = 0; job_ptr->exc_node_inx; j++) {
 		if (j > 0)
 			fprintf( out, ",%d", job_ptr->exc_node_inx[j]);
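
NOTE (hedged sketch): the quarter/segment suffixing above gives small-block jobs
names like "BP_List=bg[000x000].1.2" (quarter 1, segment 2). A minimal helper with
the same formatting rules, written here only to illustrate the display convention
(NO_VAL is assumed to come from slurm.h):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static void append_small_block_suffix(char *buf, size_t size,
					      uint16_t quarter, uint16_t segment)
	{
		size_t len = strlen(buf);

		if (quarter == (uint16_t) NO_VAL)
			return;			/* full-midplane job, no suffix */
		if (segment == (uint16_t) NO_VAL)
			snprintf(buf + len, size - len, ".%u", quarter);
		else
			snprintf(buf + len, size - len, ".%u.%u", quarter, segment);
	}
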
diff --git a/src/api/node_info.c b/src/api/node_info.c
index 20e81033dfe..3e89affd445 100644
--- a/src/api/node_info.c
+++ b/src/api/node_info.c
@@ -125,15 +125,15 @@ extern int slurm_load_node (time_t update_time,
         slurm_msg_t req_msg;
         slurm_msg_t resp_msg;
         node_info_request_msg_t req;
-
+	
         req.last_update  = update_time;
 	req.show_flags   = show_flags;
         req_msg.msg_type = REQUEST_NODE_INFO;
         req_msg.data     = &req;
-
+	
 	if (slurm_send_recv_controller_msg(&req_msg, &resp_msg) < 0)
 		return SLURM_ERROR;
-
+		
 	switch (resp_msg.msg_type) {
 	case RESPONSE_NODE_INFO:
 		*resp = (node_info_msg_t *) resp_msg.data;
diff --git a/src/api/node_select_info.c b/src/api/node_select_info.c
index 051f5422fb7..0b4184179c4 100644
--- a/src/api/node_select_info.c
+++ b/src/api/node_select_info.c
@@ -64,10 +64,10 @@ extern int slurm_load_node_select (time_t update_time,
         req.last_update  = update_time;
         req_msg.msg_type = REQUEST_NODE_SELECT_INFO;
         req_msg.data     = &req;
-
+	
 	if (slurm_send_recv_controller_msg(&req_msg, &resp_msg) < 0)
 		return SLURM_ERROR;
-
+	
 	switch (resp_msg.msg_type) {
 	case RESPONSE_NODE_SELECT_INFO:
 		*node_select_info_msg_pptr = (node_select_info_msg_t *) 
diff --git a/src/api/node_select_info.h b/src/api/node_select_info.h
index 77ed817758f..6acb18d2da7 100644
--- a/src/api/node_select_info.h
+++ b/src/api/node_select_info.h
@@ -44,6 +44,8 @@ typedef struct {
 	int conn_type;
 	int node_use;
 	int quarter;
+	int segment;
+	int node_cnt;
 } bg_info_record_t;
 
 typedef struct {
diff --git a/src/api/partition_info.c b/src/api/partition_info.c
index c237c9142dc..296b6f23184 100644
--- a/src/api/partition_info.c
+++ b/src/api/partition_info.c
@@ -72,12 +72,17 @@ void slurm_print_partition_info_msg ( FILE* out,
 void slurm_print_partition_info ( FILE* out, partition_info_t * part_ptr, 
 				  int one_liner )
 {
-	int j ;
+	int j;
+	char tmp1[7], tmp2[7];
 
 	/****** Line 1 ******/
 	fprintf ( out, "PartitionName=%s ", part_ptr->name);
-	fprintf ( out, "TotalNodes=%u ", part_ptr->total_nodes);
-	fprintf ( out, "TotalCPUs=%u ", part_ptr->total_cpus);
+
+	convert_to_kilo(part_ptr->total_nodes, tmp1);
+	fprintf ( out, "TotalNodes=%s ", tmp1);
+
+	convert_to_kilo(part_ptr->total_cpus, tmp1);
+	fprintf ( out, "TotalCPUs=%s ", tmp1);
 	if (part_ptr->root_only)
 		fprintf ( out, "RootOnly=YES");
 	else
@@ -116,11 +121,14 @@ void slurm_print_partition_info ( FILE* out, partition_info_t * part_ptr,
 		fprintf ( out, "\n   ");
 
 	/****** Line 3 ******/
-	fprintf ( out, "MinNodes=%u ", part_ptr->min_nodes);
+	convert_to_kilo(part_ptr->min_nodes, tmp1);
+	fprintf ( out, "MinNodes=%s ", tmp1);
 	if (part_ptr->max_nodes == INFINITE)
 		fprintf ( out, "MaxNodes=UNLIMITED ");
-	else
-		fprintf ( out, "MaxNodes=%u ", part_ptr->max_nodes);
+	else {
+		convert_to_kilo(part_ptr->max_nodes, tmp1);
+		fprintf ( out, "MaxNodes=%s ", tmp1);
+	}
 	if ((part_ptr->allow_groups == NULL) || 
 	    (part_ptr->allow_groups[0] == '\0'))
 		fprintf ( out, "AllowGroups=ALL");
@@ -132,7 +140,11 @@ void slurm_print_partition_info ( FILE* out, partition_info_t * part_ptr,
 		fprintf ( out, "\n   ");
 
 	/****** Line 4 ******/
+#ifdef HAVE_BG
+	fprintf ( out, "BasePartitions=%s BPIndices=", part_ptr->nodes);
+#else
 	fprintf ( out, "Nodes=%s NodeIndices=", part_ptr->nodes);
+#endif
 	for (j = 0; part_ptr->node_inx; j++) {
 		if (j > 0)
 			fprintf( out, ",%d", part_ptr->node_inx[j]);
@@ -168,6 +180,7 @@ extern int slurm_load_partitions (time_t update_time,
 	req.show_flags   = show_flags;
         req_msg.msg_type = REQUEST_PARTITION_INFO;
         req_msg.data     = &req;
+	
 
 	if (slurm_send_recv_controller_msg(&req_msg, &resp_msg) < 0)
 		return SLURM_ERROR;
diff --git a/src/api/signal.c b/src/api/signal.c
index 114d6cf9a16..b12b91834fa 100644
--- a/src/api/signal.c
+++ b/src/api/signal.c
@@ -459,7 +459,8 @@ slurm_terminate_job_step (uint32_t job_id, uint32_t step_id)
 	 * Otherwise, look through the list of job step info and find
 	 * the one matching step_id.  Terminate that step.
 	 */
-	rc = slurm_get_job_steps((time_t)0, job_id, step_id, &step_info, SHOW_ALL);
+	rc = slurm_get_job_steps((time_t)0, job_id, step_id, 
+				 &step_info, SHOW_ALL);
 	if (rc != 0) {
 		save_errno = errno;
 		goto fail;
@@ -548,7 +549,7 @@ _job_step_wait(uint32_t jobid, uint32_t stepid,
  */
 static int
 _terminate_job_step(const job_step_info_t *step,
-		 const resource_allocation_response_msg_t *allocation)
+		    const resource_allocation_response_msg_t *allocation)
 {
 	slurm_msg_t *msg; /* array of message structs, one per node */
 	kill_tasks_msg_t rpc;
@@ -629,4 +630,3 @@ static int _terminate_batch_script_step(
 	return rc;
 }
 
-
diff --git a/src/common/forward.c b/src/common/forward.c
index a620ca9025f..5044ff04e34 100644
--- a/src/common/forward.c
+++ b/src/common/forward.c
@@ -144,7 +144,7 @@ nothing_sent:
 		type->type = msg.msg_type;
 		type->msg_rc = ((return_code_msg_t *)msg.data)->return_code;
 		ret_data_info->data = msg.data;
-		g_slurm_auth_destroy(msg.cred);
+		slurm_free_cred(msg.cred);
 	}
 	slurm_mutex_lock(fwd_msg->forward_mutex);
 	while((returned_type = list_pop(ret_list)) != NULL) {
diff --git a/src/common/node_select.c b/src/common/node_select.c
index b0811a6c839..1bd0af526cb 100644
--- a/src/common/node_select.c
+++ b/src/common/node_select.c
@@ -65,8 +65,10 @@ typedef struct slurm_select_ops {
 						  int node_cnt);
 	int 		(*part_init)		( List part_list );
 	int		(*job_test)		( struct job_record *job_ptr,
-						  bitstr_t *bitmap, int min_nodes, 
-						  int max_nodes, bool test_only );
+						  bitstr_t *bitmap, 
+						  int min_nodes, 
+						  int max_nodes,
+						  bool test_only);
 	int		(*job_begin)		( struct job_record *job_ptr );
 	int		(*job_ready)		( struct job_record *job_ptr );
 	int		(*job_fini)		( struct job_record *job_ptr );
@@ -85,6 +87,8 @@ typedef struct slurm_select_ops {
                                                   enum select_data_info cr_info);
         int             (*get_info_from_plugin) ( enum select_data_info cr_info, 
                                                   void *data);
+	int             (*alter_node_cnt)       ( enum select_node_cnt type,
+						  void *data);
 } slurm_select_ops_t;
 
 typedef struct slurm_select_context {
@@ -109,17 +113,23 @@ struct select_jobinfo {
 	uint16_t node_use;	/* see enum node_use_type */
 	char *bg_block_id;	/* Blue Gene partition ID */
 	uint16_t magic;		/* magic number */
-	int32_t quarter;  	/* for bg to tell which quarter of a small
+	uint16_t quarter;        /* for bg to tell which quarter of a small
 				   partition the job is running */ 
+	uint16_t segment;        /* for bg to tell which segment of a quarter 
+				   of a small partition the job is running */ 
+	uint32_t node_cnt;      /* how many cnodes in block */ 
+	uint16_t altered;       /* whether we have already altered 
+				   this job or not */
+	uint32_t max_procs;	/* maximum processors to use */
 };
 #endif
 
 /*
  * Local functions
  */
-static slurm_select_context_t *	_select_context_create(const char *select_type);
-static int 			_select_context_destroy(slurm_select_context_t *c);
-static slurm_select_ops_t *	_select_get_ops(slurm_select_context_t *c);
+static slurm_select_context_t *_select_context_create(const char *select_type);
+static int _select_context_destroy(slurm_select_context_t *c);
+static slurm_select_ops_t *_select_get_ops(slurm_select_context_t *c);
 
 /*
  * Locate and load the appropriate plugin
@@ -145,7 +155,8 @@ static slurm_select_ops_t * _select_get_ops(slurm_select_context_t *c)
                 "select_p_get_extra_jobinfo",
                 "select_p_get_select_nodeinfo",
                 "select_p_update_nodeinfo",
-                "select_p_get_info_from_plugin"
+                "select_p_get_info_from_plugin",
+		"select_p_alter_node_cnt"
 	};
 	int n_syms = sizeof( syms ) / sizeof( char * );
 
@@ -341,7 +352,8 @@ extern int select_g_block_init(List part_list)
  * Get selected data from a given node for a specific job. 
  * IN node_ptr  - current node record
  * IN job_ptr   - current job record
- * IN cr_info   - type of data to get from the node record (see enum select_data_info)
+ * IN cr_info   - type of data to get from the node record 
+ *                (see enum select_data_info)
  * IN/OUT data  - the data to get from node record
  */
 extern int select_g_get_extra_jobinfo (struct node_record *node_ptr, 
@@ -352,7 +364,10 @@ extern int select_g_get_extra_jobinfo (struct node_record *node_ptr,
        if (slurm_select_init() < 0)
                return SLURM_ERROR;
 
-       return (*(g_select_context->ops.get_extra_jobinfo))(node_ptr, job_ptr, cr_info, data);
+       return (*(g_select_context->ops.get_extra_jobinfo))(node_ptr, 
+							   job_ptr, 
+							   cr_info, 
+							   data);
 }
 
 /* 
@@ -362,20 +377,25 @@ extern int select_g_get_extra_jobinfo (struct node_record *node_ptr,
  * IN/OUT data  - the data to get from node record
  */
 extern int select_g_get_select_nodeinfo (struct node_record *node_ptr, 
-                                         enum select_data_info cr_info, void *data)
+                                         enum select_data_info cr_info, 
+					 void *data)
 {
        if (slurm_select_init() < 0)
                return SLURM_ERROR;
 
-       return (*(g_select_context->ops.get_select_nodeinfo))(node_ptr, cr_info, data);
+       return (*(g_select_context->ops.get_select_nodeinfo))(node_ptr, 
+							     cr_info, 
+							     data);
 }
 
 /* 
  * Update select data for a specific node record for a specific job 
- * IN cr_info   - type of data to update for a given job record (see enum select_data_info)
+ * IN cr_info   - type of data to update for a given job record 
+ *                (see enum select_data_info)
  * IN job_ptr - current job record
  */
-extern int select_g_update_nodeinfo (struct job_record *job_ptr, enum select_data_info cr_info)
+extern int select_g_update_nodeinfo (struct job_record *job_ptr, 
+				     enum select_data_info cr_info)
 {
        if (slurm_select_init() < 0)
                return SLURM_ERROR;
@@ -386,10 +406,12 @@ extern int select_g_update_nodeinfo (struct job_record *job_ptr, enum select_dat
 /* 
  * Get select data from a plugin
  * IN node_pts  - current node record
- * IN cr_info   - type of data to get from the node record (see enum select_data_info)
+ * IN cr_info   - type of data to get from the node record 
+ *                (see enum select_data_info)
  * IN/OUT data  - the data to get from node record
  */
-extern int select_g_get_info_from_plugin (enum select_data_info cr_info, void *data)
+extern int select_g_get_info_from_plugin (enum select_data_info cr_info, 
+					  void *data)
 {
        if (slurm_select_init() < 0)
                return SLURM_ERROR;
@@ -397,6 +419,18 @@ extern int select_g_get_info_from_plugin (enum select_data_info cr_info, void *d
        return (*(g_select_context->ops.get_info_from_plugin))(cr_info, data);
 }
 
+/* 
+ * Alter the node count for a job given the type of system we are on
+ * IN type      - operation to perform (see enum select_node_cnt)
+ * IN/OUT data  - node count or job descriptor to read or update
+ */
+extern int select_g_alter_node_cnt (enum select_node_cnt type, void *data)
+{
+       if (slurm_select_init() < 0)
+               return SLURM_ERROR;
+
+       return (*(g_select_context->ops.alter_node_cnt))(type, data);
+}
+
 /*
  * Select the "best" nodes for given job from those available
  * IN job_ptr - pointer to job being considered for initiation
@@ -413,7 +447,8 @@ extern int select_g_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 		return SLURM_ERROR;
 
 	return (*(g_select_context->ops.job_test))(job_ptr, bitmap, 
-		min_nodes, max_nodes, test_only);
+						   min_nodes, max_nodes, 
+						   test_only);
 }
 
 /*
@@ -466,7 +501,7 @@ extern int select_g_job_suspend(struct job_record *job_ptr)
 
 	return (*(g_select_context->ops.job_suspend))(job_ptr);
 }
-                                                                                
+
 /*
  * Resume a job. Executed from slurmctld.
  * IN job_ptr - pointer to job being resumed
@@ -496,6 +531,8 @@ static char *_job_conn_type_string(uint16_t inx)
 		return "torus";
 	else if (inx == SELECT_MESH)
 		return "mesh";
+	else if (inx == SELECT_SMALL)
+		return "small";
 	else
 		return "nav";
 }
@@ -515,11 +552,22 @@ static char *_job_rotate_string(uint16_t inx)
  */
 extern int select_g_alloc_jobinfo (select_jobinfo_t *jobinfo)
 {
+	int i;
 	xassert(jobinfo != NULL);
-
+	
 	*jobinfo = xmalloc(sizeof(struct select_jobinfo));
+	for (i=0; i<SYSTEM_DIMENSIONS; i++)
+		(*jobinfo)->geometry[i] = 0;
+	(*jobinfo)->conn_type = SELECT_NAV;
+	(*jobinfo)->rotate = 1;
+	(*jobinfo)->node_use = SELECT_NAV;
+	(*jobinfo)->bg_block_id = NULL;
 	(*jobinfo)->magic = JOBINFO_MAGIC;
-	(*jobinfo)->quarter = -1;
+	(*jobinfo)->quarter = (uint16_t) NO_VAL;
+	(*jobinfo)->segment = (uint16_t) NO_VAL;
+	(*jobinfo)->node_cnt = NO_VAL;
+	(*jobinfo)->max_procs = NO_VAL;
+	
 	return SLURM_SUCCESS;
 }
 
@@ -532,9 +580,9 @@ extern int select_g_set_jobinfo (select_jobinfo_t jobinfo,
 		enum select_data_type data_type, void *data)
 {
 	int i, rc = SLURM_SUCCESS;
-	uint16_t *tmp_16 = (uint16_t *) data;
-	int32_t *tmp_32 = (uint32_t *) data;
-	char * tmp_char = (char *) data;
+	uint16_t *uint16 = (uint16_t *) data;
+	uint32_t *uint32 = (uint32_t *) data;
+	char *tmp_char = (char *) data;
 	
 	if (jobinfo->magic != JOBINFO_MAGIC) {
 		error("select_g_set_jobinfo: jobinfo magic bad");
@@ -543,17 +591,17 @@ extern int select_g_set_jobinfo (select_jobinfo_t jobinfo,
 
 	switch (data_type) {
 	case SELECT_DATA_GEOMETRY:
-		for (i=0; i<SYSTEM_DIMENSIONS; i++)
-			jobinfo->geometry[i] = tmp_16[i];
+		for (i=0; i<SYSTEM_DIMENSIONS; i++) 
+			jobinfo->geometry[i] = uint16[i];
 		break;
 	case SELECT_DATA_ROTATE:
-		jobinfo->rotate = *tmp_16;
+		jobinfo->rotate = *uint16;
 		break;
 	case SELECT_DATA_NODE_USE:
-		jobinfo->node_use = *tmp_16;
+		jobinfo->node_use = *uint16;
 		break;
 	case SELECT_DATA_CONN_TYPE:
-		jobinfo->conn_type = *tmp_16;
+		jobinfo->conn_type = *uint16;
 		break;
 	case SELECT_DATA_BLOCK_ID:
 		/* we xfree() any preset value to avoid a memory leak */
@@ -561,7 +609,19 @@ extern int select_g_set_jobinfo (select_jobinfo_t jobinfo,
 		jobinfo->bg_block_id = xstrdup(tmp_char);
 		break;
 	case SELECT_DATA_QUARTER:
-		jobinfo->quarter = *tmp_32;
+		jobinfo->quarter = *uint16;
+		break;
+	case SELECT_DATA_SEGMENT:
+		jobinfo->segment = *uint16;
+		break;
+	case SELECT_DATA_NODE_CNT:
+		jobinfo->node_cnt = *uint32;
+		break;
+	case SELECT_DATA_ALTERED:
+		jobinfo->altered = *uint16;
+		break;
+	case SELECT_DATA_MAX_PROCS:
+		jobinfo->max_procs = *uint32;
 		break;
 	default:
 		debug("select_g_set_jobinfo data_type %d invalid", 
@@ -581,8 +641,8 @@ extern int select_g_get_jobinfo (select_jobinfo_t jobinfo,
 		enum select_data_type data_type, void *data)
 {
 	int i, rc = SLURM_SUCCESS;
-	int32_t *tmp_32 = (uint32_t *) data;
-	uint16_t *tmp_16 = (uint16_t *) data;
+	uint16_t *uint16 = (uint16_t *) data;
+	uint32_t *uint32 = (uint32_t *) data;
 	char **tmp_char = (char **) data;
 
 	if (jobinfo->magic != JOBINFO_MAGIC) {
@@ -592,17 +652,18 @@ extern int select_g_get_jobinfo (select_jobinfo_t jobinfo,
 
 	switch (data_type) {
 	case SELECT_DATA_GEOMETRY:
-		for (i=0; i<SYSTEM_DIMENSIONS; i++)
-			tmp_16[i] = jobinfo->geometry[i];
+		for (i=0; i<SYSTEM_DIMENSIONS; i++) {
+			uint16[i] = jobinfo->geometry[i];
+		}
 		break;
 	case SELECT_DATA_ROTATE:
-		*tmp_16 = jobinfo->rotate;
+		*uint16 = jobinfo->rotate;
 		break;
 	case SELECT_DATA_NODE_USE:
-		*tmp_16 = jobinfo->node_use;
+		*uint16 = jobinfo->node_use;
 		break;
 	case SELECT_DATA_CONN_TYPE:
-		*tmp_16 = jobinfo->conn_type;
+		*uint16 = jobinfo->conn_type;
 		break;
 	case SELECT_DATA_BLOCK_ID:
 		if ((jobinfo->bg_block_id == NULL)
@@ -612,7 +673,19 @@ extern int select_g_get_jobinfo (select_jobinfo_t jobinfo,
 			*tmp_char = xstrdup(jobinfo->bg_block_id);
 		break;
 	case SELECT_DATA_QUARTER:
-		*tmp_32 = jobinfo->quarter;
+		*uint16 = jobinfo->quarter;
+		break;
+	case SELECT_DATA_SEGMENT:
+		*uint16 = jobinfo->segment;
+		break;
+	case SELECT_DATA_NODE_CNT:
+		*uint32 = jobinfo->node_cnt;
+		break;
+	case SELECT_DATA_ALTERED:
+		*uint16 = jobinfo->altered;
+		break;
+	case SELECT_DATA_MAX_PROCS:
+		*uint32 = jobinfo->max_procs;
 		break;
 	default:
 		debug("select_g_get_jobinfo data_type %d invalid", 
@@ -630,21 +703,27 @@ extern int select_g_get_jobinfo (select_jobinfo_t jobinfo,
 extern select_jobinfo_t select_g_copy_jobinfo(select_jobinfo_t jobinfo)
 {
 	struct select_jobinfo *rc = NULL;
-
+	int i;
+		
 	if (jobinfo == NULL)
 		;
 	else if (jobinfo->magic != JOBINFO_MAGIC)
 		error("select_g_copy_jobinfo: jobinfo magic bad");
 	else {
-		int i;
 		rc = xmalloc(sizeof(struct select_jobinfo));
-		rc->magic = JOBINFO_MAGIC;
-		for (i=0; i<SYSTEM_DIMENSIONS; i++)
-			rc->geometry[i] = jobinfo->geometry[i];
-		rc->rotate = jobinfo->rotate;
+		for (i=0; i<SYSTEM_DIMENSIONS; i++) {
+			rc->geometry[i] = (uint16_t)jobinfo->geometry[i];
+		}
 		rc->conn_type = jobinfo->conn_type;
 		rc->rotate = jobinfo->rotate;
+		rc->node_use = jobinfo->node_use;
 		rc->bg_block_id = xstrdup(jobinfo->bg_block_id);
+		rc->magic = JOBINFO_MAGIC;
+		rc->quarter = jobinfo->quarter;
+		rc->segment = jobinfo->segment;
+		rc->node_cnt = jobinfo->node_cnt;
+		rc->altered = jobinfo->altered;
+		rc->max_procs = jobinfo->max_procs;
 	}
 
 	return rc;
@@ -681,15 +760,23 @@ extern int  select_g_pack_jobinfo  (select_jobinfo_t jobinfo, Buf buffer)
 	int i;
 
 	if (jobinfo) {
-		for (i=0; i<SYSTEM_DIMENSIONS; i++)
-			pack16(jobinfo->geometry[i], buffer);		
-		pack16(jobinfo->conn_type, buffer);
-		pack16(jobinfo->rotate, buffer);
+		/* NOTE: If new elements are added here, make sure to
+		 * add an equivalent pack of zeros below for the NULL pointer case */
+		for (i=0; i<SYSTEM_DIMENSIONS; i++) {
+			pack16((uint16_t)jobinfo->geometry[i], buffer);
+		}
+		pack16((uint16_t)jobinfo->conn_type, buffer);
+		pack16((uint16_t)jobinfo->rotate, buffer);
+		pack16((uint16_t)jobinfo->quarter, buffer);
+		pack16((uint16_t)jobinfo->segment, buffer);
+		pack32((uint32_t)jobinfo->node_cnt, buffer);
+		pack32((uint32_t)jobinfo->max_procs, buffer);
 		packstr(jobinfo->bg_block_id, buffer);
-		pack32(jobinfo->quarter, buffer);
 	} else {
-		for (i=0; i<(SYSTEM_DIMENSIONS+3); i++)
+		for (i=0; i<(SYSTEM_DIMENSIONS+4); i++)
 			pack16((uint16_t) 0, buffer);
+		pack32((uint32_t) 0, buffer);
+		pack32((uint32_t) 0, buffer);
 		packstr(NULL, buffer);
 	}
 
@@ -707,12 +794,16 @@ extern int  select_g_unpack_jobinfo(select_jobinfo_t jobinfo, Buf buffer)
 	int i;
 	uint16_t uint16_tmp;
 
-	for (i=0; i<SYSTEM_DIMENSIONS; i++)
+	for (i=0; i<SYSTEM_DIMENSIONS; i++) {
 		safe_unpack16(&(jobinfo->geometry[i]), buffer);
+	}
 	safe_unpack16(&(jobinfo->conn_type), buffer);
 	safe_unpack16(&(jobinfo->rotate), buffer);
+	safe_unpack16(&(jobinfo->quarter), buffer);
+	safe_unpack16(&(jobinfo->segment), buffer);
+	safe_unpack32(&(jobinfo->node_cnt), buffer);
+	safe_unpack32(&(jobinfo->max_procs), buffer);
 	safe_unpackstr_xmalloc(&(jobinfo->bg_block_id), &uint16_tmp, buffer);
-	safe_unpack32(&(jobinfo->quarter), buffer);
 	return SLURM_SUCCESS;
 
       unpack_error:
@@ -731,6 +822,7 @@ extern char *select_g_sprint_jobinfo(select_jobinfo_t jobinfo,
 {
 	uint16_t geometry[SYSTEM_DIMENSIONS];
 	int i;
+	char tmp_char[7];
 
 	if (buf == NULL) {
 		error("select_g_sprint_jobinfo: buf is null");
@@ -759,22 +851,26 @@ extern char *select_g_sprint_jobinfo(select_jobinfo_t jobinfo,
 	switch (mode) {
 	case SELECT_PRINT_HEAD:
 		snprintf(buf, size,
-			 "CONNECT ROTATE GEOMETRY PART_ID");
+			 "CONNECT ROTATE MAX_PROCS GEOMETRY PART_ID");
 		break;
 	case SELECT_PRINT_DATA:
+		convert_to_kilo(jobinfo->max_procs, tmp_char);
 		snprintf(buf, size, 
-			 "%7.7s %6.6s %8.8s %ux%ux%u %16s",
+			 "%7.7s %6.6s %9s    %1ux%1ux%1u %-16s",
 			 _job_conn_type_string(jobinfo->conn_type),
 			 _job_rotate_string(jobinfo->rotate),
+			 tmp_char,
 			 geometry[0], geometry[1], geometry[2],
 			 jobinfo->bg_block_id);
 		break;
 	case SELECT_PRINT_MIXED:
+		convert_to_kilo(jobinfo->max_procs, tmp_char);
 		snprintf(buf, size, 
-			 "Connection=%s Rotate=%s "
+			 "Connection=%s Rotate=%s MaxProcs=%s "
 			 "Geometry=%ux%ux%u Part_ID=%s",
 			 _job_conn_type_string(jobinfo->conn_type),
 			 _job_rotate_string(jobinfo->rotate),
+			 tmp_char,
 			 geometry[0], geometry[1], geometry[2],
 			 jobinfo->bg_block_id);
 		break;
@@ -798,6 +894,7 @@ static int _unpack_node_info(bg_info_record_t *bg_info_record, Buf buffer)
 {
 	uint16_t uint16_tmp;
 	uint32_t uint32_tmp;
+	
 	safe_unpackstr_xmalloc(&(bg_info_record->nodes), &uint16_tmp, buffer);
 	safe_unpackstr_xmalloc(&bg_info_record->owner_name, &uint16_tmp, 
 		buffer);
@@ -810,29 +907,27 @@ static int _unpack_node_info(bg_info_record_t *bg_info_record, Buf buffer)
 	bg_info_record->conn_type = (int) uint16_tmp;
 	safe_unpack16(&uint16_tmp, buffer);
 	bg_info_record->node_use = (int) uint16_tmp;
+	safe_unpack16(&uint16_tmp, buffer);
+	bg_info_record->quarter = (int) uint16_tmp;
+	safe_unpack16(&uint16_tmp, buffer);
+	bg_info_record->segment = (int) uint16_tmp;
 	safe_unpack32(&uint32_tmp, buffer);
-	bg_info_record->quarter = (int) uint32_tmp;
-
+	bg_info_record->node_cnt = (int) uint32_tmp;
+		
 	return SLURM_SUCCESS;
 
 unpack_error:
-	if(bg_info_record->nodes)
-		xfree(bg_info_record->nodes);
-	if(bg_info_record->owner_name)
-		xfree(bg_info_record->owner_name);
-	if(bg_info_record->bg_block_id)
-		xfree(bg_info_record->bg_block_id);
+	xfree(bg_info_record->nodes);
+	xfree(bg_info_record->owner_name);
+	xfree(bg_info_record->bg_block_id);
 	return SLURM_ERROR;
 }
 
 static void _free_node_info(bg_info_record_t *bg_info_record)
 {
-	if(bg_info_record->nodes)
-		xfree(bg_info_record->nodes);
-	if(bg_info_record->owner_name)
-		xfree(bg_info_record->owner_name);
-	if(bg_info_record->bg_block_id)
-		xfree(bg_info_record->bg_block_id);
+	xfree(bg_info_record->nodes);
+	xfree(bg_info_record->owner_name);
+	xfree(bg_info_record->bg_block_id);
 }
 
 /* Unpack node select info from a buffer */
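
NOTE (hedged sketch): the pack and unpack paths above must stay byte-for-byte
symmetric, including the zero padding written when jobinfo is NULL. A minimal
round-trip check, assuming SLURM's Buf helpers (init_buf, set_buf_offset,
free_buf) behave as declared in src/common/pack.h:

	Buf buffer = init_buf(4096);
	select_jobinfo_t src = NULL, dst = NULL;

	select_g_alloc_jobinfo(&src);
	select_g_pack_jobinfo(src, buffer);	/* write */

	set_buf_offset(buffer, 0);		/* rewind */
	select_g_alloc_jobinfo(&dst);
	select_g_unpack_jobinfo(dst, buffer);	/* read back the same fields */

	select_g_free_jobinfo(&src);
	select_g_free_jobinfo(&dst);
	free_buf(buffer);
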
diff --git a/src/common/node_select.h b/src/common/node_select.h
index cd1102985f7..5da7008bbd1 100644
--- a/src/common/node_select.h
+++ b/src/common/node_select.h
@@ -74,14 +74,16 @@ extern int select_g_node_init(struct node_record *node_ptr, int node_cnt);
  * IN/OUT data  - the data to get from node record
  */
 extern int select_g_get_select_nodeinfo (struct node_record *node_ptr, 
-                                         enum select_data_info cr_info, void *data);
+                                         enum select_data_info cr_info, 
+					 void *data);
 
 /* 
  * Update select data for a specific node record for a specific job 
  * IN cr_info   - type of data to update for a given job record
  * IN job_ptr - current job record
  */
-extern int select_g_update_nodeinfo (struct job_record *job_ptr, enum select_data_info cr_info);
+extern int select_g_update_nodeinfo (struct job_record *job_ptr, 
+				     enum select_data_info cr_info);
 
 /* 
  * Get select data from a plugin
@@ -89,7 +91,14 @@ extern int select_g_update_nodeinfo (struct job_record *job_ptr, enum select_dat
  * IN cr_info   - type of data to get from the node record (see enum select_data_info)
  * IN/OUT data  - the data to get from node record
  */
-extern int select_g_get_info_from_plugin (enum select_data_info cr_info, void *data);
+extern int select_g_get_info_from_plugin (enum select_data_info cr_info, 
+					  void *data);
+
+/* 
+ * Alter the node count for a job given the type of system we are on
+ * IN type      - operation to perform (see enum select_node_cnt)
+ * IN/OUT data  - node count or job descriptor to read or update
+ */
+extern int select_g_alter_node_cnt (enum select_node_cnt type, void *data);
 
 /*
  * Note re/initialization of partition record data structure
@@ -117,7 +126,7 @@ extern int select_g_job_init(List job_list);
  * IN test_only - if true, only test if ever could run, not necessarily now
  */
 extern int select_g_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
-	int min_nodes, int max_nodes, bool test_only);
+			     int min_nodes, int max_nodes, bool test_only);
 
 /*
  * Note initiation of job is about to begin. Called immediately 
@@ -166,7 +175,7 @@ extern int select_g_alloc_jobinfo (select_jobinfo_t *jobinfo);
  * IN data - the data to enter into job credential
  */
 extern int select_g_set_jobinfo (select_jobinfo_t jobinfo,
-		enum select_data_type data_type, void *data);
+				 enum select_data_type data_type, void *data);
 
 /* get data from a select job credential
  * IN jobinfo  - updated select job credential
@@ -175,7 +184,7 @@ extern int select_g_set_jobinfo (select_jobinfo_t jobinfo,
  *	data for data_tyep == SELECT_DATA_PART_ID
  */
 extern int select_g_get_jobinfo (select_jobinfo_t jobinfo,
-		enum select_data_type data_type, void *data);
+				 enum select_data_type data_type, void *data);
 
 /* copy a select job credential
  * IN jobinfo - the select job credential to be copied
@@ -198,7 +207,7 @@ extern int select_g_free_jobinfo  (select_jobinfo_t *jobinfo);
  * IN/OUT data  - the data to get from node record
  */
 extern int select_g_get_extra_jobinfo (struct node_record *node_ptr, 
-                                      struct job_record *job_ptr, 
+				       struct job_record *job_ptr, 
                                        enum select_data_info cr_info,
                                        void *data);
 
@@ -225,7 +234,7 @@ extern int  select_g_unpack_jobinfo(select_jobinfo_t jobinfo, Buf buffer);
  * RET        - the string, same as buf
  */
 extern char *select_g_sprint_jobinfo(select_jobinfo_t jobinfo,
-		char *buf, size_t size, int mode);
+				     char *buf, size_t size, int mode);
 
 /******************************************************\
  * NODE-SELECT PLUGIN SPECIFIC INFORMATION FUNCTIONS  *
@@ -241,10 +250,10 @@ extern int select_g_pack_node_info(time_t last_query_time, Buf *buffer);
  
 /* Unpack node select info from a buffer */
 extern int select_g_unpack_node_info(node_select_info_msg_t **
-		node_select_info_msg_pptr, Buf buffer);
+				     node_select_info_msg_pptr, Buf buffer);
 
 /* Free a node select information buffer */
 extern int select_g_free_node_info(node_select_info_msg_t **
-		node_select_info_msg_pptr);
+				   node_select_info_msg_pptr);
 
 #endif /*__SELECT_PLUGIN_API_H__*/
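
NOTE (hedged sketch): one intended calling pattern for the new
select_g_alter_node_cnt() hook is translating a user-supplied node count before
scheduling. Treating the data argument as a uint32_t count for
SELECT_APPLY_NODE_MIN_OFFSET is an assumption here; each enum value's exact
contract lives in the select plugin.

	uint32_t node_cnt = job_desc->min_nodes;	/* count as requested */

	if (select_g_alter_node_cnt(SELECT_APPLY_NODE_MIN_OFFSET,
				    &node_cnt) == SLURM_SUCCESS)
		job_desc->min_nodes = node_cnt;		/* now in scheduling units */
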
diff --git a/src/common/slurm_auth.c b/src/common/slurm_auth.c
index 46b8e8833b6..350ba5910cf 100644
--- a/src/common/slurm_auth.c
+++ b/src/common/slurm_auth.c
@@ -351,7 +351,7 @@ g_slurm_auth_create( void *hosts, int timeout )
         if ( ( argv = slurm_auth_marshal_args( hosts, timeout ) ) == NULL ) {
                 return NULL;
         }
-
+       
         ret = (*(g_context->ops.create))( argv );
         xfree( argv );
         return ret;
diff --git a/src/common/slurm_protocol_api.c b/src/common/slurm_protocol_api.c
index ea546e983d2..1c19f5afeaa 100644
--- a/src/common/slurm_protocol_api.c
+++ b/src/common/slurm_protocol_api.c
@@ -763,7 +763,7 @@ List slurm_receive_msg(slurm_fd fd, slurm_msg_t *msg, int timeout)
 			error("problem with forward msg");
 		}
 	}
-
+	
 	if ((auth_cred = g_slurm_auth_unpack(buffer)) == NULL) {
 		error( "authentication: %s ",
 			g_slurm_auth_errstr(g_slurm_auth_errno(NULL)));
@@ -776,7 +776,7 @@ List slurm_receive_msg(slurm_fd fd, slurm_msg_t *msg, int timeout)
 	if (rc != SLURM_SUCCESS) {
 		error( "authentication: %s ",
 		       g_slurm_auth_errstr(g_slurm_auth_errno(auth_cred)));
-		(void) g_slurm_auth_destroy(auth_cred);
+		(void) slurm_free_cred(auth_cred);
 		free_buf(buffer);
 		rc = SLURM_PROTOCOL_AUTHENTICATION_ERROR;
 		goto total_return;
@@ -790,12 +790,12 @@ List slurm_receive_msg(slurm_fd fd, slurm_msg_t *msg, int timeout)
 	
 	if ( (header.body_length > remaining_buf(buffer)) ||
 	     (unpack_msg(msg, buffer) != SLURM_SUCCESS) ) {
-		(void) g_slurm_auth_destroy(auth_cred);
+		(void) slurm_free_cred(auth_cred);
 		free_buf(buffer);
 		rc = ESLURM_PROTOCOL_INCOMPLETE_PACKET;
 		goto total_return;
 	}
-
+	
 	msg->cred = (void *) auth_cred;
 
 	free_buf(buffer);
@@ -901,7 +901,7 @@ int slurm_add_header_and_send(slurm_fd fd, slurm_msg_t *msg)
 	 * Pack auth credential
 	 */
 	rc = g_slurm_auth_pack(auth_cred, send_buf);
-	(void) g_slurm_auth_destroy(auth_cred);
+	(void) slurm_free_cred(auth_cred);
 	if (rc) {
 		error("authentication: %s",
 		       g_slurm_auth_errstr(g_slurm_auth_errno(auth_cred)));
@@ -975,7 +975,7 @@ int slurm_send_node_msg(slurm_fd fd, slurm_msg_t * msg)
 	 * Pack auth credential
 	 */
 	rc = g_slurm_auth_pack(auth_cred, buffer);
-	(void) g_slurm_auth_destroy(auth_cred);
+	(void) slurm_free_cred(auth_cred);
 	if (rc) {
 		error("authentication: %s",
 		       g_slurm_auth_errstr(g_slurm_auth_errno(auth_cred)));
@@ -1389,15 +1389,20 @@ int slurm_send_recv_controller_msg(slurm_msg_t *req, slurm_msg_t *resp)
 		retry = 0;
 		/* If the backup controller is in the process of assuming 
 		 * control, we sleep and retry later */
+		
 		ret_list = _send_and_recv_msg(fd, req, resp, 0);
+		
 		if(errno == SLURM_SUCCESS)
 			slurm_free_cred(resp->cred);
 
 		rc = errno;
-		if(ret_list && (list_count(ret_list)>0)) {
-			error("We didn't do things correctly "
-			      "missed %d responses",
-			      list_count(ret_list));
+
+		if(ret_list) {
+			if(list_count(ret_list)>0) {
+				error("We didn't do things correctly "
+				      "missed %d responses",
+				      list_count(ret_list));
+			}
 			list_destroy(ret_list);
 		}
 		
@@ -1544,7 +1549,7 @@ static List _send_recv_rc_msg(slurm_fd fd, slurm_msg_t *req, int timeout)
 	else {
 		msg_rc = ((return_code_msg_t *)msg.data)->return_code;
 		slurm_free_return_code_msg(msg.data);
-		g_slurm_auth_destroy(msg.cred);
+		slurm_free_cred(msg.cred);
 	}
 	ret_data_info = xmalloc(sizeof(ret_data_info_t));
 	ret_data_info->node_name = xstrdup("localhost");
@@ -1630,7 +1635,7 @@ failed:
 	else {
 		msg_rc = ((return_code_msg_t *)resp.data)->return_code;
 		slurm_free_return_code_msg(resp.data);
-		g_slurm_auth_destroy(resp.cred);
+		slurm_free_cred(resp.cred);
 	}
 	ret_data_info = xmalloc(sizeof(ret_data_info_t));
 	ret_data_info->node_name = xstrdup("localhost");
@@ -1786,7 +1791,7 @@ extern int *set_span(int total)
  */
 void slurm_free_msg(slurm_msg_t * msg)
 {
-	(void) g_slurm_auth_destroy(msg->cred);
+	(void) slurm_free_cred(msg->cred);
 	if(msg->ret_list) {
 		list_destroy(msg->ret_list);
 		msg->ret_list = NULL;
@@ -1802,6 +1807,23 @@ void slurm_free_cred(void *cred)
 	(void) g_slurm_auth_destroy(cred);
 }
 
+/* Render a count as a short string, converting values >= 1024 into
+ * "Nk" or "N.Mk" form (e.g. 2048 -> "2k", 1536 -> "1.5k"). */
+int convert_to_kilo(int number, char *tmp)
+{
+	int i;
+	if(number >= 1024) {
+		i = number % 1024;
+		if(i > 0) {
+			i *= 10;
+			i /= 1024;
+			sprintf(tmp, "%d.%dk", number/1024, i);
+		} else 
+			sprintf(tmp, "%dk", number/1024);
+	} else
+		sprintf(tmp, "%d", number);
+
+	return SLURM_SUCCESS;
+}
+
 #if _DEBUG
 
 static void _print_data(char *data, int len)
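
NOTE (hedged usage sketch): convert_to_kilo() as the callers in this patch use it.
The 7-byte buffers they declare hold results of up to six characters plus the
terminator. Sample results follow from the function's own arithmetic:

	char tmp[7];

	convert_to_kilo(512,  tmp);	/* "512"  */
	convert_to_kilo(1024, tmp);	/* "1k"   */
	convert_to_kilo(1536, tmp);	/* "1.5k" */
	convert_to_kilo(2048, tmp);	/* "2k"   */
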
diff --git a/src/common/slurm_protocol_api.h b/src/common/slurm_protocol_api.h
index 6f3ad3dba6a..557948d9ec5 100644
--- a/src/common/slurm_protocol_api.h
+++ b/src/common/slurm_protocol_api.h
@@ -581,4 +581,5 @@ int slurm_send_only_node_msg(slurm_msg_t * request_msg);
 int *set_span(int total);
 void slurm_free_msg(slurm_msg_t * msg);
 void slurm_free_cred(void *cred);
+int convert_to_kilo(int number, char *tmp);
 #endif
diff --git a/src/common/slurm_protocol_pack.c b/src/common/slurm_protocol_pack.c
index e79703d9e0f..919e49e938f 100644
--- a/src/common/slurm_protocol_pack.c
+++ b/src/common/slurm_protocol_pack.c
@@ -293,11 +293,11 @@ void
 pack_header(header_t * header, Buf buffer)
 {
 	
-	pack16(header->version, buffer);
-	pack16(header->flags, buffer);
+	pack16((uint16_t)header->version, buffer);
+	pack16((uint16_t)header->flags, buffer);
 	pack16((uint16_t) header->msg_type, buffer);
-	pack32(header->body_length, buffer);
-	pack16(header->forward.cnt, buffer);
+	pack32((uint32_t)header->body_length, buffer);
+	pack16((uint16_t)header->forward.cnt, buffer);
 	if (header->forward.cnt > 0) {
 		_pack_slurm_addr_array(header->forward.addr,
 				       header->forward.cnt, buffer);
@@ -307,14 +307,14 @@ pack_header(header_t * header, Buf buffer)
 		pack32_array(header->forward.node_id, 
 			     header->forward.cnt, 
 			     buffer);
-		pack32(header->forward.timeout, buffer);
+		pack32((uint32_t)header->forward.timeout, buffer);
 	}
-	pack16(header->ret_cnt, buffer);	
+	pack16((uint16_t)header->ret_cnt, buffer);	
 	if(header->ret_cnt > 0) {
 		_pack_ret_list(header->ret_list,
 			       header->ret_cnt, buffer);		
 	}
-	pack32(header->srun_node_id, buffer);
+	pack32((uint32_t)header->srun_node_id, buffer);
 	slurm_pack_slurm_addr(&header->orig_addr, buffer);
 }
 
@@ -944,7 +944,7 @@ _pack_update_node_msg(update_node_msg_t * msg, Buf buffer)
 	xassert(msg != NULL);
 
 	packstr(msg->node_names, buffer);
-	pack16(msg->node_state, buffer);
+	pack16((uint16_t)msg->node_state, buffer);
 	packstr(msg->reason, buffer);
 }
 
@@ -980,19 +980,19 @@ _pack_node_registration_status_msg(slurm_node_registration_status_msg_t *
 	xassert(msg != NULL);
 
 	pack_time(msg->timestamp, buffer);
-	pack32(msg->status, buffer);
+	pack32((uint32_t)msg->status, buffer);
 	packstr(msg->node_name, buffer);
-	pack32(msg->cpus, buffer);
-	pack32(msg->real_memory_size, buffer);
-	pack32(msg->temporary_disk_space, buffer);
-	pack32(msg->job_count, buffer);
+	pack32((uint32_t)msg->cpus, buffer);
+	pack32((uint32_t)msg->real_memory_size, buffer);
+	pack32((uint32_t)msg->temporary_disk_space, buffer);
+	pack32((uint32_t)msg->job_count, buffer);
 	for (i = 0; i < msg->job_count; i++) {
-		pack32(msg->job_id[i], buffer);
+		pack32((uint32_t)msg->job_id[i], buffer);
 	}
 	for (i = 0; i < msg->job_count; i++) {
-		pack16(msg->step_id[i], buffer);
+		pack16((uint16_t)msg->step_id[i], buffer);
 	}
-	pack16(msg->startup, buffer);
+	pack16((uint16_t)msg->startup, buffer);
 	if (msg->startup)
 		switch_g_pack_node_info(msg->switch_nodeinfo, buffer);
 }
@@ -1054,16 +1054,20 @@ _pack_resource_allocation_response_msg(resource_allocation_response_msg_t *
 {
 	xassert(msg != NULL);
 
-	pack32(msg->error_code, buffer);
-	pack32(msg->job_id, buffer);
+	pack32((uint32_t)msg->error_code, buffer);
+	pack32((uint32_t)msg->job_id, buffer);
 	packstr(msg->node_list, buffer);
 
-	pack16(msg->num_cpu_groups, buffer);
-	pack32_array(msg->cpus_per_node, msg->num_cpu_groups, buffer);
-	pack32_array(msg->cpu_count_reps, msg->num_cpu_groups, buffer);
+	pack16((uint16_t)msg->num_cpu_groups, buffer);
+	if (msg->num_cpu_groups) {
+		pack32_array(msg->cpus_per_node, msg->num_cpu_groups, buffer);
+		pack32_array(msg->cpu_count_reps, msg->num_cpu_groups, buffer);
+	}
+
+	pack16((uint16_t)msg->node_cnt, buffer);
+	if (msg->node_cnt > 0)
+		_pack_slurm_addr_array(msg->node_addr, msg->node_cnt, buffer);
 
-	pack16(msg->node_cnt, buffer);
-	_pack_slurm_addr_array(msg->node_addr, msg->node_cnt, buffer);
 	select_g_pack_jobinfo(msg->select_jobinfo, buffer);
 }
 
@@ -1133,15 +1137,18 @@ static void
     (resource_allocation_and_run_response_msg_t * msg, Buf buffer) {
 	xassert(msg != NULL);
 
-	pack32(msg->job_id, buffer);
+	pack32((uint32_t)msg->job_id, buffer);
 	packstr(msg->node_list, buffer);
-	pack16(msg->num_cpu_groups, buffer);
-	pack32_array(msg->cpus_per_node, msg->num_cpu_groups, buffer);
-	pack32_array(msg->cpu_count_reps, msg->num_cpu_groups, buffer);
-	pack32(msg->job_step_id, buffer);
+	pack16((uint16_t)msg->num_cpu_groups, buffer);
+	if (msg->num_cpu_groups > 0) {
+		pack32_array(msg->cpus_per_node, msg->num_cpu_groups, buffer);
+		pack32_array(msg->cpu_count_reps, msg->num_cpu_groups, buffer);
+	}
+	pack32((uint32_t)msg->job_step_id, buffer);
 
-	pack16(msg->node_cnt, buffer);
-	_pack_slurm_addr_array(msg->node_addr, msg->node_cnt, buffer);
+	pack16((uint16_t)msg->node_cnt, buffer);
+	if (msg->node_cnt)
+		_pack_slurm_addr_array(msg->node_addr, msg->node_cnt, buffer);
 
 	slurm_cred_pack(msg->cred, buffer);
 	switch_pack_jobinfo(msg->switch_job, buffer);
@@ -1217,9 +1224,9 @@ _pack_submit_response_msg(submit_response_msg_t * msg, Buf buffer)
 {
 	xassert(msg != NULL);
 
-	pack32(msg->job_id, buffer);
-	pack32(msg->step_id, buffer);
-	pack32(msg->error_code, buffer);
+	pack32((uint32_t)msg->job_id, buffer);
+	pack32((uint32_t)msg->step_id, buffer);
+	pack32((uint32_t)msg->error_code, buffer);
 }
 
 static int
@@ -1314,17 +1321,17 @@ _pack_update_partition_msg(update_part_msg_t * msg, Buf buffer)
 	xassert(msg != NULL);
 
 	packstr(msg->allow_groups, buffer);
-	pack16(msg-> default_part, buffer);
-	pack32(msg-> max_time,     buffer);
-	pack32(msg-> max_nodes,    buffer);
-	pack32(msg-> min_nodes,    buffer);
+	pack16((uint16_t)msg-> default_part, buffer);
+	pack32((uint32_t)msg-> max_time,     buffer);
+	pack32((uint32_t)msg-> max_nodes,    buffer);
+	pack32((uint32_t)msg-> min_nodes,    buffer);
 	packstr(msg->name,         buffer);
 	packstr(msg->nodes,        buffer);
 
-	pack16(msg-> hidden,      buffer);
-	pack16(msg-> root_only,    buffer);
-	pack16(msg-> shared,       buffer);
-	pack16(msg-> state_up,     buffer);
+	pack16((uint16_t)msg-> hidden,      buffer);
+	pack16((uint16_t)msg-> root_only,    buffer);
+	pack16((uint16_t)msg-> shared,       buffer);
+	pack16((uint16_t)msg-> state_up,     buffer);
 }
 
 static int
@@ -1397,15 +1404,15 @@ _pack_job_step_create_request_msg(job_step_create_request_msg_t
 {
 	xassert(msg != NULL);
 
-	pack32(msg->job_id, buffer);
-	pack32(msg->user_id, buffer);
-	pack32(msg->node_count, buffer);
-	pack32(msg->cpu_count, buffer);
-	pack32(msg->num_tasks, buffer);
+	pack32((uint32_t)msg->job_id, buffer);
+	pack32((uint32_t)msg->user_id, buffer);
+	pack32((uint32_t)msg->node_count, buffer);
+	pack32((uint32_t)msg->cpu_count, buffer);
+	pack32((uint32_t)msg->num_tasks, buffer);
 
-	pack16(msg->relative, buffer);
-	pack16(msg->task_dist, buffer);
-	pack16(msg->port, buffer);
+	pack16((uint16_t)msg->relative, buffer);
+	pack16((uint16_t)msg->task_dist, buffer);
+	pack16((uint16_t)msg->port, buffer);
 	packstr(msg->host, buffer);
 	packstr(msg->name, buffer);
 	packstr(msg->network, buffer);
@@ -1455,8 +1462,8 @@ _pack_kill_job_msg(kill_job_msg_t * msg, Buf buffer)
 {
 	xassert(msg != NULL);
 
-	pack32(msg->job_id,  buffer);
-	pack32(msg->job_uid, buffer);
+	pack32((uint32_t)msg->job_id,  buffer);
+	pack32((uint32_t)msg->job_uid, buffer);
 	select_g_pack_jobinfo(msg->select_jobinfo, buffer);
 }
 
@@ -1490,8 +1497,8 @@ _pack_signal_job_msg(signal_job_msg_t * msg, Buf buffer)
 {
 	xassert(msg != NULL);
 
-	pack32(msg->job_id,  buffer);
-	pack32(msg->signal, buffer);
+	pack32((uint32_t)msg->job_id,  buffer);
+	pack32((uint32_t)msg->signal, buffer);
 	debug("_pack_signal_job_msg signal = %d", msg->signal);
 }
 
@@ -1522,8 +1529,8 @@ _pack_epilog_comp_msg(epilog_complete_msg_t * msg, Buf buffer)
 {
 	xassert(msg != NULL);
 
-	pack32(msg->job_id, buffer);
-	pack32(msg->return_code, buffer);
+	pack32((uint32_t)msg->job_id, buffer);
+	pack32((uint32_t)msg->return_code, buffer);
 	packstr(msg->node_name, buffer);
 	switch_g_pack_node_info(msg->switch_nodeinfo, buffer);
 }
@@ -1561,8 +1568,8 @@ _pack_update_job_time_msg(job_time_msg_t * msg, Buf buffer)
 {
 	xassert(msg != NULL);
 
-	pack32(msg->job_id, buffer);
-	pack_time((uint32_t) msg->expiration_time, buffer);
+	pack32((uint32_t)msg->job_id, buffer);
+	pack_time(msg->expiration_time, buffer);
 }
 
 static int
@@ -1591,7 +1598,7 @@ _pack_job_step_create_response_msg(job_step_create_response_msg_t * msg,
 {
 	xassert(msg != NULL);
 
-	pack32(msg->job_step_id, buffer);
+	pack32((uint32_t)msg->job_step_id, buffer);
 	packstr(msg->node_list, buffer);
 	slurm_cred_pack(msg->cred, buffer);
 	switch_pack_jobinfo(msg->switch_job, buffer);
@@ -1683,7 +1690,9 @@ _unpack_partition_info_members(partition_info_t * part, Buf buffer)
 	safe_unpack32(&part->max_nodes,    buffer);
 	safe_unpack32(&part->min_nodes,    buffer);
 	safe_unpack32(&part->total_nodes,  buffer);
-
+	safe_unpack32(&part->max_offset,   buffer);
+	safe_unpack32(&part->min_offset,   buffer);
+	
 	safe_unpack32(&part->total_cpus,   buffer);
 	safe_unpack16(&part->default_part, buffer);
 	safe_unpack16(&part->hidden,       buffer);
@@ -1724,10 +1733,10 @@ pack_job_step_info_members(uint32_t job_id, uint16_t step_id,
 			   char *nodes, char *name, char *network,
 			   Buf buffer)
 {
-	pack32(job_id, buffer);
-	pack16(step_id, buffer);
-	pack32(user_id, buffer);
-	pack32(num_tasks, buffer);
+	pack32((uint32_t)job_id, buffer);
+	pack16((uint16_t)step_id, buffer);
+	pack32((uint32_t)user_id, buffer);
+	pack32((uint32_t)num_tasks, buffer);
 
 	pack_time(start_time, buffer);
 	packstr(partition, buffer);
@@ -1955,50 +1964,50 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer)
 	packstr(build_ptr->authtype, buffer);
 	packstr(build_ptr->backup_addr, buffer);
 	packstr(build_ptr->backup_controller, buffer);
-	pack16(build_ptr->cache_groups, buffer);
+	pack16((uint16_t)build_ptr->cache_groups, buffer);
 	packstr(build_ptr->checkpoint_type, buffer);
 	packstr(build_ptr->control_addr, buffer);
 	packstr(build_ptr->control_machine, buffer);
 	packstr(build_ptr->epilog, buffer);
-	pack16(build_ptr->fast_schedule, buffer);
-	pack32(build_ptr->first_job_id, buffer);
-	pack16(build_ptr->inactive_limit, buffer);
+	pack16((uint16_t)build_ptr->fast_schedule, buffer);
+	pack32((uint32_t)build_ptr->first_job_id, buffer);
+	pack16((uint16_t)build_ptr->inactive_limit, buffer);
 	packstr(build_ptr->job_acct_loc, buffer);
 	packstr(build_ptr->job_acct_parameters, buffer);
 	packstr(build_ptr->job_acct_type, buffer);
 	packstr(build_ptr->job_comp_loc, buffer);
 	packstr(build_ptr->job_comp_type, buffer);
-	pack16(build_ptr->kill_wait, buffer);
-	pack16(build_ptr->max_job_cnt, buffer);
-	pack16(build_ptr->min_job_age, buffer);
+	pack16((uint16_t)build_ptr->kill_wait, buffer);
+	pack16((uint16_t)build_ptr->max_job_cnt, buffer);
+	pack16((uint16_t)build_ptr->min_job_age, buffer);
 	packstr(build_ptr->mpi_default, buffer);
 	packstr(build_ptr->plugindir, buffer);
 	packstr(build_ptr->proctrack_type, buffer);
 	packstr(build_ptr->prolog, buffer);
         packstr(build_ptr->propagate_rlimits, buffer);
         packstr(build_ptr->propagate_rlimits_except, buffer);
-	pack16(build_ptr->ret2service, buffer);
+	pack16((uint16_t)build_ptr->ret2service, buffer);
 	packstr(build_ptr->schedauth, buffer);
-	pack16(build_ptr->schedport, buffer);
-	pack16(build_ptr->schedrootfltr, buffer);
+	pack16((uint16_t)build_ptr->schedport, buffer);
+	pack16((uint16_t)build_ptr->schedrootfltr, buffer);
 	packstr(build_ptr->schedtype, buffer);
 	packstr(build_ptr->select_type, buffer);
-	pack32(build_ptr->slurm_user_id, buffer);
+	pack32((uint32_t)build_ptr->slurm_user_id, buffer);
 	packstr(build_ptr->slurm_user_name, buffer);
-	pack16(build_ptr->slurmctld_debug, buffer);
+	pack16((uint16_t)build_ptr->slurmctld_debug, buffer);
 	packstr(build_ptr->slurmctld_logfile, buffer);
 	packstr(build_ptr->slurmctld_pidfile, buffer);
-	pack32(build_ptr->slurmctld_port, buffer);
-	pack16(build_ptr->slurmctld_timeout, buffer);
-	pack16(build_ptr->slurmd_debug, buffer);
+	pack32((uint32_t)build_ptr->slurmctld_port, buffer);
+	pack16((uint16_t)build_ptr->slurmctld_timeout, buffer);
+	pack16((uint16_t)build_ptr->slurmd_debug, buffer);
 	packstr(build_ptr->slurmd_logfile, buffer);
 	packstr(build_ptr->slurmd_pidfile, buffer);
 #ifndef MULTIPLE_SLURMD
-	pack32(build_ptr->slurmd_port, buffer);
+	pack32((uint32_t)build_ptr->slurmd_port, buffer);
 #endif
 	packstr(build_ptr->slurmd_spooldir, buffer);
 	debug2("Packing string %s", build_ptr->slurmd_spooldir);
-	pack16(build_ptr->slurmd_timeout, buffer);
+	pack16((uint16_t)build_ptr->slurmd_timeout, buffer);
 	packstr(build_ptr->slurm_conf, buffer);
 	packstr(build_ptr->state_save_location, buffer);
 	packstr(build_ptr->switch_type, buffer);
@@ -2006,13 +2015,13 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer)
 	packstr(build_ptr->task_prolog, buffer);
 	packstr(build_ptr->task_plugin, buffer);
 	packstr(build_ptr->tmp_fs, buffer);
-	pack16(build_ptr->wait_time, buffer);
+	pack16((uint16_t)build_ptr->wait_time, buffer);
 	packstr(build_ptr->job_credential_private_key, buffer);
 	packstr(build_ptr->job_credential_public_certificate, buffer);
 	packstr(build_ptr->srun_prolog, buffer);
 	packstr(build_ptr->srun_epilog, buffer);
 	packstr(build_ptr->node_prefix, buffer);
-	pack16(build_ptr->tree_width, buffer);
+	pack16((uint16_t)build_ptr->tree_width, buffer);
 }
 
 static int
@@ -2165,24 +2174,24 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer)
 	select_jobinfo_t jobinfo;
 
 	/* load the data values */
-	pack16(job_desc_ptr->contiguous, buffer);
-        pack16(job_desc_ptr->exclusive, buffer);
-	pack16(job_desc_ptr->kill_on_node_fail, buffer);
+	pack16((uint16_t)job_desc_ptr->contiguous, buffer);
+        pack16((uint16_t)job_desc_ptr->exclusive, buffer);
+	pack16((uint16_t)job_desc_ptr->kill_on_node_fail, buffer);
 	packstr(job_desc_ptr->features, buffer);
-	pack32(job_desc_ptr->job_id, buffer);
+	pack32((uint32_t)job_desc_ptr->job_id, buffer);
 	packstr(job_desc_ptr->name, buffer);
 
 	packstr(job_desc_ptr->alloc_node, buffer);
-	pack32(job_desc_ptr->alloc_sid, buffer);
-	pack32(job_desc_ptr->min_procs, buffer);
-	pack32(job_desc_ptr->min_memory, buffer);
-	pack32(job_desc_ptr->min_tmp_disk, buffer);
+	pack32((uint32_t)job_desc_ptr->alloc_sid, buffer);
+	pack32((uint32_t)job_desc_ptr->min_procs, buffer);
+	pack32((uint32_t)job_desc_ptr->min_memory, buffer);
+	pack32((uint32_t)job_desc_ptr->min_tmp_disk, buffer);
 
 	packstr(job_desc_ptr->partition, buffer);
-	pack32(job_desc_ptr->priority, buffer);
-	pack32(job_desc_ptr->dependency, buffer);
+	pack32((uint32_t)job_desc_ptr->priority, buffer);
+	pack32((uint32_t)job_desc_ptr->dependency, buffer);
 	packstr(job_desc_ptr->account, buffer);
-	pack16(job_desc_ptr->nice, buffer);
+	pack16((uint16_t)job_desc_ptr->nice, buffer);
 
 	packstr(job_desc_ptr->req_nodes, buffer);
 	packstr(job_desc_ptr->exc_nodes, buffer);
@@ -2196,39 +2205,27 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer)
 	packstr(job_desc_ptr->out, buffer);
 	packstr(job_desc_ptr->work_dir, buffer);
 
-	pack16(job_desc_ptr->immediate, buffer);
-	pack16(job_desc_ptr->shared, buffer);
-	pack16(job_desc_ptr->cpus_per_task, buffer);
-	pack16(job_desc_ptr->task_dist, buffer);
-	pack32(job_desc_ptr->time_limit, buffer);
+	pack16((uint16_t)job_desc_ptr->immediate, buffer);
+	pack16((uint16_t)job_desc_ptr->shared, buffer);
+	pack16((uint16_t)job_desc_ptr->cpus_per_task, buffer);
+	pack16((uint16_t)job_desc_ptr->task_dist, buffer);
+	pack32((uint32_t)job_desc_ptr->time_limit, buffer);
 
-	pack32(job_desc_ptr->num_procs, buffer);
-	pack32(job_desc_ptr->min_nodes, buffer);
-	pack32(job_desc_ptr->max_nodes, buffer);
-	pack32(job_desc_ptr->num_tasks, buffer);
-	pack32(job_desc_ptr->user_id, buffer);
-	pack32(job_desc_ptr->group_id, buffer);
+	pack32((uint32_t)job_desc_ptr->num_procs, buffer);
+	pack32((uint32_t)job_desc_ptr->min_nodes, buffer);
+	pack32((uint32_t)job_desc_ptr->max_nodes, buffer);
+	pack32((uint32_t)job_desc_ptr->num_tasks, buffer);
+	pack32((uint32_t)job_desc_ptr->user_id, buffer);
+	pack32((uint32_t)job_desc_ptr->group_id, buffer);
 
-	pack16(job_desc_ptr->port, buffer);
+	pack16((uint16_t)job_desc_ptr->port, buffer);
 	packstr(job_desc_ptr->host, buffer);
 	packstr(job_desc_ptr->network, buffer);
 	pack_time(job_desc_ptr->begin_time, buffer);
 
-	pack16(job_desc_ptr->mail_type, buffer);
+	pack16((uint16_t)job_desc_ptr->mail_type, buffer);
 	packstr(job_desc_ptr->mail_user, buffer);
-
-	if (select_g_alloc_jobinfo (&jobinfo) == SLURM_SUCCESS) {
-#if SYSTEM_DIMENSIONS
-		select_g_set_jobinfo(jobinfo, SELECT_DATA_GEOMETRY, 
-			job_desc_ptr->geometry);
-#endif
-		select_g_set_jobinfo(jobinfo, SELECT_DATA_CONN_TYPE, 
-			&(job_desc_ptr->conn_type));
-		select_g_set_jobinfo(jobinfo, SELECT_DATA_ROTATE, 
-			&(job_desc_ptr->rotate));
-		select_g_pack_jobinfo(jobinfo, buffer);
-		select_g_free_jobinfo(&jobinfo);
-	}
+	select_g_pack_jobinfo(job_desc_ptr->select_jobinfo, buffer);
 }
 
 /* _unpack_job_desc_msg
@@ -2303,14 +2300,6 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer)
 	if (select_g_alloc_jobinfo (&job_desc_ptr->select_jobinfo)
 	||  select_g_unpack_jobinfo(job_desc_ptr->select_jobinfo, buffer))
 		goto unpack_error;
-#if SYSTEM_DIMENSIONS
-	select_g_get_jobinfo(job_desc_ptr->select_jobinfo, 
-		SELECT_DATA_GEOMETRY, job_desc_ptr->geometry);
-#endif
-	select_g_get_jobinfo(job_desc_ptr->select_jobinfo, 
-		SELECT_DATA_CONN_TYPE, &job_desc_ptr->conn_type);
-	select_g_get_jobinfo(job_desc_ptr->select_jobinfo, 
-		SELECT_DATA_ROTATE, &job_desc_ptr->rotate);
 	
 	return SLURM_SUCCESS;
 
@@ -2339,7 +2328,7 @@ static void
 _pack_old_job_desc_msg(old_job_alloc_msg_t * job_desc_ptr, Buf buffer)
 {
 	/* load the data values */
-	pack32(job_desc_ptr->job_id, buffer);
+	pack32((uint32_t)job_desc_ptr->job_id, buffer);
 }
 
 static int
@@ -2392,7 +2381,7 @@ static void
 _pack_return_code_msg(return_code_msg_t * msg, Buf buffer)
 {
 	xassert(msg != NULL);
-	pack32(msg->return_code, buffer);
+	pack32((uint32_t)msg->return_code, buffer);
 }
 
 static int
@@ -2418,11 +2407,11 @@ _pack_reattach_tasks_request_msg(reattach_tasks_request_msg_t * msg,
 				 Buf buffer)
 {
 	xassert(msg != NULL);
-	pack32(msg->job_id, buffer);
-	pack32(msg->job_step_id, buffer);
-	pack32(msg->srun_node_id, buffer);
-	pack16(msg->resp_port, buffer);
-	pack16(msg->io_port, buffer);
+	pack32((uint32_t)msg->job_id, buffer);
+	pack32((uint32_t)msg->job_step_id, buffer);
+	pack32((uint32_t)msg->srun_node_id, buffer);
+	pack16((uint16_t)msg->resp_port, buffer);
+	pack16((uint16_t)msg->io_port, buffer);
 	packstr(msg->ofname, buffer);
 	packstr(msg->efname, buffer);
 	packstr(msg->ifname, buffer);
@@ -2467,9 +2456,9 @@ _pack_reattach_tasks_response_msg(reattach_tasks_response_msg_t * msg,
 	xassert(msg != NULL);
 	packstr(msg->node_name,   buffer);
 	packstr(msg->executable_name, buffer);
-	pack32(msg->return_code,  buffer);
-	pack32(msg->srun_node_id, buffer);
-	pack32(msg->ntasks,       buffer);
+	pack32((uint32_t)msg->return_code,  buffer);
+	pack32((uint32_t)msg->srun_node_id, buffer);
+	pack32((uint32_t)msg->ntasks,       buffer);
 	pack32_array(msg->gtids,      msg->ntasks, buffer);
 	pack32_array(msg->local_pids, msg->ntasks, buffer);
 }
@@ -2507,8 +2496,8 @@ static void
 _pack_task_exit_msg(task_exit_msg_t * msg, Buf buffer)
 {
 	xassert(msg != NULL);
-	pack32(msg->return_code, buffer);
-	pack32(msg->num_tasks, buffer);
+	pack32((uint32_t)msg->return_code, buffer);
+	pack32((uint32_t)msg->num_tasks, buffer);
 	pack32_array(msg->task_id_list,
 		     msg->num_tasks, buffer);
 }
@@ -2541,10 +2530,10 @@ static void
 _pack_launch_tasks_response_msg(launch_tasks_response_msg_t * msg, Buf buffer)
 {
 	xassert(msg != NULL);
-	pack32(msg->return_code, buffer);
+	pack32((uint32_t)msg->return_code, buffer);
 	packstr(msg->node_name, buffer);
-	pack32(msg->srun_node_id, buffer);
-	pack32(msg->count_of_pids, buffer);
+	pack32((uint32_t)msg->srun_node_id, buffer);
+	pack32((uint32_t)msg->count_of_pids, buffer);
 	pack32_array(msg->local_pids,
 		     msg->count_of_pids, buffer);
 }
@@ -2582,19 +2571,19 @@ _pack_launch_tasks_request_msg(launch_tasks_request_msg_t * msg, Buf buffer)
 {
 	int i=0;
 	xassert(msg != NULL);
-	pack32(msg->job_id, buffer);
-	pack32(msg->job_step_id, buffer);
-	pack32(msg->nnodes, buffer);
-	pack32(msg->nprocs, buffer);
-	pack32(msg->uid, buffer);
-	pack32(msg->gid, buffer);
-	pack32(msg->srun_node_id, buffer);
+	pack32((uint32_t)msg->job_id, buffer);
+	pack32((uint32_t)msg->job_step_id, buffer);
+	pack32((uint32_t)msg->nnodes, buffer);
+	pack32((uint32_t)msg->nprocs, buffer);
+	pack32((uint32_t)msg->uid, buffer);
+	pack32((uint32_t)msg->gid, buffer);
+	pack32((uint32_t)msg->srun_node_id, buffer);
 	slurm_cred_pack(msg->cred, buffer);
 	for(i=0; i<msg->nnodes; i++) {
-		pack32(msg->tasks_to_launch[i], buffer);
-		pack32(msg->cpus_allocated[i], buffer);
-		pack16(msg->resp_port[i], buffer);
-		pack16(msg->io_port[i], buffer);
+		pack32((uint32_t)msg->tasks_to_launch[i], buffer);
+		pack32((uint32_t)msg->cpus_allocated[i], buffer);
+		pack16((uint16_t)msg->resp_port[i], buffer);
+		pack16((uint16_t)msg->io_port[i], buffer);
 		pack32_array(msg->global_task_ids[i], 
 			     msg->tasks_to_launch[i], 
 			     buffer);	
@@ -2602,17 +2591,17 @@ _pack_launch_tasks_request_msg(launch_tasks_request_msg_t * msg, Buf buffer)
 	slurm_pack_slurm_addr(&msg->orig_addr, buffer);
 	packstr_array(msg->env, msg->envc, buffer);
 	packstr(msg->cwd, buffer);
-	pack32(msg->cpu_bind_type, buffer);
+	pack32((uint32_t)msg->cpu_bind_type, buffer);
 	packstr(msg->cpu_bind, buffer);
 	packstr_array(msg->argv, msg->argc, buffer);
-	pack16(msg->task_flags, buffer);
+	pack16((uint16_t)msg->task_flags, buffer);
 	packstr(msg->ofname, buffer);
 	packstr(msg->efname, buffer);
 	packstr(msg->ifname, buffer);
 	pack8(msg->buffered_stdio, buffer);
 	packstr(msg->task_prolog, buffer);
 	packstr(msg->task_epilog, buffer);
-	pack32(msg->slurmd_debug, buffer);
+	pack32((uint32_t)msg->slurmd_debug, buffer);
 	switch_pack_jobinfo(msg->switch_job, buffer);
 }
 
@@ -2688,22 +2677,22 @@ static void
 _pack_spawn_task_request_msg(spawn_task_request_msg_t * msg, Buf buffer)
 {
 	xassert(msg != NULL);
-	pack32(msg->job_id, buffer);
-	pack32(msg->job_step_id, buffer);
-	pack32(msg->nnodes, buffer);
-	pack32(msg->nprocs, buffer);
-	pack32(msg->uid, buffer);
-        pack32(msg->gid, buffer);
-	pack32(msg->srun_node_id, buffer);
+	pack32((uint32_t)msg->job_id, buffer);
+	pack32((uint32_t)msg->job_step_id, buffer);
+	pack32((uint32_t)msg->nnodes, buffer);
+	pack32((uint32_t)msg->nprocs, buffer);
+	pack32((uint32_t)msg->uid, buffer);
+        pack32((uint32_t)msg->gid, buffer);
+	pack32((uint32_t)msg->srun_node_id, buffer);
 	slurm_cred_pack(msg->cred, buffer);
 	packstr_array(msg->env, msg->envc, buffer);
 	packstr(msg->cwd, buffer);
 	packstr_array(msg->argv, msg->argc, buffer);
-	pack16(msg->io_port, buffer);
-	pack16(msg->task_flags, buffer);
-	pack16(msg->cpus_allocated, buffer);
-	pack32(msg->slurmd_debug, buffer);
-	pack32(msg->global_task_id, buffer);
+	pack16((uint16_t)msg->io_port, buffer);
+	pack16((uint16_t)msg->task_flags, buffer);
+	pack16((uint16_t)msg->cpus_allocated, buffer);
+	pack32((uint32_t)msg->slurmd_debug, buffer);
+	pack32((uint32_t)msg->global_task_id, buffer);
 	switch_pack_jobinfo(msg->switch_job, buffer);
 }
 
@@ -2754,9 +2743,9 @@ _unpack_spawn_task_request_msg(spawn_task_request_msg_t **
 static void
 _pack_cancel_tasks_msg(kill_tasks_msg_t * msg, Buf buffer)
 {
-	pack32(msg->job_id, buffer);
-	pack32(msg->job_step_id, buffer);
-	pack32(msg->signal, buffer);
+	pack32((uint32_t)msg->job_id, buffer);
+	pack32((uint32_t)msg->job_step_id, buffer);
+	pack32((uint32_t)msg->signal, buffer);
 }
 
 static int
@@ -2781,7 +2770,7 @@ _unpack_cancel_tasks_msg(kill_tasks_msg_t ** msg_ptr, Buf buffer)
 static void
 _pack_shutdown_msg(shutdown_msg_t * msg, Buf buffer)
 {
-	pack16(msg->core, buffer);
+	pack16((uint16_t)msg->core, buffer);
 }
 
 static int
@@ -2810,10 +2799,10 @@ _unpack_shutdown_msg(shutdown_msg_t ** msg_ptr, Buf buffer)
 static void
 _pack_job_step_kill_msg(job_step_kill_msg_t * msg, Buf buffer)
 {
-	pack32(msg->job_id, buffer);
-	pack32(msg->job_step_id, buffer);
-	pack16(msg->signal, buffer);
-	pack16(msg->batch_flag, buffer);
+	pack32((uint32_t)msg->job_id, buffer);
+	pack32((uint32_t)msg->job_step_id, buffer);
+	pack16((uint16_t)msg->signal, buffer);
+	pack16((uint16_t)msg->batch_flag, buffer);
 }
 
 /* _unpack_job_step_kill_msg
@@ -2845,10 +2834,10 @@ _unpack_job_step_kill_msg(job_step_kill_msg_t ** msg_ptr, Buf buffer)
 static void
 _pack_complete_job_step_msg(complete_job_step_msg_t * msg, Buf buffer)
 {
-	pack32(msg->job_id, buffer);
-	pack32(msg->job_step_id, buffer);
-	pack32(msg->job_rc, buffer);
-	pack32(msg->slurm_rc, buffer);
+	pack32((uint32_t)msg->job_id, buffer);
+	pack32((uint32_t)msg->job_step_id, buffer);
+	pack32((uint32_t)msg->job_rc, buffer);
+	pack32((uint32_t)msg->slurm_rc, buffer);
 	packstr(msg->node_name, buffer);
 }
 
@@ -2878,7 +2867,7 @@ static void
 _pack_job_info_request_msg(job_info_request_msg_t * msg, Buf buffer)
 {
 	pack_time(msg->last_update, buffer);
-	pack16(msg->show_flags, buffer);
+	pack16((uint16_t)msg->show_flags, buffer);
 }
 
 static int
@@ -2928,9 +2917,9 @@ static void
 _pack_job_step_info_req_msg(job_step_info_request_msg_t * msg, Buf buffer)
 {
 	pack_time(msg->last_update, buffer);
-	pack32(msg->job_id, buffer);
-	pack32(msg->step_id, buffer);
-	pack16(msg->show_flags, buffer);
+	pack32((uint32_t)msg->job_id, buffer);
+	pack32((uint32_t)msg->step_id, buffer);
+	pack16((uint16_t)msg->show_flags, buffer);
 }
 
 static int
@@ -2957,7 +2946,7 @@ static void
 _pack_node_info_request_msg(node_info_request_msg_t * msg, Buf buffer)
 {
 	pack_time(msg->last_update, buffer);
-	pack16(msg->show_flags, buffer);
+	pack16((uint16_t)msg->show_flags, buffer);
 }
 
 static int
@@ -2982,7 +2971,7 @@ static void
 _pack_part_info_request_msg(part_info_request_msg_t * msg, Buf buffer)
 {
 	pack_time(msg->last_update, buffer);
-	pack16(msg->show_flags, buffer);
+	pack16((uint16_t)msg->show_flags, buffer);
 }
 
 static int
@@ -3009,7 +2998,7 @@ _pack_slurm_addr_array(slurm_addr * slurm_address,
 {
 	int i = 0;
 	uint16_t nl = htons(size_val);
-	pack16(nl, buffer);
+	pack16((uint16_t)nl, buffer);
 
 	for (i = 0; i < size_val; i++) {
 		slurm_pack_slurm_addr(slurm_address + i, buffer);
@@ -3057,18 +3046,18 @@ _pack_ret_list(List ret_list,
 	
 	itr = list_iterator_create(ret_list);		
 	while((ret_type = list_next(itr)) != NULL) {
-		pack32(ret_type->msg_rc, buffer);
-		pack32(ret_type->err, buffer);
-		pack32(ret_type->type, buffer);
+		pack32((uint32_t)ret_type->msg_rc, buffer);
+		pack32((uint32_t)ret_type->err, buffer);
+		pack32((uint32_t)ret_type->type, buffer);
 		
 		msg.msg_type = ret_type->type;
 
 		i = list_count(ret_type->ret_data_list);
-		pack16(i, buffer);
+		pack16((uint16_t)i, buffer);
 		itr_data = list_iterator_create(ret_type->ret_data_list);
 		while((ret_data_info = list_next(itr_data)) != NULL) {
 			packstr(ret_data_info->node_name, buffer);
-			pack32(ret_data_info->nodeid, buffer);
+			pack32((uint32_t)ret_data_info->nodeid, buffer);
 			msg.data = ret_data_info->data;
 			pack_msg(&msg, buffer);
 		} 
@@ -3122,13 +3111,13 @@ _pack_batch_job_launch_msg(batch_job_launch_msg_t * msg, Buf buffer)
 {
 	xassert(msg != NULL);
 
-	pack32(msg->job_id, buffer);
-	pack32(msg->step_id, buffer);
-	pack32(msg->uid, buffer);
-	pack32(msg->gid, buffer);
-	pack32(msg->nprocs, buffer);
+	pack32((uint32_t)msg->job_id, buffer);
+	pack32((uint32_t)msg->step_id, buffer);
+	pack32((uint32_t)msg->uid, buffer);
+	pack32((uint32_t)msg->gid, buffer);
+	pack32((uint32_t)msg->nprocs, buffer);
 
-	pack16(msg->num_cpu_groups, buffer);
+	pack16((uint16_t)msg->num_cpu_groups, buffer);
 	pack32_array(msg->cpus_per_node, msg->num_cpu_groups, buffer);
 	pack32_array(msg->cpu_count_reps, msg->num_cpu_groups, buffer);
 
@@ -3140,10 +3129,10 @@ _pack_batch_job_launch_msg(batch_job_launch_msg_t * msg, Buf buffer)
 	packstr(msg->in, buffer);
 	packstr(msg->out, buffer);
 
-	pack16(msg->argc, buffer);
+	pack16((uint16_t)msg->argc, buffer);
 	packstr_array(msg->argv, msg->argc, buffer);
 
-	pack16(msg->envc, buffer);
+	pack16((uint16_t)msg->envc, buffer);
 	packstr_array(msg->environment, msg->envc, buffer);
 
 	select_g_pack_jobinfo(msg->select_jobinfo, buffer);
@@ -3223,7 +3212,7 @@ _pack_job_id_request_msg(job_id_request_msg_t * msg, Buf buffer)
 {
 	xassert(msg != NULL);
 
-	pack32(msg->job_pid, buffer);
+	pack32((uint32_t)msg->job_pid, buffer);
 }
 
 static int
@@ -3251,7 +3240,7 @@ _pack_job_id_response_msg(job_id_response_msg_t * msg, Buf buffer)
 {
 	xassert(msg != NULL);
 
-	pack32(msg->job_id, buffer);
+	pack32((uint32_t)msg->job_id, buffer);
 }
 
 static int
@@ -3279,8 +3268,8 @@ _pack_srun_ping_msg(srun_ping_msg_t * msg, Buf buffer)
 {
 	xassert ( msg != NULL );
 
-	pack32 ( msg ->job_id  , buffer ) ;
-	pack32 ( msg ->step_id , buffer ) ;
+	pack32((uint32_t)msg ->job_id  , buffer ) ;
+	pack32((uint32_t)msg ->step_id , buffer ) ;
 }
 
 static int  
@@ -3292,8 +3281,8 @@ _unpack_srun_ping_msg(srun_ping_msg_t ** msg_ptr, Buf buffer)
 	msg = xmalloc ( sizeof (srun_ping_msg_t) ) ;
 	*msg_ptr = msg;
 
-	safe_unpack32 ( & msg->job_id  , buffer ) ;
-	safe_unpack32 ( & msg->step_id , buffer ) ;
+	safe_unpack32(&msg->job_id  , buffer ) ;
+	safe_unpack32(&msg->step_id , buffer ) ;
 	return SLURM_SUCCESS;
 
     unpack_error:
@@ -3307,9 +3296,9 @@ _pack_srun_node_fail_msg(srun_node_fail_msg_t * msg, Buf buffer)
 {
 	xassert ( msg != NULL );
 
-	pack32 ( msg ->job_id  , buffer ) ;
-	pack32 ( msg ->step_id , buffer ) ;
-	packstr ( msg ->nodelist, buffer ) ;
+	pack32((uint32_t)msg ->job_id  , buffer ) ;
+	pack32((uint32_t)msg ->step_id , buffer ) ;
+	packstr(msg ->nodelist, buffer ) ;
 }
 
 static int 
@@ -3322,8 +3311,8 @@ _unpack_srun_node_fail_msg(srun_node_fail_msg_t ** msg_ptr, Buf buffer)
 	msg = xmalloc ( sizeof (srun_node_fail_msg_t) ) ;
 	*msg_ptr = msg;
 
-	safe_unpack32 ( & msg->job_id  , buffer ) ;
-	safe_unpack32 ( & msg->step_id , buffer ) ;
+	safe_unpack32(&msg->job_id  , buffer ) ;
+	safe_unpack32(&msg->step_id , buffer ) ;
 	safe_unpackstr_xmalloc ( & msg->nodelist, &uint16_tmp, buffer);
 
 	return SLURM_SUCCESS;
@@ -3340,7 +3329,7 @@ _pack_job_ready_msg(job_id_msg_t * msg, Buf buffer)
 {
 	xassert ( msg != NULL );
 
-	pack32 ( msg -> job_id  , buffer ) ;
+	pack32((uint32_t)msg->job_id  , buffer ) ;
 }
 
 static int
@@ -3352,7 +3341,7 @@ _unpack_job_ready_msg(job_id_msg_t ** msg_ptr, Buf buffer)
 	msg = xmalloc ( sizeof (job_id_msg_t) );
 	*msg_ptr = msg ;
 
-	safe_unpack32 ( & msg -> job_id  , buffer ) ;
+	safe_unpack32(&msg->job_id  , buffer ) ;
 	return SLURM_SUCCESS;
 
       unpack_error:
@@ -3366,8 +3355,8 @@ _pack_srun_timeout_msg(srun_timeout_msg_t * msg, Buf buffer)
 {
 	xassert ( msg != NULL );
 
-	pack32 ( msg -> job_id  , buffer ) ;
-	pack32 ( msg -> step_id , buffer ) ;
+	pack32((uint32_t)msg->job_id, buffer ) ;
+	pack32((uint32_t)msg->step_id , buffer ) ;
 	pack_time ( msg -> timeout, buffer );
 }
 
@@ -3380,9 +3369,9 @@ _unpack_srun_timeout_msg(srun_timeout_msg_t ** msg_ptr, Buf buffer)
 	msg = xmalloc ( sizeof (srun_timeout_msg_t) ) ;
 	*msg_ptr = msg ;
 
-	safe_unpack32 ( & msg -> job_id  , buffer ) ;
-	safe_unpack32 ( & msg -> step_id , buffer ) ;
-	safe_unpack_time ( & msg -> timeout , buffer );
+	safe_unpack32(&msg->job_id, buffer ) ;
+	safe_unpack32(&msg->step_id, buffer ) ;
+	safe_unpack_time (&msg->timeout, buffer );
 	return SLURM_SUCCESS;
 
     unpack_error:
@@ -3395,8 +3384,8 @@ static void _pack_suspend_msg(suspend_msg_t *msg, Buf buffer)
 {
 	xassert ( msg != NULL );
 
-	pack16 ( msg -> op,      buffer ) ;
-	pack32 ( msg -> job_id,  buffer ) ;
+	pack16((uint16_t)msg -> op, buffer ) ;
+	pack32((uint32_t)msg->job_id,  buffer ) ;
 }
 
 static int  _unpack_suspend_msg(suspend_msg_t **msg_ptr, Buf buffer)
@@ -3407,8 +3396,8 @@ static int  _unpack_suspend_msg(suspend_msg_t **msg_ptr, Buf buffer)
 	msg = xmalloc ( sizeof (suspend_msg_t) );
 	*msg_ptr = msg ;
 
-	safe_unpack16 ( & msg -> op ,      buffer ) ;
-	safe_unpack32 ( & msg -> job_id  , buffer ) ;
+	safe_unpack16(&msg->op ,      buffer ) ;
+	safe_unpack32(&msg->job_id  , buffer ) ;
 	return SLURM_SUCCESS;
 
     unpack_error:
@@ -3422,10 +3411,10 @@ _pack_checkpoint_msg(checkpoint_msg_t *msg, Buf buffer)
 {
 	xassert ( msg != NULL );
 
-	pack16 ( msg -> op,      buffer ) ;
-	pack16 ( msg -> data,    buffer ) ;
-	pack32 ( msg -> job_id,  buffer ) ;
-	pack32 ( msg -> step_id, buffer ) ;
+	pack16((uint16_t)msg->op,      buffer ) ;
+	pack16((uint16_t)msg->data,    buffer ) ;
+	pack32((uint32_t)msg->job_id,  buffer ) ;
+	pack32((uint32_t)msg->step_id, buffer ) ;
 }
 
 static int
@@ -3437,10 +3426,10 @@ _unpack_checkpoint_msg(checkpoint_msg_t **msg_ptr, Buf buffer)
 	msg = xmalloc ( sizeof (checkpoint_msg_t) ) ;
 	*msg_ptr = msg ;
 
-	safe_unpack16 ( & msg -> op ,      buffer ) ;
-	safe_unpack16 ( & msg -> data ,    buffer ) ;
-	safe_unpack32 ( & msg -> job_id  , buffer ) ;
-	safe_unpack32 ( & msg -> step_id , buffer ) ;
+	safe_unpack16(&msg->op, buffer ) ;
+	safe_unpack16(&msg->data, buffer ) ;
+	safe_unpack32(&msg->job_id, buffer ) ;
+	safe_unpack32(&msg->step_id, buffer ) ;
 	return SLURM_SUCCESS;
 
     unpack_error:
@@ -3454,9 +3443,9 @@ _pack_checkpoint_comp(checkpoint_comp_msg_t *msg, Buf buffer)
 {
 	xassert ( msg != NULL );
 
-	pack32 ( msg -> job_id,  buffer ) ;
-	pack32 ( msg -> step_id, buffer ) ;
-	pack32 ( msg -> error_code, buffer ) ;
+	pack32((uint32_t)msg -> job_id,  buffer ) ;
+	pack32((uint32_t)msg -> step_id, buffer ) ;
+	pack32((uint32_t)msg -> error_code, buffer ) ;
 	packstr ( msg -> error_msg, buffer ) ;
 	pack_time ( msg -> begin_time, buffer ) ;
 }
@@ -3471,9 +3460,9 @@ _unpack_checkpoint_comp(checkpoint_comp_msg_t **msg_ptr, Buf buffer)
 	msg = xmalloc ( sizeof (checkpoint_comp_msg_t) );
 	*msg_ptr = msg ;
 
-	safe_unpack32 ( & msg -> job_id  , buffer ) ;
-	safe_unpack32 ( & msg -> step_id , buffer ) ;
-	safe_unpack32 ( & msg -> error_code , buffer ) ;
+	safe_unpack32(& msg -> job_id  , buffer ) ;
+	safe_unpack32(& msg -> step_id , buffer ) ;
+	safe_unpack32(& msg -> error_code , buffer ) ;
 	safe_unpackstr_xmalloc ( & msg -> error_msg, & uint16_tmp , buffer ) ;
 	safe_unpack_time ( & msg -> begin_time , buffer ) ;
 	return SLURM_SUCCESS;
@@ -3491,7 +3480,7 @@ _pack_checkpoint_resp_msg(checkpoint_resp_msg_t *msg, Buf buffer)
 	xassert ( msg != NULL );
 
 	pack_time ( msg -> event_time, buffer ) ;
-	pack32 ( msg -> error_code,  buffer ) ;
+	pack32((uint32_t)msg -> error_code,  buffer ) ;
 	packstr ( msg -> error_msg, buffer ) ;
 }
 
@@ -3506,7 +3495,7 @@ _unpack_checkpoint_resp_msg(checkpoint_resp_msg_t **msg_ptr, Buf buffer)
 	*msg_ptr = msg ;
 
 	safe_unpack_time ( & msg -> event_time, buffer ) ;
-	safe_unpack32 ( & msg -> error_code , buffer ) ;
+	safe_unpack32(& msg -> error_code , buffer ) ;
 	safe_unpackstr_xmalloc ( & msg -> error_msg, & uint16_tmp , buffer ) ;
 	return SLURM_SUCCESS;
 
@@ -3521,8 +3510,8 @@ static void _pack_jobacct_data(jobacct_msg_t * msg , Buf buffer )
 	xassert ( msg != NULL );
 	/* debug("jobacct: packing message"); */
 
-	pack16 ( msg -> len, buffer );
-	packmem ( msg -> data, msg->len, buffer ) ;
+	pack16((uint16_t)msg->len, buffer );
+	packmem(msg->data, msg->len, buffer ) ;
 }
 
 static int _unpack_jobacct_data(jobacct_msg_t ** msg_ptr , Buf buffer )
@@ -3535,13 +3524,13 @@ static int _unpack_jobacct_data(jobacct_msg_t ** msg_ptr , Buf buffer )
 	msg = xmalloc ( sizeof (jobacct_msg_t) ) ;
 	*msg_ptr = msg;
 
-	safe_unpack16 ( & msg -> len, buffer );
-	safe_unpackmem_xmalloc ( & msg -> data, & uint16_tmp , buffer ) ;
+	safe_unpack16(&msg->len, buffer );
+	safe_unpackmem_xmalloc(&msg->data, &uint16_tmp , buffer ) ;
 	/* debug("jobacct: unpacked message"); */
 	return SLURM_SUCCESS;
 
     unpack_error:
-	xfree(msg -> data);
+	xfree(msg->data);
 	xfree(msg);
 	*msg_ptr = NULL;
 	return SLURM_ERROR;
@@ -3609,7 +3598,7 @@ static void _pack_kvs_rec(struct kvs_comm *msg_ptr, Buf buffer)
 	xassert(msg_ptr != NULL);
 
 	packstr(msg_ptr->kvs_name, buffer);
-	pack16(msg_ptr->kvs_cnt, buffer);
+	pack16((uint16_t)msg_ptr->kvs_cnt, buffer);
 	for (i=0; i<msg_ptr->kvs_cnt; i++) {
 		packstr(msg_ptr->kvs_keys[i], buffer);
 		packstr(msg_ptr->kvs_values[i], buffer);
@@ -3643,8 +3632,8 @@ static void _pack_kvs_data(struct kvs_comm_set *msg_ptr, Buf buffer)
 	int i;
 	xassert(msg_ptr != NULL);
 
-	pack16(msg_ptr->task_id, buffer);
-	pack16(msg_ptr->kvs_comm_recs, buffer);
+	pack16((uint16_t)msg_ptr->task_id, buffer);
+	pack16((uint16_t)msg_ptr->kvs_comm_recs, buffer);
 	for (i=0; i<msg_ptr->kvs_comm_recs; i++) 
 		_pack_kvs_rec(msg_ptr->kvs_comm_ptr[i], buffer);
 }
@@ -3682,9 +3671,9 @@ unpack_error:
 
 static void _pack_kvs_get(kvs_get_msg_t *msg_ptr, Buf buffer)
 {
-	pack16(msg_ptr->task_id, buffer);
-	pack16(msg_ptr->size, buffer);
-	pack16(msg_ptr->port, buffer);
+	pack16((uint16_t)msg_ptr->task_id, buffer);
+	pack16((uint16_t)msg_ptr->size, buffer);
+	pack16((uint16_t)msg_ptr->port, buffer);
 	packstr(msg_ptr->hostname, buffer);
 }
 
@@ -3712,8 +3701,8 @@ void pack_ ( * msg , Buf buffer )
 {
 	xassert ( msg != NULL );
 
-	pack16 ( msg -> , buffer ) ;
-	pack32 ( msg -> , buffer ) ;
+	pack16( msg -> , buffer ) ;
+	pack32((uint32_t)msg -> , buffer ) ;
 	packstr ( msg -> , buffer ) ;
 }
 
@@ -3727,8 +3716,8 @@ int unpack_ ( ** msg_ptr , Buf buffer )
 	msg = xmalloc ( sizeof ( ) ) ;
 	*msg_ptr = msg;
 
-	safe_unpack16 ( & msg -> , buffer ) ;
-	safe_unpack32 ( & msg -> , buffer ) ;
+	safe_unpack16( & msg -> , buffer ) ;
+	safe_unpack32(& msg -> , buffer ) ;
 	safe_unpackstr_xmalloc ( & msg -> x, & uint16_tmp , buffer ) ;
 	return SLURM_SUCCESS;
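The casts added throughout the hunks above make the width of every packed field explicit: signed struct members such as return codes, node ids and ports go through the unsigned pack32()/pack16() helpers (declared in src/common/pack.h) with an explicit conversion instead of an implicit one. A minimal sketch of a matching pack/unpack pair in that style follows; example_msg_t and both functions are hypothetical, only Buf, the pack helpers, xmalloc/xfree and the SLURM_SUCCESS/SLURM_ERROR codes are assumed from the SLURM tree:

	#include <inttypes.h>
	#include <slurm/slurm_errno.h>

	#include "src/common/pack.h"
	#include "src/common/xmalloc.h"

	/* hypothetical message type, for illustration only */
	typedef struct {
		int32_t  return_code;	/* signed in the struct ...      */
		uint16_t port;
	} example_msg_t;

	static void _pack_example_msg(example_msg_t *msg, Buf buffer)
	{
		/* ... but always packed through the unsigned helpers,
		 * with the width made explicit by a cast */
		pack32((uint32_t)msg->return_code, buffer);
		pack16((uint16_t)msg->port, buffer);
	}

	static int _unpack_example_msg(example_msg_t **msg_ptr, Buf buffer)
	{
		uint32_t tmp32;
		example_msg_t *msg = xmalloc(sizeof(example_msg_t));
		*msg_ptr = msg;

		/* unpack in the same order and widths as the pack side */
		safe_unpack32(&tmp32, buffer);
		msg->return_code = (int32_t)tmp32;
		safe_unpack16(&msg->port, buffer);
		return SLURM_SUCCESS;

	unpack_error:
		xfree(msg);
		*msg_ptr = NULL;
		return SLURM_ERROR;
	}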
 
diff --git a/src/plugins/select/bluegene/block_allocator/block_allocator.c b/src/plugins/select/bluegene/block_allocator/block_allocator.c
index 9b30a5abeb1..f1ae861be88 100644
--- a/src/plugins/select/bluegene/block_allocator/block_allocator.c
+++ b/src/plugins/select/bluegene/block_allocator/block_allocator.c
@@ -62,6 +62,7 @@ int color_count = 0;
 char letters[62];
 char colors[6];
 pthread_mutex_t api_file_mutex = PTHREAD_MUTEX_INITIALIZER;
+bool *passthrough = NULL;
 
 /** internal helper functions */
 #ifdef HAVE_BG_FILES
@@ -143,6 +144,9 @@ static int *_set_best_path();
 /* */
 static int _set_one_dim(int *start, int *end, int *coord);
 
+/* */
+static void _destroy_geo(void *object);
+
 /* Global */
 List bp_map_list;
 List bg_info_list;
@@ -167,15 +171,7 @@ extern void destroy_bg_info_record(void* object)
  * create a block request.  Note that if the geometry is given,
  * then size is ignored.  
  * 
- * OUT - ba_request: structure to allocate and fill in.  
- * IN - geometry: requested geometry of block
- * IN - size: requested size of block
- * IN - rotate: if true, allows rotation of block during fit
- * IN - elongate: if true, will try to fit different geometries of
- *      same size requests
- * IN - contig: enforce contiguous regions constraint
- * IN - conn_type: connection type of request (SELECT_TORUS or SELECT_MESH)
- * 
+ * IN/OUT - ba_request: structure to allocate and fill in.  
  * return SUCCESS of operation.
  */
 extern int new_ba_request(ba_request_t* ba_request)
@@ -190,14 +186,16 @@ extern int new_ba_request(ba_request_t* ba_request)
 	int *geo_ptr;
 	int messed_with = 0;
 	
+	ba_request->save_name= NULL;
 	ba_request->rotate_count= 0;
 	ba_request->elongate_count = 0;
-	ba_request->elongate_geos = list_create(NULL);
+	ba_request->elongate_geos = list_create(_destroy_geo);
 	geo[X] = ba_request->geometry[X];
 	geo[Y] = ba_request->geometry[Y];
 	geo[Z] = ba_request->geometry[Z];
-		
-	if(geo[X] != -1) { 
+	passthrough = &ba_request->passthrough;
+
+	if(geo[X] != (uint16_t)NO_VAL) { 
 		for (i=0; i<BA_SYSTEM_DIMENSIONS; i++){
 			if ((geo[i] < 1) 
 			    ||  (geo[i] > DIM_SIZE[i])){
@@ -465,10 +463,10 @@ endit:
 	
 	ba_request->rotate_count= 0;
 	ba_request->elongate_count = 0;
-	ba_request->elongate_geos = list_create(NULL);
+	ba_request->elongate_geos = list_create(_destroy_geo);
 	geo[X] = ba_request->geometry[X];
 		
-	if(geo[X] != -1) { 
+	if(geo[X] != NO_VAL) { 
 		for (i=0; i<BA_SYSTEM_DIMENSIONS; i++){
 			if ((geo[i] < 1) 
 			    ||  (geo[i] > DIM_SIZE[i])){
@@ -495,14 +493,10 @@ endit:
  */
 extern void delete_ba_request(ba_request_t *ba_request)
 {
-	int *geo_ptr;
-
-	if(ba_request->save_name!=NULL)
-		xfree(ba_request->save_name);
+	xfree(ba_request->save_name);
+	if(ba_request->elongate_geos)
+		list_destroy(ba_request->elongate_geos);
 	
-	while((geo_ptr = list_pop(ba_request->elongate_geos)) != NULL)
-		xfree(geo_ptr);
-
 	xfree(ba_request);
 }
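delete_ba_request() above no longer walks elongate_geos popping and freeing every geometry by hand; new_ba_request() now passes _destroy_geo to list_create(), so list_destroy() releases the elements itself. A small sketch of that delete-function pattern, assuming only the List API from src/common/list.h plus xmalloc/xfree; the demo function and its int-triple elements are illustrative:

	#include "src/common/list.h"
	#include "src/common/xmalloc.h"

	/* delete function run by list_destroy() for every remaining element */
	static void _destroy_geo(void *object)
	{
		int *geo_ptr = (int *)object;
		xfree(geo_ptr);
	}

	static void _geo_list_demo(void)
	{
		List geos = list_create(_destroy_geo);	/* destructor registered here */
		int *geo = xmalloc(sizeof(int) * 3);

		geo[0] = 2;
		geo[1] = 2;
		geo[2] = 4;
		list_append(geos, geo);

		/* no manual list_pop()/xfree() loop needed any more */
		list_destroy(geos);
	}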
 
@@ -526,7 +520,6 @@ extern void print_ba_request(ba_request_t* ba_request)
 	debug("   conn_type:\t%d", ba_request->conn_type);
 	debug("      rotate:\t%d", ba_request->rotate);
 	debug("    elongate:\t%d", ba_request->elongate);
-	debug("force contig:\t%d", ba_request->force_contig);
 }
 
 /**
@@ -601,7 +594,10 @@ extern void ba_init(node_info_msg_t *node_info_ptr)
 	_db2_check();
 		
 	best_count=BEST_COUNT_INIT;
-						
+
+	if(ba_system_ptr)
+		_delete_ba_system();
+
 	ba_system_ptr = (ba_system_t *) xmalloc(sizeof(ba_system_t));
 	
 	ba_system_ptr->xcord = 1;
@@ -789,8 +785,8 @@ extern void ba_set_node_down(ba_node_t *ba_node)
 {
 	if (!_initialized){
 		error("Error, configuration not initialized, "
-			"call init_configuration first");
-		return;
+		      "calling ba_init(NULL)");
+		ba_init(NULL);
 	}
 
 #ifdef DEBUG_PA
@@ -820,9 +816,8 @@ extern int allocate_block(ba_request_t* ba_request, List results)
 {
 
 	if (!_initialized){
-		error("allocate_block Error, configuration not initialized, "
-		      "call init_configuration first");
-		return 0;
+		error("Error, configuration not initialized, "
+		      "calling ba_init(NULL)");
 	}
 
 	if (!ba_request){
@@ -949,7 +944,7 @@ extern char *set_bg_block(List results, int *start,
 		results = list_create(NULL);
 	else
 		send_results = 1;
-	start_list = list_create(NULL);
+	
 #ifdef HAVE_BG
 	if(start[X]>=DIM_SIZE[X] 
 	   || start[Y]>=DIM_SIZE[Y]
@@ -965,10 +960,10 @@ extern char *set_bg_block(List results, int *start,
 	ba_node = &ba_system_ptr->
 			grid[start[X]];	
 #endif
-
+	
 	if(!ba_node)
 		return NULL;
-	
+		
 	list_append(results, ba_node);
 	found = _find_x_path(results, ba_node,
 			     ba_node->coord, 
@@ -978,7 +973,7 @@ extern char *set_bg_block(List results, int *start,
 			     conn_type);
 
 	if(!found) {
-		debug("trying less efficient code");
+		debug2("trying less efficient code");
 		remove_block(results, color_count);
 		list_destroy(results);
 		results = list_create(NULL);
@@ -992,6 +987,7 @@ extern char *set_bg_block(List results, int *start,
 	}
 	if(found) {
 #ifdef HAVE_BG
+		start_list = list_create(NULL);
 		itr = list_iterator_create(results);
 		while((ba_node = (ba_node_t*) list_next(itr))) {
 			list_append(start_list, ba_node);
@@ -1001,8 +997,11 @@ extern char *set_bg_block(List results, int *start,
 		if(!_fill_in_coords(results, 
 				    start_list, 
 				    geometry, 
-				    conn_type))			
+				    conn_type)) {
+			list_destroy(start_list);
 			return NULL;
+		}
+		list_destroy(start_list);			
 #endif		
 	} else {
 		return NULL;
@@ -1509,7 +1508,7 @@ static int _find_yz_path(ba_node_t *ba_node, int *first,
 				       node_tar[X],
 				       node_tar[Y],
 				       node_tar[Z]);
-				if(conn_type == TORUS) {
+				if(conn_type == SELECT_TORUS) {
 					dim_curr_switch->
 						int_wire[0].used = 1;
 					dim_curr_switch->
@@ -1581,8 +1580,8 @@ static int _find_yz_path(ba_node_t *ba_node, int *first,
 				}
 								
 			} else {
-				if(conn_type == TORUS || 
-				   (conn_type == MESH && 
+				if(conn_type == SELECT_TORUS || 
+				   (conn_type == SELECT_MESH && 
 				    (node_tar[i2] != first[i2]))) {
 					dim_curr_switch->
 						int_wire[0].used = 1;
@@ -2009,11 +2008,11 @@ start_again:
 #endif
 		       x);
 	new_node:
-		debug("starting at %d%d%d",
-		      start[X]
+		debug2("starting at %d%d%d",
+		       start[X]
 #ifdef HAVE_BG
-		      , start[Y],
-		      start[Z]
+		       , start[Y],
+		       start[Z]
 #endif
 			);
 		
@@ -2038,7 +2037,7 @@ start_again:
 			if(ba_request->start_req) 
 				goto requested_end;
 			//exit(0);
-			debug("trying something else");
+			debug2("trying something else");
 			remove_block(results, color_count);
 			list_destroy(results);
 			results = list_create(NULL);
@@ -2076,7 +2075,7 @@ start_again:
 #endif
 	}							
 requested_end:
-	error("can't allocate");
+	debug("can't allocate");
 	
 	return 0;
 }
@@ -2458,15 +2457,16 @@ static char *_set_internal_wires(List nodes, int size, int conn_type)
 				set=1;
 			}
 		} else {
-			error("No network connection to create bgblock "
-			      "containing %s", name);
-			error("Use smap to define bgblocks in bluegene.conf");
+			debug("No network connection to create "
+			      "bgblock containing %s", name);
+			debug("Use smap to define bgblocks in "
+			      "bluegene.conf");
 			xfree(name);
 			return NULL;
 		}
 	}
 
-	if(conn_type == TORUS)
+	if(conn_type == SELECT_TORUS)
 		for(i=0;i<count;i++) {
 			_set_one_dim(start, end, ba_node[i]->coord);
 		}
@@ -2497,7 +2497,6 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 	int highest_phys_x = geometry[X] - start[X];
 	
 	ListIterator itr;
-	List path = NULL;
 
 	if(!ba_node)
 		return 0;
@@ -2554,12 +2553,9 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 				       node_tar[X],
 				       node_tar[Y],
 				       node_tar[Z]);
-				if((node_tar[X] == 
-				    next_node->coord[X] && 
-				    node_tar[Y] == 
-				    next_node->coord[Y] && 
-				    node_tar[Z] == 
-				    next_node->coord[Z])) {
+				if((node_tar[X] == next_node->coord[X] && 
+				    node_tar[Y] == next_node->coord[Y] && 
+				    node_tar[Z] == next_node->coord[Z])) {
 					not_first = 1;
 					break;
 				}				
@@ -2582,7 +2578,8 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 				;
 			next_switch = &next_node->axis_switch[X];
 
- 			if((conn_type == MESH) && (found == (geometry[X]))) {
+ 			if((conn_type == SELECT_MESH) 
+			   && (found == (geometry[X]))) {
 				debug2("we found the end of the mesh");
 				return 1;
 			}
@@ -2592,8 +2589,13 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 			       next_node->phys_x, highest_phys_x);
 			if(next_node->phys_x >= highest_phys_x) {
 				debug2("looking for a passthrough");
-				list_destroy(best_path);
+				if(best_path)
+					list_destroy(best_path);
 				best_path = list_create(_delete_path_list);
+				if(path)
+					list_destroy(path);
+				path = list_create(_delete_path_list);
+	
 				_find_passthrough(curr_switch,
 						  0,
 						  results,
@@ -2647,8 +2649,12 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 				goto found_path;
 			} else if(found == geometry[X]) {
 				debug2("finishing the torus!");
-				list_destroy(best_path);
+				if(best_path)
+					list_destroy(best_path);
 				best_path = list_create(_delete_path_list);
+				if(path)
+					list_destroy(path);
+				path = list_create(_delete_path_list);
 				_finish_torus(curr_switch, 
 					      0, 
 					      results, 
@@ -2786,10 +2792,10 @@ static int _find_x_path2(List results, ba_node_t *ba_node,
 	int *start, int *first, int *geometry, 
 	int found, int conn_type) 
 {
-ba_switch_t *curr_switch = NULL; 
-ba_switch_t *next_switch = NULL; 
+	ba_switch_t *curr_switch = NULL; 
+	ba_switch_t *next_switch = NULL; 
 	
-int port_tar;
+	int port_tar;
 	int source_port=0;
 	int target_port=0;
 	int num_visited=0;
@@ -2801,8 +2807,7 @@ int port_tar;
 	ba_node_t *check_node = NULL;
 	
 	ListIterator itr;
-	List path = NULL;
-
+	
 	if(!ba_node)
 		return 0;
 
@@ -2872,7 +2877,8 @@ int port_tar;
 			next_switch = &next_node->axis_switch[X];
 		
 			
- 			if((conn_type == MESH) && (found == (geometry[X]))) {
+ 			if((conn_type == SELECT_MESH) 
+			   && (found == (geometry[X]))) {
 				debug2("we found the end of the mesh");
 				return 1;
 			}
@@ -2882,8 +2888,12 @@ int port_tar;
 				goto found_path;
 			} else if(found == geometry[X]) {
 				debug2("finishing the torus!");
-				list_destroy(best_path);
+				if(best_path)
+					list_destroy(best_path);
 				best_path = list_create(_delete_path_list);
+				if(path)
+					list_destroy(path);
+				path = list_create(_delete_path_list);
 				_finish_torus(curr_switch, 
 					      0, 
 					      results, 
@@ -3007,7 +3017,6 @@ int port_tar;
 							= port_tar;
 					}
 					return 1;
-
 				}
 			} 			
 		}
@@ -3019,15 +3028,19 @@ int port_tar;
 	       ba_node->coord[Z]);
 #endif
 
-	list_destroy(best_path);
+	if(best_path)
+		list_destroy(best_path);
 	best_path = list_create(_delete_path_list);
+	if(path)
+		list_destroy(path);
+	path = list_create(_delete_path_list);
 	int ports_to_try2[2] = {2,4};
 	
 	_find_next_free_using_port_2(curr_switch, 
-			0, 
-			results, 
-			X, 
-			0);
+				     0, 
+				     results, 
+				     X, 
+				     0);
 	if(best_count < BEST_COUNT_INIT) {
 		debug2("yes found next free %d", best_count);
 		node_tar = _set_best_path();
@@ -3255,7 +3268,9 @@ static int _find_next_free_using_port_2(ba_switch_t *curr_switch,
 			_find_next_free_using_port_2(next_switch, 
 					port_tar, nodes,
 					dim, count);
-			while(list_pop(path) != path_add){
+			while((temp_switch = list_pop(path)) != path_add){
+				xfree(temp_switch);
+				debug3("something here 1");
 			}
 		}
 	}
@@ -3444,8 +3459,11 @@ static int _find_passthrough(ba_switch_t *curr_switch, int source_port,
 		
 				_find_passthrough(next_switch, port_tar, nodes,
 						dim, count, highest_phys_x);
-				while(list_pop(path) != path_add){
-				} 
+				while((temp_switch = list_pop(path)) 
+				      != path_add){
+					xfree(temp_switch);
+					debug3("something here 2");
+				}
 			}
 		}
 	}
@@ -3585,7 +3603,10 @@ static int _finish_torus(ba_switch_t *curr_switch, int source_port,
 				list_push(path, path_add);
 				_finish_torus(next_switch, port_tar, nodes,
 						dim, count, start);
-				while(list_pop(path) != path_add){
+				while((temp_switch = list_pop(path)) 
+				      != path_add){
+					xfree(temp_switch);
+					debug3("something here 3");
 				} 
 			}
 		}
@@ -3604,6 +3625,8 @@ static int *_set_best_path()
 		return NULL;
 	itr = list_iterator_create(best_path);
 	while((path_switch = (ba_path_switch_t*) list_next(itr))) {
+		if(passthrough)
+			*passthrough = true;
 #ifdef HAVE_BG
 		debug3("mapping %d%d%d",path_switch->geometry[X],
 		       path_switch->geometry[Y],
@@ -3661,6 +3684,11 @@ static int _set_one_dim(int *start, int *end, int *coord)
 	return 1;
 }
 
+static void _destroy_geo(void *object) {
+	int *geo_ptr = (int *)object;
+	xfree(geo_ptr);
+}
+
 //#define BUILD_EXE
 #ifdef BUILD_EXE
 /** */
@@ -3697,7 +3725,7 @@ int main(int argc, char** argv)
 /* 	request->size = 32; */
 /* 	request->rotate = 0; */
 /* 	request->elongate = 0; */
-/* 	request->conn_type = TORUS; */
+/* 	request->conn_type = SELECT_TORUS; */
 /* 	new_ba_request(request); */
 /* 	print_ba_request(request); */
 /* 	if(!allocate_block(request, results)) { */
@@ -3719,7 +3747,7 @@ int main(int argc, char** argv)
 	request->size = 1;
 	request->rotate = 0;
 	request->elongate = 0;
-	request->conn_type = TORUS;
+	request->conn_type = SELECT_TORUS;
 	new_ba_request(request);
 	print_ba_request(request);
 	if(!allocate_block(request, results)) {
@@ -3736,7 +3764,7 @@ int main(int argc, char** argv)
 	request->geometry[2] = 1;
 	request->start_req = 0;
 	request->size = 1;
-	request->conn_type = TORUS;
+	request->conn_type = SELECT_TORUS;
 	new_ba_request(request);
 	print_ba_request(request);
 	if(!allocate_block(request, results)) {
@@ -3752,7 +3780,7 @@ int main(int argc, char** argv)
 /* 	request->geometry[1] = 4; */
 /* 	request->geometry[2] = 4; */
 /* 	//request->size = 2; */
-/* 	request->conn_type = TORUS; */
+/* 	request->conn_type = SELECT_TORUS; */
 /* 	new_ba_request(request); */
 /* 	print_ba_request(request); */
 /* 	if(!allocate_block(request, results)) { */
@@ -3767,7 +3795,7 @@ int main(int argc, char** argv)
 /* 	request->geometry[1] = 4; */
 /* 	request->geometry[2] = 4; */
 /* 	//request->size = 2; */
-/* 	request->conn_type = TORUS; */
+/* 	request->conn_type = SELECT_TORUS; */
 /* 	new_ba_request(request); */
 /* 	print_ba_request(request); */
 /* 	if(!allocate_block(request, results)) { */
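The new file-global passthrough pointer is how the allocator now reports whether the chosen wiring had to route through midplanes outside the block: new_ba_request() points it at the request's passthrough member, and _set_best_path() sets it to true while replaying the best path. A condensed sketch of that report-back pattern; demo_request_t and both demo functions are simplified stand-ins, not the real ba_request_t or allocator code:

	#include <stdbool.h>
	#include <stddef.h>

	/* simplified stand-in for ba_request_t */
	typedef struct {
		bool passthrough;	/* true if the block needs pass-through wiring */
	} demo_request_t;

	static bool *passthrough = NULL;	/* points into the current request */

	static void demo_new_request(demo_request_t *req)
	{
		req->passthrough = false;
		passthrough = &req->passthrough;	/* remember where to report */
	}

	static void demo_set_best_path(bool hop_outside_block)
	{
		/* called per hop of the chosen path: flag the request once
		 * a hop routes through a midplane the block does not own */
		if (hop_outside_block && passthrough)
			*passthrough = true;
	}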
diff --git a/src/plugins/select/bluegene/block_allocator/block_allocator.h b/src/plugins/select/bluegene/block_allocator/block_allocator.h
index 2ea6885e7bc..29c7d4396df 100644
--- a/src/plugins/select/bluegene/block_allocator/block_allocator.h
+++ b/src/plugins/select/bluegene/block_allocator/block_allocator.h
@@ -80,8 +80,6 @@ extern bool have_db2;
 enum {X, Y, Z};
 
 /* */
-enum {MESH, TORUS, SMALL};
-enum {COPROCESSOR, VIRTUAL};
 
 /* NOTE: Definition of bg_info_record_t moved to src/api/node_select_info.h */
 
@@ -124,12 +122,15 @@ typedef struct {
 	int start[BA_SYSTEM_DIMENSIONS];
 	int start_req;
 	int size; 
+	int procs; 
 	int conn_type;
 	int rotate_count;
 	int elongate_count;
+	int nodecards;
+	int quarters;
+	bool passthrough;
 	bool rotate;
 	bool elongate; 
-	bool force_contig;
 	List elongate_geos;
 } ba_request_t; 
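With the procs, nodecards, quarters and passthrough members added above, a caller fills in a ba_request_t and hands it to new_ba_request()/allocate_block(), much as bg_job_place.c does later in this patch. A hedged example of initializing such a request; the concrete values are arbitrary, the 512 c-nodes-per-midplane figure is an assumption, and SELECT_TORUS is assumed to come from slurm.h as it does in the .c file:

	#include <stdbool.h>
	#include <string.h>
	#include <slurm/slurm.h>

	#include "src/plugins/select/bluegene/block_allocator/block_allocator.h"

	/* sketch: ask for a 2x2x2 torus block of 8 base partitions */
	static void demo_fill_request(ba_request_t *req)
	{
		memset(req, 0, sizeof(*req));

		req->geometry[X] = 2;
		req->geometry[Y] = 2;
		req->geometry[Z] = 2;
		req->size = 8;			/* base-partition count */
		req->procs = 8 * 512;		/* assumed 512 c-nodes per midplane */
		req->conn_type = SELECT_TORUS;
		req->rotate = true;
		req->elongate = true;
		req->start_req = 0;		/* no fixed starting coordinate */
		req->elongate_geos = NULL;	/* new_ba_request() creates this list */
	}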
 
@@ -167,7 +168,6 @@ typedef struct
 {
 	ba_connection_t int_wire[NUM_PORTS_PER_NODE];
 	ba_connection_t ext_wire[NUM_PORTS_PER_NODE];
-
 } ba_switch_t;
 
 /*
@@ -185,8 +185,7 @@ typedef struct {
 	int indecies;
 	int state;
 	int conn_type;
-	int phys_x;
-	
+	int phys_x;	
 } ba_node_t;
 
 typedef struct {
diff --git a/src/plugins/select/bluegene/plugin/bg_block_info.c b/src/plugins/select/bluegene/plugin/bg_block_info.c
index 45e1664e7e0..367148f4cb1 100644
--- a/src/plugins/select/bluegene/plugin/bg_block_info.c
+++ b/src/plugins/select/bluegene/plugin/bg_block_info.c
@@ -162,6 +162,7 @@ extern int block_ready(struct job_record *job_ptr)
 	rc = select_g_get_jobinfo(job_ptr->select_jobinfo,
 				  SELECT_DATA_BLOCK_ID, &block_id);
 	if (rc == SLURM_SUCCESS) {
+		slurm_mutex_lock(&block_state_mutex);
 		bg_record = find_bg_record(block_id);
 		
 		if(bg_record) {
@@ -177,6 +178,7 @@ extern int block_ready(struct job_record *job_ptr)
 			      block_id);
 			rc = READY_JOB_FATAL;	/* fatal error */
 		}
+		slurm_mutex_unlock(&block_state_mutex);
 		xfree(block_id);
 	} else
 		rc = READY_JOB_ERROR;
@@ -193,7 +195,9 @@ extern void pack_block(bg_record_t *bg_record, Buf buffer)
 	pack16((uint16_t)bg_record->state, buffer);
 	pack16((uint16_t)bg_record->conn_type, buffer);
 	pack16((uint16_t)bg_record->node_use, buffer);	
-	pack32(bg_record->quarter, buffer);	
+	pack16((uint16_t)bg_record->quarter, buffer);	
+	pack16((uint16_t)bg_record->segment, buffer);	
+	pack32((uint32_t)bg_record->node_cnt, buffer);	
 }
 
 extern int update_block_list()
@@ -243,7 +247,7 @@ extern int update_block_list()
 				break;
 			}
 		} else {
-			if ((rc = rm_get_data(block_list, RM_PartListFirstPart, 
+			if ((rc = rm_get_data(block_list, RM_PartListFirstPart,
 					      &block_ptr)) != STATUS_OK) {
 				error("rm_get_data(RM_PartListFirstPart: %s",
 				      bg_err_str(rc));
diff --git a/src/plugins/select/bluegene/plugin/bg_job_place.c b/src/plugins/select/bluegene/plugin/bg_job_place.c
index 6029c707f28..887f271a6d4 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_place.c
+++ b/src/plugins/select/bluegene/plugin/bg_job_place.c
@@ -29,9 +29,6 @@
 #include "src/common/node_select.h"
 #include "bluegene.h"
 
-#define BUFSIZE 4096
-#define BITSIZE 128
-
 #define _DEBUG 0
 
 #define SWAP(a,b,t)	\
@@ -42,10 +39,10 @@ _STMT_START {		\
 } _STMT_END
 
 static int  _find_best_block_match(struct job_record* job_ptr,
-				bitstr_t* slurm_block_bitmap,
-				int min_nodes, int max_nodes,
-				int spec, bg_record_t** found_bg_record,
-				bool test_only);
+				   bitstr_t* slurm_block_bitmap,
+				   int min_nodes, int max_nodes,
+				   int spec, bg_record_t** found_bg_record,
+				   bool test_only);
 static void _rotate_geo(uint16_t *req_geometry, int rot_cnt);
 
 /* Rotate a 3-D geometry array through its six permutations */
@@ -55,22 +52,14 @@ static void _rotate_geo(uint16_t *req_geometry, int rot_cnt)
 
 	switch (rot_cnt) {
 		case 0:		/* ABC -> ACB */
-			SWAP(req_geometry[1], req_geometry[2], tmp);
-			break;
-		case 1:		/* ACB -> CAB */
-			SWAP(req_geometry[0], req_geometry[1], tmp);
-			break;
 		case 2:		/* CAB -> CBA */
-			SWAP(req_geometry[1], req_geometry[2], tmp);
-			break;
-		case 3:		/* CBA -> BCA */
-			SWAP(req_geometry[0], req_geometry[1], tmp);
-			break;
 		case 4:		/* BCA -> BAC */
-			SWAP(req_geometry[1], req_geometry[2], tmp);
+			SWAP(req_geometry[Y], req_geometry[Z], tmp);
 			break;
+		case 1:		/* ACB -> CAB */
+		case 3:		/* CBA -> BCA */
 		case 5:		/* BAC -> ABC */
-			SWAP(req_geometry[0], req_geometry[1], tmp);
+			SWAP(req_geometry[X], req_geometry[Y], tmp);
 			break;
 	}
 }
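The rewritten switch collapses the six rotation steps into two swap patterns: even counts swap Y and Z, odd counts swap X and Y, and six applications cycle ABC -> ACB -> CAB -> CBA -> BCA -> BAC -> ABC. A standalone check of that cycle, reusing the same swap idea (the SWAP macro and main() here exist only for the demonstration):

	#include <stdint.h>
	#include <stdio.h>

	#define SWAP(a,b,t) do { (t) = (a); (a) = (b); (b) = (t); } while (0)

	enum {X, Y, Z};

	static void rotate_geo(uint16_t *g, int rot_cnt)
	{
		uint16_t tmp;

		switch (rot_cnt % 6) {
		case 0:		/* ABC -> ACB */
		case 2:		/* CAB -> CBA */
		case 4:		/* BCA -> BAC */
			SWAP(g[Y], g[Z], tmp);
			break;
		case 1:		/* ACB -> CAB */
		case 3:		/* CBA -> BCA */
		case 5:		/* BAC -> ABC */
			SWAP(g[X], g[Y], tmp);
			break;
		}
	}

	int main(void)
	{
		uint16_t geo[3] = {1, 2, 3};
		int i;

		for (i = 0; i < 6; i++) {
			rotate_geo(geo, i);
			printf("rot %d -> %ux%ux%u\n", i,
			       (unsigned) geo[X], (unsigned) geo[Y],
			       (unsigned) geo[Z]);
		}
		/* after six rotations the geometry is 1x2x3 again */
		return 0;
	}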
@@ -91,70 +80,86 @@ static int _find_best_block_match(struct job_record* job_ptr,
 		int spec, bg_record_t** found_bg_record, bool test_only)
 {
 	ListIterator itr;
-	bg_record_t* record = NULL;
-	int i, job_running = 0;
+	ListIterator itr2;
+	bg_record_t *record = NULL;
+	bg_record_t *found_record = NULL;
 	uint16_t req_geometry[BA_SYSTEM_DIMENSIONS];
-	uint16_t conn_type, rotate, target_size = 1;
+	uint16_t conn_type, rotate, target_size = 0;
 	uint32_t req_procs = job_ptr->num_procs;
-	int rot_cnt = 0;
 	uint32_t proc_cnt;
-       
+	ba_request_t request; 
+	int i, job_running = 0;
+	int rot_cnt = 0;
+	int created = 0;
+	int found = 0;
+	int max_procs = NO_VAL;
+	List lists_of_lists = NULL;
+	List temp_list = NULL;
+	char tmp_char[256];
+	bitstr_t* tmp_bitmap = NULL;
+
 	if(!bg_list) {
 		error("_find_best_block_match: There is no bg_list");
 		return SLURM_ERROR;
 	}
 	
 	select_g_get_jobinfo(job_ptr->select_jobinfo,
-		SELECT_DATA_CONN_TYPE, &conn_type);
+			     SELECT_DATA_CONN_TYPE, &conn_type);
+	select_g_get_jobinfo(job_ptr->select_jobinfo,
+			     SELECT_DATA_GEOMETRY, &req_geometry);
 	select_g_get_jobinfo(job_ptr->select_jobinfo,
-		SELECT_DATA_GEOMETRY, req_geometry);
+			     SELECT_DATA_ROTATE, &rotate);
 	select_g_get_jobinfo(job_ptr->select_jobinfo,
-		SELECT_DATA_ROTATE, &rotate);
-	for (i=0; i<BA_SYSTEM_DIMENSIONS; i++)
-		target_size *= req_geometry[i];
-	if (target_size == 0)	/* no geometry specified */
+			     SELECT_DATA_ROTATE, &rotate);
+	select_g_get_jobinfo(job_ptr->select_jobinfo,
+			     SELECT_DATA_MAX_PROCS, &max_procs);
+
+	if(req_geometry[0] != (uint16_t)NO_VAL)
+		for (i=0; i<BA_SYSTEM_DIMENSIONS; i++)
+			target_size *= (uint16_t)req_geometry[i];
+	if (target_size == 0) {	/* no geometry specified */
 		target_size = min_nodes;
+		req_geometry[X] = (uint16_t)NO_VAL;
+	}
 	/* this is where we should have the control flow depending on
 	 * the spec argument */
 
 	*found_bg_record = NULL;
-	
-	debug("number of blocks to check: %d", list_count(bg_list));
+try_again:	
+	slurm_mutex_lock(&block_state_mutex);
+	debug("number of blocks to check: %d state %d", 
+	      list_count(bg_list),
+	      test_only);
      	itr = list_iterator_create(bg_list);
 	while ((record = (bg_record_t*) list_next(itr))) {
-		if ((record->job_running != -1) && (!test_only)) {
-			job_running++;
-			continue;
-		}
-		if(record->full_block && job_running) {
-			debug("Can't run on full system block "
-				"another block has a job running.");
+		/* Check processor count */
+		proc_cnt = record->bp_count * record->cpus_per_bp;
+		debug3("asking for %d-%d looking at %d", 
+		      req_procs, max_procs, proc_cnt);
+		if ((proc_cnt < req_procs)
+		    || (max_procs != NO_VAL && proc_cnt > max_procs)) {
+			/* We use the processor count per partition here
+			   mostly to see if we can run on a smaller partition. 
+			 */
+			convert_to_kilo(proc_cnt, tmp_char);
+			debug("block %s CPU count (%s) not suitable",
+			      record->bg_block_id, 
+			      tmp_char);
 			continue;
 		}
 
-		if (req_procs > record->cnodes_per_bp) {
-			/* We use the c-node count here. Job could start
-			 * twice this count if VIRTUAL_NODE_MODE, but this
-			 * is now controlled by mpirun, not SLURM 
-			 * We now use the number set by the admins in the
-			 * slurm.conf file.  This should never happen.
-			 */
-			proc_cnt = record->bp_count * record->cnodes_per_bp;
-			if (req_procs > proc_cnt) {
-				debug("block %s CPU count too low",
-					record->bg_block_id);
-				continue;
-			}
-		}
-		
 		/*
 		 * check that the number of nodes is suitable
 		 */
- 		if ((record->bp_count < min_nodes)
+ 		debug3("asking for %d-%d bps looking at %d", 
+		      min_nodes, max_nodes, record->bp_count);
+		if ((record->bp_count < min_nodes)
 		    ||  (max_nodes != 0 && record->bp_count > max_nodes)
 		    ||  (record->bp_count < target_size)) {
-			debug("block %s node count not suitable",
-				record->bg_block_id);
+			convert_to_kilo(record->node_cnt, tmp_char);
+			debug("block %s node count (%s) not suitable",
+			      record->bg_block_id,
+			      tmp_char);
 			continue;
 		}
 		
@@ -166,8 +171,8 @@ static int _find_best_block_match(struct job_record* job_ptr,
 		 * SLURM block not available to this job.
 		 */
 		if (!bit_super_set(record->bitmap, slurm_block_bitmap)) {
-			debug("bg block %s has nodes not usable by this "
-				"job", record->bg_block_id);
+			debug("bg block %s has nodes not usable by this job",
+			      record->bg_block_id);
 			continue;
 		}
 
@@ -181,28 +186,93 @@ static int _find_best_block_match(struct job_record* job_ptr,
 				record->bg_block_id);
 			continue;
 		}
-
+		/* If test_only, fall through to tell the scheduler
+		   that the job is runnable, just not right now.
+		*/
+		debug3("job_running = %d", record->job_running);
+		if((record->job_running != NO_VAL) 
+		   && !test_only) {
+			debug("block %s in use by %s", 
+			      record->bg_block_id,
+			      record->user_name);
+			found = 1;
+			continue;
+		}
+		
+		/* Make sure no other partitions under this partition
+		   are booted and running jobs
+		*/
+		itr2 = list_iterator_create(bg_list);
+		while ((found_record = (bg_record_t*)
+			list_next(itr2)) != NULL) {
+			if ((!found_record->bg_block_id)
+			    || (!strcmp(record->bg_block_id, 
+					found_record->bg_block_id)))
+				continue;
+			if(blocks_overlap(record, found_record)) {
+				if((found_record->job_running != NO_VAL) 
+				   && !test_only) {
+					debug("can't use %s, there is a job "
+					      "(%d) running on an overlapping "
+					      "block %s", 
+					      record->bg_block_id,
+					      found_record->job_running,
+					      found_record->bg_block_id);
+					if(bluegene_layout_mode == 
+					   LAYOUT_DYNAMIC) {
+						num_block_to_free = 0;
+						num_block_freed = 0;
+						list_remove(itr);
+						temp_list = list_create(NULL);
+						list_push(temp_list, record);
+						num_block_to_free++;
+						free_block_list(temp_list);
+						list_destroy(temp_list);
+						slurm_mutex_unlock(
+							&block_state_mutex);
+						/* wait for all necessary 
+						   blocks to be freed */
+						while(num_block_to_free 
+						      != num_block_freed) {
+							sleep(1);
+						}
+						slurm_mutex_lock(
+							&block_state_mutex);
+					}	
+					break;
+				}
+			} 
+		}
+		list_iterator_destroy(itr2);
+		if(found_record) {
+			found = 1;
+			continue;
+		} 
+				
+			
 		/***********************************************/
 		/* check the connection type specified matches */
 		/***********************************************/
 		if ((conn_type != record->conn_type)
-		&&  (conn_type != SELECT_NAV)) {
-			debug("bg block %s conn-type not usable", 
-				record->bg_block_id);
+		    && (conn_type != SELECT_NAV)) {
+			debug("bg block %s conn-type not usable asking for %s "
+			      "record is %s", 
+			      record->bg_block_id,
+			      convert_conn_type(conn_type),
+			      convert_conn_type(record->conn_type));
 			continue;
 		} 
 
 		/*****************************************/
 		/* match up geometry as "best" possible  */
 		/*****************************************/
-		if (req_geometry[0] == 0)
+		if (req_geometry[X] == (uint16_t)NO_VAL)
 			;	/* Geometry not specified */
 		else {	/* match requested geometry */
 			bool match = false;
 			rot_cnt = 0;	/* attempt six rotations  */
 
-			for (rot_cnt=0; rot_cnt<6; rot_cnt++) {
-				
+			for (rot_cnt=0; rot_cnt<6; rot_cnt++) {		
 				if ((record->geo[X] >= req_geometry[X])
 				&&  (record->geo[Y] >= req_geometry[Y])
 				&&  (record->geo[Z] >= req_geometry[Z])) {
@@ -217,21 +287,112 @@ static int _find_best_block_match(struct job_record* job_ptr,
 			if (!match) 
 				continue;	/* Not usable */
 		}
-		
 		*found_bg_record = record;
 		break;
 	}
 	list_iterator_destroy(itr);
-				
+	
+	if(!found && test_only && bluegene_layout_mode == LAYOUT_DYNAMIC) {
+		slurm_mutex_unlock(&block_state_mutex);
+		
+		for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) 
+			request.start[i] = 0;
+			
+		for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) 
+			request.geometry[i] = req_geometry[i];
+			
+		request.save_name = NULL;
+		request.elongate_geos = NULL;
+		request.size = target_size;
+		request.procs = req_procs;
+		request.conn_type = conn_type;
+		request.rotate = rotate;
+		request.elongate = true;
+		request.start_req=0;
+		debug("trying with all free blocks");
+		if(create_dynamic_block(&request, NULL) == SLURM_ERROR) {
+			error("this job will never run on "
+			      "this system");
+			return SLURM_ERROR;
+		} else {
+			if(!request.save_name) {
+				error("no name returned from "
+				      "create_dynamic_block");
+				return SLURM_ERROR;
+			} 
+			sprintf(tmp_char, "%s%s", 
+				slurmctld_conf.node_prefix, request.save_name);
+			if (node_name2bitmap(tmp_char, 
+					     false, 
+					     &tmp_bitmap)) {
+				fatal("Unable to convert nodes %s to bitmap", 
+				      request.save_name);
+			}
+			
+			bit_and(slurm_block_bitmap, tmp_bitmap);
+			bit_free(tmp_bitmap);
+			xfree(request.save_name);
+			return SLURM_SUCCESS;
+		}
+	} else if(!*found_bg_record 
+		  && !created 
+		  && bluegene_layout_mode == LAYOUT_DYNAMIC) {
+		debug2("going to create %d", target_size);
+		slurm_mutex_unlock(&block_state_mutex);
+		lists_of_lists = list_create(NULL);
+		list_append(lists_of_lists, bg_list);
+		list_append(lists_of_lists, bg_booted_block_list);
+		list_append(lists_of_lists, bg_job_block_list);
+		itr = list_iterator_create(lists_of_lists);
+		while ((temp_list = (List)list_next(itr)) != NULL) {
+			created++;
+
+			for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) 
+				request.start[i] = 0;
+			
+			for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) 
+				request.geometry[i] = req_geometry[i];
+			
+			request.save_name = NULL;
+			request.elongate_geos = NULL;
+			request.size = target_size;
+			request.procs = req_procs;
+			request.conn_type = conn_type;
+			request.rotate = rotate;
+			request.elongate = true;
+			request.start_req=0;
+			/* 1- try empty space
+			   2- see if we can create one in the
+			      unused bps
+			   3- see if we can create one in the bps
+			      not running jobs
+			*/
+			debug("trying with %d", created);
+			if(create_dynamic_block(&request, temp_list) 
+			   == SLURM_SUCCESS) {
+				list_iterator_destroy(itr);
+				list_destroy(lists_of_lists);
+				lists_of_lists = NULL;
+				goto try_again;
+			}
+		}
+		list_iterator_destroy(itr);
+		if(lists_of_lists)
+			list_destroy(lists_of_lists);
+		slurm_mutex_lock(&block_state_mutex);		
+	}
+	
 	/* set the bitmap and do other allocation activities */
 	if (*found_bg_record) {
 		debug("_find_best_block_match %s <%s>", 
 			(*found_bg_record)->bg_block_id, 
 			(*found_bg_record)->nodes);
 		bit_and(slurm_block_bitmap, (*found_bg_record)->bitmap);
+		slurm_mutex_unlock(&block_state_mutex);
 		return SLURM_SUCCESS;
 	}
-	
+		
+	slurm_mutex_unlock(&block_state_mutex);
 	debug("_find_best_block_match none found");
 	return SLURM_ERROR;
 }
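blocks_overlap() is what the loops above lean on, both to refuse a candidate whose midplanes are shared with a block that already has a job running and, in dynamic layout mode, to decide which blocks must be freed first. If the overlap test were purely a matter of node bitmaps it could look like the sketch below, built on SLURM's bitstring API (src/common/bitstring.h); the real check in bluegene.c presumably also accounts for sub-midplane quarters and segments, so treat this as an approximation:

	#include "src/common/bitstring.h"

	/* approximate overlap test: two blocks overlap if any midplane bit
	 * is set in both node bitmaps */
	static int demo_blocks_overlap(bitstr_t *bitmap_a, bitstr_t *bitmap_b)
	{
		bitstr_t *tmp = bit_copy(bitmap_a);
		int overlap;

		bit_and(tmp, bitmap_b);			/* tmp = a & b */
		overlap = (bit_set_count(tmp) > 0);
		bit_free(tmp);

		return overlap;
	}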
@@ -252,7 +413,11 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 	int spec = 1; /* this will be like, keep TYPE a priority, etc,  */
 	bg_record_t* record = NULL;
 	char buf[100];
-		
+	int i, rc = SLURM_SUCCESS;
+	uint16_t geo[BA_SYSTEM_DIMENSIONS];
+	uint16_t tmp16 = (uint16_t)NO_VAL;
+	
+	
 	select_g_sprint_jobinfo(job_ptr->select_jobinfo, buf, sizeof(buf), 
 		SELECT_PRINT_MIXED);
 	debug("bluegene:submit_job: %s nodes=%d-%d", 
@@ -260,17 +425,55 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 	      min_nodes, 
 	      max_nodes);
 	
-	if ((_find_best_block_match(job_ptr, slurm_block_bitmap, min_nodes, 
-				max_nodes, spec, &record, test_only)) 
-				== SLURM_ERROR) {
-		return SLURM_ERROR;
-	} else {
-		/* set the block id and quarter (if any) */
-		select_g_set_jobinfo(job_ptr->select_jobinfo,
-			SELECT_DATA_BLOCK_ID, record->bg_block_id);
-		select_g_set_jobinfo(job_ptr->select_jobinfo,
-			SELECT_DATA_QUARTER, &record->quarter);
+	rc = _find_best_block_match(job_ptr, slurm_block_bitmap, min_nodes, 
+				    max_nodes, spec, &record, test_only);
+	
+	if (rc == SLURM_SUCCESS) {
+		if(!record) {
+			debug2("can run, but block not made");
+			select_g_set_jobinfo(job_ptr->select_jobinfo,
+					     SELECT_DATA_BLOCK_ID,
+					     "unassigned");
+			
+			min_nodes *= bluegene_bp_node_cnt;
+			select_g_set_jobinfo(job_ptr->select_jobinfo,
+					     SELECT_DATA_NODE_CNT,
+					     &min_nodes);
+			
+			for (i=0; i<BA_SYSTEM_DIMENSIONS; i++)
+				geo[i] = 0;
+			select_g_set_jobinfo(job_ptr->select_jobinfo,
+					     SELECT_DATA_GEOMETRY, 
+					     &geo);
+			
+		} else {
+			/* set the block id and info about block */
+			select_g_set_jobinfo(job_ptr->select_jobinfo,
+					     SELECT_DATA_BLOCK_ID, 
+					     record->bg_block_id);
+			select_g_set_jobinfo(job_ptr->select_jobinfo,
+					     SELECT_DATA_QUARTER, 
+					     &record->quarter);
+			select_g_set_jobinfo(job_ptr->select_jobinfo,
+					     SELECT_DATA_SEGMENT, 
+					     &record->segment);
+			select_g_set_jobinfo(job_ptr->select_jobinfo,
+					     SELECT_DATA_NODE_CNT, 
+					     &record->node_cnt);
+			select_g_set_jobinfo(job_ptr->select_jobinfo,
+					     SELECT_DATA_GEOMETRY, 
+					     &record->geo);
+			select_g_set_jobinfo(job_ptr->select_jobinfo,
+					     SELECT_DATA_CONN_TYPE, 
+					     &record->conn_type);
+		}
+		if(test_only) {
+			select_g_set_jobinfo(job_ptr->select_jobinfo,
+					     SELECT_DATA_BLOCK_ID,
+					     "unassigned");
+				
+		} 
 	}
 
-	return SLURM_SUCCESS;
+	return rc;
 }
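submit_job() above records everything it learned about the chosen (or, for test_only, still hypothetical) block through select_g_set_jobinfo(), using the literal "unassigned" as the block id when no block has actually been picked. Other parts of the daemon read those values back with the matching getter; a short hedged sketch of that read side, assuming the SELECT_DATA_* keys used in this patch and struct job_record from slurmctld.h (demo_report_block itself is hypothetical):

	#include "src/common/log.h"
	#include "src/common/node_select.h"
	#include "src/common/xmalloc.h"
	#include "src/slurmctld/slurmctld.h"

	/* sketch: read back what submit_job() stored for a job */
	static void demo_report_block(struct job_record *job_ptr)
	{
		char *block_id = NULL;
		uint32_t node_cnt = 0;

		select_g_get_jobinfo(job_ptr->select_jobinfo,
				     SELECT_DATA_BLOCK_ID, &block_id);
		select_g_get_jobinfo(job_ptr->select_jobinfo,
				     SELECT_DATA_NODE_CNT, &node_cnt);

		info("job %u: block %s, %u c-nodes", job_ptr->job_id,
		     block_id ? block_id : "unassigned", node_cnt);
		xfree(block_id);	/* BLOCK_ID comes back as an xmalloc'd copy */
	}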
diff --git a/src/plugins/select/bluegene/plugin/bg_job_place.h b/src/plugins/select/bluegene/plugin/bg_job_place.h
index 38d6190e4d9..57c5affa4d5 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_place.h
+++ b/src/plugins/select/bluegene/plugin/bg_job_place.h
@@ -33,9 +33,12 @@
  * Try to find resources for a given job request
  * IN job_ptr - pointer to job record in slurmctld
  * IN/OUT bitmap - nodes available for assignment to job, clear those not to 
- *	be used
+ *	           be used
  * IN min_nodes, max_nodes  - minimum and maximum number of nodes to allocate 
- *	to this job (considers slurm partition limits)
+ *	                      to this job (considers slurm partition limits)
+ * IN test_only - test to see if job is ever runnable, 
+ *                or (false) runnable right now
  * IN test_only - if true, only test if ever could run, not necessarily now
  * RET - SLURM_SUCCESS if job runnable now, error code otherwise 
  */
diff --git a/src/plugins/select/bluegene/plugin/bg_job_run.c b/src/plugins/select/bluegene/plugin/bg_job_run.c
index a391bdfc623..9e8ccc525eb 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_run.c
+++ b/src/plugins/select/bluegene/plugin/bg_job_run.c
@@ -52,11 +52,9 @@
 #include "src/slurmctld/proc_req.h"
 #include "bluegene.h"
 
-#ifdef HAVE_BG_FILES
-
 #define MAX_POLL_RETRIES    220
 #define POLL_INTERVAL        3
-#define MAX_AGENT_COUNT      130
+
 enum update_op {START_OP, TERM_OP, SYNC_OP};
 
 typedef struct bg_update {
@@ -70,8 +68,13 @@ typedef struct bg_update {
 static List bg_update_list = NULL;
 
 static pthread_mutex_t agent_cnt_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t job_start_mutex = PTHREAD_MUTEX_INITIALIZER;
 static int agent_cnt = 0;
 
+#ifdef HAVE_BG_FILES
+static int	_remove_job(db_job_id_t job_id);
+#endif
+
 static void	_bg_list_del(void *x);
 static int	_excise_block(List block_list, 
 			      pm_partition_id_t bg_block_id, 
@@ -79,23 +82,12 @@ static int	_excise_block(List block_list,
 static List	_get_all_blocks(void);
 static void *	_block_agent(void *args);
 static void	_block_op(bg_update_t *bg_update_ptr);
-static int	_remove_job(db_job_id_t job_id);
 static void	_start_agent(bg_update_t *bg_update_ptr);
 static void	_sync_agent(bg_update_t *bg_update_ptr);
 static void	_term_agent(bg_update_t *bg_update_ptr);
 
 
-/* Delete a bg_update_t record */
-static void _bg_list_del(void *x)
-{
-	bg_update_t *bg_update_ptr = (bg_update_t *) x;
-
-	if (bg_update_ptr) {
-		xfree(bg_update_ptr->bg_block_id);
-		xfree(bg_update_ptr);
-	}
-}
-
+#ifdef HAVE_BG_FILES
 /* Kill a job and remove its record from MMCS */
 static int _remove_job(db_job_id_t job_id)
 {
@@ -176,8 +168,18 @@ static int _remove_job(db_job_id_t job_id)
 	error("Failed to remove job %d from MMCS", job_id);
 	return INTERNAL_ERROR;
 }
+#endif
 
+/* Delete a bg_update_t record */
+static void _bg_list_del(void *x)
+{
+	bg_update_t *bg_update_ptr = (bg_update_t *) x;
 
+	if (bg_update_ptr) {
+		xfree(bg_update_ptr->bg_block_id);
+		xfree(bg_update_ptr);
+	}
+}
 
 /* Update block user and reboot as needed */
 static void _sync_agent(bg_update_t *bg_update_ptr)
@@ -191,9 +193,10 @@ static void _sync_agent(bg_update_t *bg_update_ptr)
 	}
 	slurm_mutex_lock(&block_state_mutex);
 	bg_record->job_running = bg_update_ptr->job_id;
+	list_push(bg_job_block_list, bg_record);
 	slurm_mutex_unlock(&block_state_mutex);
-	
-	if(bg_record->state==RM_PARTITION_READY) {
+
+	if(bg_record->state == RM_PARTITION_READY) {
 		if(bg_record->user_uid != bg_update_ptr->uid) {
 			slurm_mutex_lock(&block_state_mutex);
 			debug("User isn't correct for job %d on %s, "
@@ -228,21 +231,18 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 	bg_record_t *bg_record = NULL;
 	bg_record_t *found_record = NULL;
 	ListIterator itr;
-	pthread_attr_t attr_agent;
-	pthread_t thread_agent;
-	int retries;
-	
+	List delete_list;
+
+	slurm_mutex_lock(&job_start_mutex);
+		
 	bg_record = find_bg_record(bg_update_ptr->bg_block_id);
 	if(!bg_record) {
 		error("block %s not found in bg_list",
 		      bg_update_ptr->bg_block_id);
+		slurm_mutex_unlock(&job_start_mutex);
 		return;
 	}
 
-	slurm_mutex_lock(&block_state_mutex);
-	bg_record->job_running = bg_update_ptr->job_id;
-	slurm_mutex_unlock(&block_state_mutex);
-			
 	if(bg_record->state == RM_PARTITION_DEALLOCATING) {
 		debug("Block is in Deallocating state, waiting for free.");
 		bg_free_block(bg_record);
@@ -251,63 +251,43 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 	if(bg_record->state == RM_PARTITION_FREE) {
 		num_block_to_free = 0;
 		num_block_freed = 0;
+		slurm_mutex_lock(&block_state_mutex);
+		delete_list = list_create(NULL);
 		itr = list_iterator_create(bg_list);
-		if(bg_record->full_block) {
-			debug("Using full block freeing all others");
-			while ((found_record = (bg_record_t*) 
-				list_next(itr)) != NULL) {
-				if(found_record->state != RM_PARTITION_FREE) {
-					slurm_attr_init(&attr_agent);
-					if (pthread_attr_setdetachstate(
-						    &attr_agent, 
-						    PTHREAD_CREATE_JOINABLE))
-						error("pthread_attr_setdetach"
-						      "state error %m");
-
-					retries = 0;
-					while (pthread_create(&thread_agent, 
-							      &attr_agent, 
-							      mult_free_block, 
-							      (void *)
-							      found_record)) {
-						error("pthread_create "
-						      "error %m");
-						if (++retries 
-						    > MAX_PTHREAD_RETRIES)
-							fatal("Can't create "
-							      "pthread");
-						/* sleep and retry */
-						usleep(1000);	
-					}
-					num_block_to_free++;
-				}
-			}		
-		} else {
-			while ((found_record = (bg_record_t*) 
-				list_next(itr)) != NULL) {
-				if (found_record->full_block) {
-					if(found_record->state 
-					   != RM_PARTITION_FREE) {
-						debug("destroying the "
-						      "full block %s.", 
-						      found_record->
-						      bg_block_id);
-						bg_free_block(
-							found_record);
-					}
-					break;
-				}
+		while ((found_record = (bg_record_t*) 
+			list_next(itr)) != NULL) {
+			if ((!found_record) || (bg_record == found_record))
+				continue;
+
+			if(!blocks_overlap(bg_record, found_record)) {
+				debug2("block %s isn't part of %s",
+				      found_record->bg_block_id, 
+				      bg_record->bg_block_id);
+				continue;
 			}
-		} 
+									
+			debug("need to make sure %s is free, it's part of %s",
+			      found_record->bg_block_id, 
+			      bg_record->bg_block_id);
+			list_push(delete_list, found_record);
+			if(bluegene_layout_mode == LAYOUT_DYNAMIC)
+				list_remove(itr);
+			num_block_to_free++;
+		}		
 		list_iterator_destroy(itr);
-		
+	        free_block_list(delete_list);
+		list_destroy(delete_list);
+		slurm_mutex_unlock(&block_state_mutex);
+	
 		/* wait for all necessary blocks to be freed */
 		while(num_block_to_free != num_block_freed) {
 			sleep(1);
 		}
 		
-		if(bg_record->job_running == -1) 
+		if(bg_record->job_running == NO_VAL) {
+			slurm_mutex_unlock(&job_start_mutex);
 			return;
+		}
 		if((rc = boot_block(bg_record))
 		   != SLURM_SUCCESS) {
 			sleep(2);	
@@ -315,15 +295,16 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 			   the batch script, slurm_fail_job() 
 			   is a no-op if issued prior 
 			   to the script initiation */
-			(void) slurm_fail_job(
-				bg_update_ptr->job_id);
+			(void) slurm_fail_job(bg_update_ptr->job_id);
+			slurm_mutex_unlock(&job_start_mutex);
 			return;
 		}
 	} else if (bg_record->state == RM_PARTITION_CONFIGURING) {
 		bg_record->boot_state = 1;		
 	}
-
+	
 	slurm_mutex_lock(&block_state_mutex);
+
 	bg_record->boot_count = 0;
 	xfree(bg_record->target_name);
 	bg_record->target_name = xstrdup(uid_to_string(bg_update_ptr->uid));
@@ -338,28 +319,32 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 		set_block_user(bg_record); 
 	}
 	slurm_mutex_unlock(&block_state_mutex);	
+	slurm_mutex_unlock(&job_start_mutex);
+	
 }
 
 /* Perform job termination work */
 static void _term_agent(bg_update_t *bg_update_ptr)
 {
-	int i, jobs, rc;
-	rm_job_list_t *job_list = NULL;
-	int live_states;
-	rm_element_t *job_elem = NULL;
-	pm_partition_id_t block_id;
-	db_job_id_t job_id;
 	bg_record_t *bg_record = NULL;
 	time_t now;
 	struct tm *time_ptr;
 	char reason[128];
 	int job_remove_failed = 0;
-
+	
+#ifdef HAVE_BG_FILES
+	rm_element_t *job_elem = NULL;
+	rm_job_list_t *job_list = NULL;
+	db_job_id_t job_id;
+	int live_states;
+	pm_partition_id_t block_id;
+	int i, jobs, rc;
+	
 	debug2("getting the job info");
 	live_states = JOB_ALL_FLAG 
 		& (~JOB_TERMINATED_FLAG) 
-		& (~JOB_ERROR_FLAG)
-		& (~JOB_KILLED_FLAG);
+		& (~JOB_KILLED_FLAG)
+		& (~JOB_ERROR_FLAG);
 	slurm_mutex_lock(&api_file_mutex);
 	if ((rc = rm_get_jobs(live_states, &job_list)) != STATUS_OK) {
 		error("rm_get_jobs(): %s", bg_err_str(rc));
@@ -432,7 +417,7 @@ static void _term_agent(bg_update_t *bg_update_ptr)
 			break;
 		}
 	}
-	
+#endif
 	/* remove the block's users */
 	bg_record = find_bg_record(bg_update_ptr->bg_block_id);
 	if(bg_record) {
@@ -456,7 +441,7 @@ static void _term_agent(bg_update_t *bg_update_ptr)
 		}
 			
 		slurm_mutex_lock(&block_state_mutex);
-		bg_record->job_running = -1;
+		bg_record->job_running = NO_VAL;
 		
 		/*remove user from list */
 		if(bg_record->target_name) {
@@ -479,17 +464,20 @@ static void _term_agent(bg_update_t *bg_update_ptr)
 		
 		last_bg_update = time(NULL);
 		slurm_mutex_unlock(&block_state_mutex);
+		remove_from_bg_list(bg_job_block_list, bg_record);
 	} 
-not_removed:
+#ifdef HAVE_BG_FILES
 	if ((rc = rm_free_job_list(job_list)) != STATUS_OK)
 		error("rm_free_job_list(): %s", bg_err_str(rc));
+#endif
+	
 }
 	
 /* Process requests off the bg_update_list queue and exit when done */
 static void *_block_agent(void *args)
 {
-	bg_update_t *bg_update_ptr;
-
+	bg_update_t *bg_update_ptr = NULL;
+	
 	/*
 	 * Don't just exit when there is no work left. Creating 
 	 * pthreads from within a dynamically linked object (plugin)
@@ -528,14 +516,15 @@ static void _block_op(bg_update_t *bg_update_ptr)
 	
 	slurm_mutex_lock(&agent_cnt_mutex);
 	if ((bg_update_list == NULL)
-	&&  ((bg_update_list = list_create(_bg_list_del)) == NULL))
+	    &&  ((bg_update_list = list_create(_bg_list_del)) == NULL))
 		fatal("malloc failure in start_job/list_create");
 
 	/* push job onto queue in a FIFO */
 	if (list_push(bg_update_list, bg_update_ptr) == NULL)
 		fatal("malloc failure in _block_op/list_push");
-	
-	if (agent_cnt > MAX_AGENT_COUNT) {	/* already running an agent */
+	/* already running MAX_AGENT_COUNT agents; we don't really need
+	   more since they never end */
+	if (agent_cnt > MAX_AGENT_COUNT) {
 		slurm_mutex_unlock(&agent_cnt_mutex);
 		return;
 	}
@@ -543,16 +532,19 @@ static void _block_op(bg_update_t *bg_update_ptr)
 	slurm_mutex_unlock(&agent_cnt_mutex);
 	/* spawn an agent */
 	slurm_attr_init(&attr_agent);
-	if (pthread_attr_setdetachstate(&attr_agent, PTHREAD_CREATE_JOINABLE))
+	if (pthread_attr_setdetachstate(&attr_agent, 
+					PTHREAD_CREATE_DETACHED))
 		error("pthread_attr_setdetachstate error %m");
-
+	
 	retries = 0;
-	while (pthread_create(&thread_agent, &attr_agent, _block_agent, NULL)) {
+	while (pthread_create(&thread_agent, &attr_agent, 
+			      _block_agent, NULL)) {
 		error("pthread_create error %m");
 		if (++retries > MAX_PTHREAD_RETRIES)
 			fatal("Can't create pthread");
 		usleep(1000);	/* sleep and retry */
 	}
+	pthread_attr_destroy(&attr_agent);
 }
 
 
@@ -651,8 +643,6 @@ int term_jobs_on_block(pm_partition_id_t bg_block_id)
 	return rc;
 }
 
-#endif
-
 /*
  * Perform any setup required to initiate a job
  * job_ptr IN - pointer to the job being initiated
@@ -667,7 +657,6 @@ extern int start_job(struct job_record *job_ptr)
 	int rc = SLURM_SUCCESS;
 	bg_record_t *bg_record = NULL;
 
-#ifdef HAVE_BG_FILES
 	bg_update_t *bg_update_ptr = NULL;
 
 	bg_update_ptr = xmalloc(sizeof(bg_update_t));
@@ -680,53 +669,17 @@ extern int start_job(struct job_record *job_ptr)
 		SELECT_DATA_NODE_USE, &(bg_update_ptr->node_use));
 	bg_record = find_bg_record(bg_update_ptr->bg_block_id);
 	if (bg_record) {
-		job_ptr->num_procs = (bg_record->cnodes_per_bp *
+		job_ptr->num_procs = (bg_record->cpus_per_bp *
 			bg_record->bp_count);
+		slurm_mutex_lock(&block_state_mutex);
+		bg_record->job_running = bg_update_ptr->job_id;
+		list_push(bg_job_block_list, bg_record);
+		slurm_mutex_unlock(&block_state_mutex);
 	}
 	info("Queue start of job %u in BG block %s",
 	     job_ptr->job_id, 
 	     bg_update_ptr->bg_block_id);
 	_block_op(bg_update_ptr);
-#else
-	ListIterator itr;
-	bg_record_t *found_record = NULL;
-	char *block_id = NULL;
-	uint16_t node_use;
-
-	if (bg_list) {
-		
-		select_g_get_jobinfo(job_ptr->select_jobinfo,
-			SELECT_DATA_BLOCK_ID, &block_id);
-		select_g_get_jobinfo(job_ptr->select_jobinfo,
-			SELECT_DATA_NODE_USE, &node_use);
-		if(!block_id) {
-			error("NO block_id");
-			return rc;
-		}
-		bg_record = find_bg_record(block_id);
-		if (bg_record) {
-			job_ptr->num_procs = (bg_record->cnodes_per_bp *
-				bg_record->bp_count);
-		}
-		itr = list_iterator_create(bg_list);
-		while ((found_record = (bg_record_t *) list_next(itr))) {
-			if (bg_record->full_block)
-				found_record->state = RM_PARTITION_FREE;
-			else if(found_record->full_block)
-				found_record->state = RM_PARTITION_FREE;
-			if ((!found_record->bg_block_id)
-			    ||  (strcmp(block_id, found_record->bg_block_id)))
-				continue;
-			found_record->job_running = job_ptr->job_id;
-			found_record->node_use = node_use;
-			found_record->state = RM_PARTITION_READY;
-			last_bg_update = time(NULL);
-			break;
-		}
-		list_iterator_destroy(itr);
-		xfree(block_id);
-	}
-#endif
 	return rc;
 }
 
@@ -743,8 +696,6 @@ extern int start_job(struct job_record *job_ptr)
 int term_job(struct job_record *job_ptr)
 {
 	int rc = SLURM_SUCCESS;
-#ifdef HAVE_BG_FILES
-
 	bg_update_t *bg_update_ptr = NULL;
 	
 	bg_update_ptr = xmalloc(sizeof(bg_update_t));
@@ -756,28 +707,7 @@ int term_job(struct job_record *job_ptr)
 	info("Queue termination of job %u in BG block %s",
 		job_ptr->job_id, bg_update_ptr->bg_block_id);
 	_block_op(bg_update_ptr);
-#else
-	bg_record_t *bg_record;
-	char *block_id = NULL;
-		
-	if (bg_list) {
-		
-		select_g_get_jobinfo(job_ptr->select_jobinfo,
-			SELECT_DATA_BLOCK_ID, &block_id);
-		if(!block_id) {
-			error("NO block_id");
-			return rc;
-		}
-		bg_record = find_bg_record(block_id);
-		info("Finished job %u in BG block %s",
-		     job_ptr->job_id, 
-		     bg_record->bg_block_id);
-		bg_record->state = RM_PARTITION_FREE;
-		bg_record->job_running = -1;
-		last_bg_update = time(NULL);		
-		xfree(block_id);
-	}
-#endif
+
 	return rc;
 }
 
@@ -788,7 +718,6 @@ int term_job(struct job_record *job_ptr)
  */
 extern int sync_jobs(List job_list)
 {
-#ifdef HAVE_BG_FILES
 	ListIterator job_iterator, block_iterator;
 	struct job_record  *job_ptr = NULL;
 	bg_update_t *bg_update_ptr = NULL;
@@ -839,8 +768,7 @@ extern int sync_jobs(List job_list)
 				job_ptr->job_state = JOB_FAILED 
 					| JOB_COMPLETING;
 				job_ptr->end_time = time(NULL);
-				xfree(bg_update_ptr->bg_block_id);
-				xfree(bg_update_ptr);
+				_bg_list_del(bg_update_ptr);
 				continue;
 			}
 
@@ -876,7 +804,6 @@ extern int sync_jobs(List job_list)
 		error("sync_jobs: no block_list");
 		return SLURM_ERROR;
 	}
-#endif
 	return SLURM_SUCCESS;
 }
 
@@ -931,6 +858,13 @@ extern int boot_block(bg_record_t *bg_record)
 	//bg_record->boot_count = 0;
 	last_bg_update = time(NULL);
 	slurm_mutex_unlock(&block_state_mutex);
+#else
+	slurm_mutex_lock(&block_state_mutex);
+	bg_record->state = RM_PARTITION_READY;
+	last_bg_update = time(NULL);
+	slurm_mutex_unlock(&block_state_mutex);				
 #endif
+	
+
 	return SLURM_SUCCESS;
 }
diff --git a/src/plugins/select/bluegene/plugin/bg_switch_connections.c b/src/plugins/select/bluegene/plugin/bg_switch_connections.c
index 6049945e5e3..ad35fb9381a 100644
--- a/src/plugins/select/bluegene/plugin/bg_switch_connections.c
+++ b/src/plugins/select/bluegene/plugin/bg_switch_connections.c
@@ -320,11 +320,11 @@ extern int configure_small_block(bg_record_t *bg_record)
 	ListIterator itr;
 	ba_node_t* ba_node = NULL;
 	int rc = SLURM_SUCCESS;
-	rm_BP_t *curr_bp;
+	rm_BP_t *curr_bp = NULL;
 	rm_bp_id_t bp_id = NULL;
-	int num_ncards = 4;
+	int num_ncards = 0;
 	rm_nodecard_t *ncard;
-	rm_nodecard_list_t *ncard_list;
+	rm_nodecard_list_t *ncard_list = NULL;
 	rm_quarter_t quarter;
 	int num, i;
 
@@ -342,6 +342,8 @@ extern int configure_small_block(bg_record_t *bg_record)
 		fatal("rm_set_data(RM_PartitionPsetsPerBP)", bg_err_str(rc));
 	}
 
+	num_ncards = bg_record->node_cnt/bluegene_nc_node_cnt;
+
 	if ((rc = rm_set_data(bg_record->bg_block,
 			      RM_PartitionNodeCardNum,
 			      &num_ncards))
@@ -435,7 +437,17 @@ extern int configure_small_block(bg_record_t *bg_record)
 		}
 		if(bg_record->quarter != quarter)
 			continue;
+
+		if(bg_record->segment != NO_VAL) {
+			if(bg_record->segment != (i%4))
+				continue;
+		}
+
+		slurm_mutex_lock(&api_file_mutex);
 		if (num_ncards) {
 			if ((rc = rm_set_data(bg_record->bg_block,
 					      RM_PartitionNextNodeCard, 
@@ -678,7 +690,7 @@ extern int configure_block_switches(bg_record_t * bg_record)
 
 		if(found_bpid==BA_SYSTEM_DIMENSIONS) {
 						
-			debug2("adding midplane %d%d%d",
+			debug2("adding bp %d%d%d",
 			       bg_bp->coord[X],
 			       bg_bp->coord[Y],
 			       bg_bp->coord[Z]);
diff --git a/src/plugins/select/bluegene/plugin/block_sys.c b/src/plugins/select/bluegene/plugin/block_sys.c
index c66708150f1..a75011ceace 100755
--- a/src/plugins/select/bluegene/plugin/block_sys.c
+++ b/src/plugins/select/bluegene/plugin/block_sys.c
@@ -94,38 +94,39 @@ static void _print_list(List list)
 static void _pre_allocate(bg_record_t *bg_record)
 {
 	int rc;
-	int send_psets=numpsets;
+	int send_psets=bluegene_numpsets;
 
 	slurm_mutex_lock(&api_file_mutex);
 	if ((rc = rm_set_data(bg_record->bg_block, RM_PartitionBlrtsImg,   
-			bluegene_blrts)) != STATUS_OK)
+			      bluegene_blrts)) != STATUS_OK)
 		error("rm_set_data(RM_PartitionBlrtsImg)", bg_err_str(rc));
 
 	if ((rc = rm_set_data(bg_record->bg_block, RM_PartitionLinuxImg,   
-			bluegene_linux)) != STATUS_OK) 
+			      bluegene_linux)) != STATUS_OK) 
 		error("rm_set_data(RM_PartitionLinuxImg)", bg_err_str(rc));
 
 	if ((rc = rm_set_data(bg_record->bg_block, RM_PartitionMloaderImg, 
-			bluegene_mloader)) != STATUS_OK)
+			      bluegene_mloader)) != STATUS_OK)
 		error("rm_set_data(RM_PartitionMloaderImg)", bg_err_str(rc));
 
 	if ((rc = rm_set_data(bg_record->bg_block, RM_PartitionRamdiskImg, 
-			bluegene_ramdisk)) != STATUS_OK)
+			      bluegene_ramdisk)) != STATUS_OK)
 		error("rm_set_data(RM_PartitionRamdiskImg)", bg_err_str(rc));
 
 	if ((rc = rm_set_data(bg_record->bg_block, RM_PartitionConnection, 
-			&bg_record->conn_type)) != STATUS_OK)
+			      &bg_record->conn_type)) != STATUS_OK)
 		error("rm_set_data(RM_PartitionConnection)", bg_err_str(rc));
 	
-	if(bg_record->cnodes_per_bp == (procs_per_node/4))
-		send_psets = numpsets/4;
+	rc = bluegene_mp_node_cnt/bg_record->node_cnt;
+	if(rc > 1)
+		send_psets = bluegene_numpsets/rc;
 	
 	if ((rc = rm_set_data(bg_record->bg_block, RM_PartitionPsetsPerBP, 
-			&send_psets)) != STATUS_OK)
+			      &send_psets)) != STATUS_OK)
 		error("rm_set_data(RM_PartitionPsetsPerBP)", bg_err_str(rc));
 
 	if ((rc = rm_set_data(bg_record->bg_block, RM_PartitionUserName, 
-			slurmctld_conf.slurm_user_name)) != STATUS_OK)
+			      slurmctld_conf.slurm_user_name)) != STATUS_OK)
 		error("rm_set_data(RM_PartitionUserName)", bg_err_str(rc));
 /* 	info("setting it here"); */
 /* 	bg_record->bg_block_id = "RMP101"; */
@@ -222,13 +223,111 @@ static int _post_bg_init_read(void *object, void *arg)
 
 	return SLURM_SUCCESS;
 }
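+/* Figure out which segment a 32-node block occupies by matching the
+ * block's first node card against the node card list of its base
+ * partition, recording (index % 4) in bg_record->segment. */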
+static int _find_32node_segment(bg_record_t *bg_record, 
+				rm_partition_t *block_ptr)
+{
+	char *my_card_name = NULL;
+	char *card_name = NULL;
+	rm_bp_id_t bp_id = NULL;
+	int segment = NO_VAL;
+	int card_count = 0;
+	int num = 0;
+	int i=0;
+	int rc;
+	rm_nodecard_list_t *ncard_list = NULL;
+	rm_nodecard_t *ncard = NULL;
+	rm_BP_t *curr_bp = NULL;
+	
+	if((rc = rm_get_data(block_ptr,
+			     RM_PartitionFirstNodeCard,
+			     &ncard))
+	   != STATUS_OK) {
+		error("rm_get_data(RM_FirstCard): %s",
+		      bg_err_str(rc));
+	}
+	if((rc = rm_get_data(ncard,
+			     RM_NodeCardID,
+			     &my_card_name))
+	   != STATUS_OK) {
+		error("rm_get_data(RM_NodeCardID): %s",
+		      bg_err_str(rc));
+	}
+	
+	if((rc = rm_get_data(block_ptr,
+			     RM_PartitionFirstBP,
+			     &curr_bp))
+	   != STATUS_OK) {
+		error("rm_get_data(RM_PartitionFirstBP): %s",
+		      bg_err_str(rc));
+	}
+	if ((rc = rm_get_data(curr_bp, RM_BPID, &bp_id))
+	    != STATUS_OK) {
+		error("rm_get_data(RM_BPID): %d", rc);
+		return SLURM_ERROR;
+	}
+	
+	if ((rc = rm_get_nodecards(bp_id, &ncard_list))
+	    != STATUS_OK) {
+		error("rm_get_nodecards(%s): %d",
+		       bp_id, rc);
+		free(bp_id);
+		return SLURM_ERROR;
+	}
+	free(bp_id);
+	if((rc = rm_get_data(ncard_list, RM_NodeCardListSize, &num))
+	   != STATUS_OK) {
+		error("rm_get_data(RM_NodeCardListSize): %s", bg_err_str(rc));
+		return SLURM_ERROR;
+	}
+	
+	for(i=0; i<num; i++) {
+		if (i) {
+			if ((rc = 
+			     rm_get_data(ncard_list, 
+					 RM_NodeCardListNext, 
+					 &ncard)) != STATUS_OK) {
+				error("rm_get_data(RM_NodeCardListNext): %s",
+				      rc);
+				rc = SLURM_ERROR;
+				goto cleanup;
+			}
+		} else {
+			if ((rc = rm_get_data(ncard_list, 
+					      RM_NodeCardListFirst, 
+					      &ncard)) != STATUS_OK) {
+				error("rm_get_data(RM_NodeCardListFirst: %s",
+				      rc);
+				rc = SLURM_ERROR;
+				goto cleanup;
+			}
+		}
+		if ((rc = rm_get_data(ncard, 
+				      RM_NodeCardID, 
+				      &card_name)) != STATUS_OK) {
+			error("rm_get_data(RM_NodeCardID: %s",
+			      rc);
+			rc = SLURM_ERROR;
+			goto cleanup;
+		}
+		if(strcmp(my_card_name,card_name)) {
+			free(card_name);
+			continue;
+		}
+		free(card_name);
+		bg_record->segment = (i%4);
+		break;
+	}
+cleanup:
+	free(my_card_name);
+	return SLURM_SUCCESS;
+}
 
 extern int configure_block(bg_record_t *bg_record)
 {
 	/* new partition to be added */
 	rm_new_partition(&bg_record->bg_block); 
 	_pre_allocate(bg_record);
-	if(bg_record->cnodes_per_bp < procs_per_node)
+	if(bg_record->cpus_per_bp < procs_per_node)
 		configure_small_block(bg_record);
 	else
 		configure_block_switches(bg_record);
@@ -252,7 +351,7 @@ int read_bg_blocks()
 	bg_record_t *bg_record = NULL;
 	struct passwd *pw_ent = NULL;
 	
-	int *coord;
+	int *coord = NULL;
 	int block_number, block_count;
 	char *block_name = NULL;
 	rm_partition_list_t *block_list = NULL;
@@ -340,9 +439,10 @@ int read_bg_blocks()
 		
 		free(block_name);
 
-		bg_record->state = -1;
-		bg_record->quarter = -1;
-		bg_record->job_running = -1;
+		bg_record->state = NO_VAL;
+		bg_record->quarter = NO_VAL;
+		bg_record->segment = NO_VAL;
+		bg_record->job_running = NO_VAL;
 				
 		if ((rc = rm_get_data(block_ptr, 
 				      RM_PartitionBPNum, 
@@ -376,10 +476,42 @@ int read_bg_blocks()
 				error("rm_get_data(CardQuarter): %d",rc);
 				bp_cnt = 0;
 			}
-			debug("%s is in quarter %d",
+			if((rc = rm_get_data(block_ptr,
+					     RM_PartitionNodeCardNum,
+					     &i))
+			   != STATUS_OK) {
+				error("rm_get_data(RM_FirstCard): %s",
+				      bg_err_str(rc));
+				bp_cnt = 0;
+			}
+			if(i == 1) {
+				_find_32node_segment(bg_record, block_ptr);
+				i = 16;
+			} 
+			
+			bg_record->cpus_per_bp = procs_per_node/i;
+			bg_record->node_cnt = bluegene_mp_node_cnt/i;
+			
+			debug("%s is in quarter %d segment %d",
 			      bg_record->bg_block_id,
-			      bg_record->quarter);
-		} 
+			      bg_record->quarter,
+			      bg_record->segment);
+			bg_record->conn_type = SELECT_SMALL;
+			
+		} else {
+			bg_record->cpus_per_bp = procs_per_node;
+			bg_record->node_cnt =  bluegene_mp_node_cnt;
+
+			if ((rc = rm_get_data(block_ptr, 
+					      RM_PartitionConnection,
+					      &bg_record->conn_type))
+			    != STATUS_OK) {
+				error("rm_get_data"
+				      "(RM_PartitionConnection): %s",
+				      bg_err_str(rc));
+			}
+			
+		}
 
 		bg_record->bg_block_list = list_create(NULL);
 		bg_record->hostlist = hostlist_create(NULL);
@@ -428,34 +560,23 @@ int read_bg_blocks()
 			
 			coord = find_bp_loc(bpid);
 
-			free(bpid);
-
 			if(!coord) {
-				fatal("Could not find coordinates for BP ID %s",
-					(char *) bpid);
+				fatal("Could not find coordinates for "
+				      "BP ID %s", (char *) bpid);
 			}
+			free(bpid);
 
 			sprintf(node_name_tmp, 
-				 "%s%d%d%d\0", 
-				 slurmctld_conf.node_prefix,
-				 coord[X], coord[Y], coord[Z]);
+				"%s%d%d%d\0", 
+				slurmctld_conf.node_prefix,
+				coord[X], coord[Y], coord[Z]);
 			
 			hostlist_push(bg_record->hostlist, node_name_tmp);
 		}	
 		
 		// need to get the 000x000 range for nodes
 		// also need to get coords
-		if(small)
-			bg_record->conn_type = SELECT_SMALL;
-		else
-			if ((rc = rm_get_data(block_ptr, 
-					      RM_PartitionConnection,
-					      &bg_record->conn_type))
-			    != STATUS_OK) {
-				error("rm_get_data"
-				      "(RM_PartitionConnection): %s",
-				      bg_err_str(rc));
-			}
+		
 		if ((rc = rm_get_data(block_ptr, RM_PartitionMode,
 					 &bg_record->node_use))
 		    != STATUS_OK) {
@@ -539,11 +660,6 @@ int read_bg_blocks()
 			      bg_err_str(rc));
 		} 
 		
-		if(small)
-			bg_record->cnodes_per_bp = procs_per_node/4;
-		else
-			bg_record->cnodes_per_bp = procs_per_node;
-		
 		bg_record->block_lifecycle = STATIC;
 						
 clean_up:	if (bg_recover
diff --git a/src/plugins/select/bluegene/plugin/bluegene.c b/src/plugins/select/bluegene/plugin/bluegene.c
index faf2be73b35..da52d9b61a9 100644
--- a/src/plugins/select/bluegene/plugin/bluegene.c
+++ b/src/plugins/select/bluegene/plugin/bluegene.c
@@ -29,7 +29,7 @@
 #include <stdio.h>
 
 #define BUFSIZE 4096
-#define BITSIZE 128
+#define BITSZE 128
 #define MMCS_POLL_TIME 120	/* poll MMCS for down switches and nodes 
 				 * every 120 secs */
 #define BG_POLL_TIME 0	        /* poll bg blocks every 3 secs */
@@ -40,13 +40,20 @@ char* bg_conf = NULL;
 
 /* Global variables */
 rm_BGL_t *bg;
-List bg_list = NULL;			/* list of bg_record entries */
-List bg_curr_block_list = NULL;  	/* current bg blocks */
-List bg_found_block_list = NULL;  	/* found bg blocks */
+
+List bg_list = NULL;			/* total list of bg_record entries */
+List bg_curr_block_list = NULL;  	/* current bg blocks in bluegene.conf*/
+List bg_found_block_list = NULL;  	/* found bg blocks already on system */
+List bg_job_block_list = NULL;  	/* jobs running in these blocks */
+List bg_booted_block_list = NULL;  	/* blocks that are booted */
+
 char *bluegene_blrts = NULL, *bluegene_linux = NULL, *bluegene_mloader = NULL;
-char *bluegene_ramdisk = NULL, *bridge_api_file = NULL;
-char *change_numpsets = NULL;
-int numpsets;
+char *bluegene_ramdisk = NULL, *bridge_api_file = NULL; 
+bg_layout_t bluegene_layout_mode = NO_VAL;
+int bluegene_numpsets = 0;
+int bluegene_bp_node_cnt = 0;
+int bluegene_quarter_node_cnt = 0;
+int bluegene_segment_node_cnt = 0;
 bool agent_fini = false;
 int bridge_api_verb = 0;
 time_t last_bg_update;
@@ -54,10 +61,14 @@ pthread_mutex_t block_state_mutex = PTHREAD_MUTEX_INITIALIZER;
 int num_block_to_free = 0;
 int num_block_freed = 0;
 int blocks_are_created = 0;
-bg_record_t *full_system_block = NULL;
+
+pthread_mutex_t freed_cnt_mutex = PTHREAD_MUTEX_INITIALIZER;
+List bg_free_block_list = NULL;  	/* blocks to be deleted */
+List bg_destroy_block_list = NULL;       /* blocks to be destroyed */
+int free_cnt = 0;
+int destroy_cnt = 0;
 
 #ifdef HAVE_BG_FILES
-  static pthread_mutex_t freed_cnt_mutex = PTHREAD_MUTEX_INITIALIZER;
   static int _update_bg_record_state(List bg_destroy_list);
 #else
 # if BA_SYSTEM_DIMENSIONS==3
@@ -71,14 +82,19 @@ bg_record_t *full_system_block = NULL;
 #ifdef HAVE_BG
 static int  _addto_node_list(bg_record_t *bg_record, int *start, int *end);
 #endif
-#ifdef HAVE_BG_FILES
-#endif
+
 static void _set_bg_lists();
 static int  _validate_config_nodes(void);
-static int  _bg_record_cmpf_inc(bg_record_t* rec_a, bg_record_t* rec_b);
+static int  _bg_record_cmpf_inc(bg_record_t *rec_a, bg_record_t *rec_b);
 static int _delete_old_blocks(void);
 static char *_get_bg_conf(void);
 static void _strip_13_10(char *line);
+static int _split_block(bg_record_t *bg_record, int procs, int *block_inx);
+static bg_record_t *_create_small_record(bg_record_t *bg_record, 
+					 int quarter, int segment);
+static int _add_bg_record(List records, char *nodes, 
+			  rm_connection_type_t conn_type, 
+			  int num_segment, int num_quarter);
 static int  _parse_bg_spec(char *in_line);
 static void _process_nodes(bg_record_t *bg_record);
 static int  _reopen_bridge_log(void);
@@ -132,23 +148,29 @@ extern void fini_bg(void)
 	if (bg_list) {
 		list_destroy(bg_list);
 		bg_list = NULL;
-	}
-	
+	}	
 	if (bg_curr_block_list) {
 		list_destroy(bg_curr_block_list);
 		bg_curr_block_list = NULL;
-	}
-	
+	}	
 	if (bg_found_block_list) {
 		list_destroy(bg_found_block_list);
 		bg_found_block_list = NULL;
 	}
-
+	if (bg_job_block_list) {
+		list_destroy(bg_job_block_list);
+		bg_job_block_list = NULL;
+	}
+	if (bg_booted_block_list) {
+		list_destroy(bg_booted_block_list);
+		bg_booted_block_list = NULL;
+	}
 	xfree(bluegene_blrts);
 	xfree(bluegene_linux);
 	xfree(bluegene_mloader);
 	xfree(bluegene_ramdisk);
 	xfree(bridge_api_file);
 
 #ifdef HAVE_BG_FILES
 	if(bg)
@@ -160,6 +182,8 @@ extern void fini_bg(void)
 
 extern void print_bg_record(bg_record_t* bg_record)
 {
+	char tmp_char[256];
+
 	if (!bg_record) {
 		error("print_bg_record, record given is null");
 		return;
@@ -169,7 +193,10 @@ extern void print_bg_record(bg_record_t* bg_record)
 	if (bg_record->bg_block_id)
 		info("\tbg_block_id: %s", bg_record->bg_block_id);
 	info("\tnodes: %s", bg_record->nodes);
-	info("\tsize: %d", bg_record->bp_count);
+	info("\tsize: %d BPs %d Nodes %d cpus", 
+	     bg_record->bp_count,
+	     bg_record->node_cnt,
+	     bg_record->cpus_per_bp * bg_record->bp_count);
 	info("\tgeo: %dx%dx%d", bg_record->geo[X], bg_record->geo[Y], 
 	     bg_record->geo[Z]);
 	info("\tlifecycle: %s", convert_lifecycle(bg_record->block_lifecycle));
@@ -186,16 +213,19 @@ extern void print_bg_record(bg_record_t* bg_record)
 		info("\tbitmap: %s", bitstring);
 	}
 #else
+	format_node_name(bg_record, tmp_char);
 	info("bg_block_id=%s nodes=%s", bg_record->bg_block_id, 
-	     bg_record->nodes);
+	     tmp_char);
 #endif
 }
 
-extern void destroy_bg_record(void* object)
+extern void destroy_bg_record(void *object)
 {
 	bg_record_t* bg_record = (bg_record_t*) object;
 
 	if (bg_record) {
+		xfree(bg_record->bg_block_id);
+		bg_record->bg_block_id = NULL;
 		xfree(bg_record->nodes);
 		xfree(bg_record->user_name);
 		xfree(bg_record->target_name);
@@ -205,12 +235,42 @@ extern void destroy_bg_record(void* object)
 			hostlist_destroy(bg_record->hostlist);
 		if(bg_record->bitmap)
 			bit_free(bg_record->bitmap);
-		xfree(bg_record->bg_block_id);
 		
 		xfree(bg_record);
+		bg_record = NULL;
 	}
 }
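+/* Copy the contents of fir_record into sec_record, duplicating the
+ * strings and the bitmap rather than sharing pointers. */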
 
+extern void copy_bg_record(bg_record_t *fir_record, bg_record_t *sec_record)
+{
+	xfree(sec_record->bg_block_id);
+	sec_record->bg_block_id = xstrdup(fir_record->bg_block_id);
+	xfree(sec_record->nodes);
+	sec_record->nodes = xstrdup(fir_record->nodes);
+	xfree(sec_record->user_name);
+	sec_record->user_name = xstrdup(fir_record->user_name);
+	xfree(sec_record->target_name);
+	sec_record->target_name = xstrdup(fir_record->target_name);
+	sec_record->user_uid = fir_record->user_uid;
+	sec_record->block_lifecycle = fir_record->block_lifecycle;
+	sec_record->state = fir_record->state;
+	sec_record->conn_type = fir_record->conn_type;
+	sec_record->node_use = fir_record->node_use;
+	sec_record->bp_count = fir_record->bp_count;
+	sec_record->switch_count = fir_record->switch_count;
+	sec_record->boot_state = fir_record->boot_state;
+	sec_record->boot_count = fir_record->boot_count;
+	if(sec_record->bitmap)
+		bit_free(sec_record->bitmap);
+	if((sec_record->bitmap = bit_copy(fir_record->bitmap)) == NULL) {
+		error("Unable to copy bitmap for", fir_record->nodes);
+	}
+	sec_record->job_running = fir_record->job_running;
+	sec_record->cpus_per_bp = fir_record->cpus_per_bp;
+	sec_record->node_cnt = fir_record->node_cnt;
+	sec_record->quarter = fir_record->quarter;
+	sec_record->segment = fir_record->segment;
+}
 
 extern bg_record_t *find_bg_record(char *bg_block_id)
 {
@@ -222,8 +282,7 @@ extern bg_record_t *find_bg_record(char *bg_block_id)
 			
 	if(bg_list) {
 		itr = list_iterator_create(bg_list);
-		while ((bg_record = 
-			(bg_record_t *) list_next(itr)) != NULL) {
+		while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) {
 			if(bg_record->bg_block_id)
 				if (!strcmp(bg_record->bg_block_id, 
 					    bg_block_id))
@@ -245,15 +304,18 @@ extern bg_record_t *find_bg_record(char *bg_block_id)
 */
 extern int update_block_user(bg_record_t *bg_record) 
 {
-#ifdef HAVE_BG_FILES
-	int rc=0;
 	struct passwd *pw_ent = NULL;
-	
+
 	if(!bg_record->target_name) {
 		error("Must set target_name to run update_block_user.");
 		return -1;
 	}
 
+#ifdef HAVE_BG_FILES
+	int rc=0;
+	
 	if((rc = remove_all_users(bg_record->bg_block_id, 
 				  bg_record->target_name))
 	   == REMOVE_USER_ERR) {
@@ -279,6 +341,7 @@ extern int update_block_user(bg_record_t *bg_record)
 			} 
 		}
 	}
+#endif
 	
 	if(strcmp(bg_record->target_name, bg_record->user_name)) {
 		xfree(bg_record->user_name);
@@ -292,10 +355,56 @@ extern int update_block_user(bg_record_t *bg_record)
 		return 1;
 	}
 	
-#endif
 	return 0;
 }
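+/* Write the block's node range into tmp_char, appending the quarter
+ * (and segment) when the block is smaller than a base partition. */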
 
+extern int format_node_name(bg_record_t *bg_record, char tmp_char[])
+{
+	if(bg_record->quarter != NO_VAL) {
+		if(bg_record->segment != NO_VAL) {
+			sprintf(tmp_char,"%s.%d.%d\0",
+				bg_record->nodes,
+				bg_record->quarter,
+				bg_record->segment);
+		} else {
+			sprintf(tmp_char,"%s.%d\0",
+				bg_record->nodes,
+				bg_record->quarter);
+		}
+	} else {
+		sprintf(tmp_char,"%s\0",bg_record->nodes);
+	}
+	return SLURM_SUCCESS;
+}
+
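+/* Return true if the two blocks share any base partitions and, for
+ * small blocks, also share the same quarter (and segment when set). */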
+extern bool blocks_overlap(bg_record_t *rec_a, bg_record_t *rec_b)
+{
+	bitstr_t *my_bitmap = NULL;
+	
+	my_bitmap = bit_copy(rec_a->bitmap);
+	bit_and(my_bitmap, rec_b->bitmap);
+	if (bit_ffs(my_bitmap) == -1) {
+		bit_free(my_bitmap);
+		return false;
+	}
+	bit_free(my_bitmap);
+		
+	if(rec_a->quarter != NO_VAL) {
+		if(rec_b->quarter == NO_VAL)
+			return true;
+		else if(rec_a->quarter != rec_b->quarter)
+			return false;
+		if(rec_a->segment != NO_VAL) {
+			if(rec_b->segment == NO_VAL)
+				return true;
+			else if(rec_a->segment 
+				!= rec_b->segment)
+				return false;
+		}				
+	}
+	return true;
+}
+
 extern int remove_all_users(char *bg_block_id, char *user_name) 
 {
 	int returnc = REMOVE_USER_NONE;
@@ -400,6 +509,7 @@ extern void set_block_user(bg_record_t *bg_record)
 	xfree(bg_record->target_name);
 	bg_record->target_name = 
 		xstrdup(slurmctld_conf.slurm_user_name);
+	list_push(bg_booted_block_list, bg_record);			
 }
 
 extern char* convert_lifecycle(lifecycle_type_t lifecycle)
@@ -448,6 +558,7 @@ extern void sort_bg_record_inc_size(List records){
 		return;
 	slurm_mutex_lock(&block_state_mutex);
 	list_sort(records, (ListCmpF) _bg_record_cmpf_inc);
+	last_bg_update = time(NULL);
 	slurm_mutex_unlock(&block_state_mutex);
 }
 
@@ -532,13 +643,14 @@ extern char *bg_err_str(status_t inx)
 }
 
 /*
- * create_static_blocks - create the static blocks that will be used
- *   for scheduling.  
- * IN/OUT block_list - (global, from slurmctld): SLURM's block 
- *   configurations. Fill in bg_block_id 
+ * create_defined_blocks - create the statically defined blocks that will
+ * be used for scheduling; all blocks must be able to be created and
+ * booted at once.  
+ * IN overlapped - LAYOUT_OVERLAP if the blocks may overlap, otherwise
+ * they are static.
  * RET - success of fitting all configurations
  */
-extern int create_static_blocks(List block_list)
+extern int create_defined_blocks(bg_layout_t overlapped)
 {
 	int rc = SLURM_SUCCESS;
 
@@ -546,9 +658,10 @@ extern int create_static_blocks(List block_list)
 	struct passwd *pw_ent = NULL;
 	bg_record_t *bg_record = NULL, *found_record = NULL;
 	char *name = NULL;
-#ifndef HAVE_BG_FILES
-	static int block_inx = 0;
-#else
+	int geo[BA_SYSTEM_DIMENSIONS];
+	int i;
+
+#ifdef HAVE_BG_FILES
 	ListIterator itr_found;
 	init_wires();
 #endif
@@ -557,22 +670,25 @@ extern int create_static_blocks(List block_list)
 		
 	if(bg_list) {
 		itr = list_iterator_create(bg_list);
-		while ((bg_record = (bg_record_t *) list_next(itr)) 
-		       != NULL) {			
+		while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) {
 			if(bg_record->bp_count>0 
 			   && !bg_record->full_block
-			   && bg_record->cnodes_per_bp == procs_per_node) {
-				debug("adding %s %d%d%d",
-				      bg_record->nodes,
-				      bg_record->start[X],
-				      bg_record->start[Y],
-				      bg_record->start[Z]);
+			   && bg_record->cpus_per_bp == procs_per_node) {
+				if(overlapped == LAYOUT_OVERLAP)
+					reset_ba_system();
+				for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) 
+					geo[i] = bg_record->geo[i];
+				debug2("adding %s starting at %d%d%d",
+				       bg_record->nodes,
+				       bg_record->start[X],
+				       bg_record->start[Y],
+				       bg_record->start[Z]);
 				name = set_bg_block(NULL,
 						    bg_record->start, 
-						    bg_record->geo, 
+						    geo, 
 						    bg_record->conn_type);
 				if(!name) {
-					error("I was unable to make the "
+					debug("I was unable to make the "
 					      "requested block.");
 					slurm_mutex_unlock(&block_state_mutex);
 					return SLURM_ERROR;
@@ -582,7 +698,7 @@ extern int create_static_blocks(List block_list)
 		}
 		list_iterator_destroy(itr);
 	} else {
-		error("create_static_blocks: no bg_list 1");
+		error("create_defined_blocks: no bg_list 1");
 		slurm_mutex_unlock(&block_state_mutex);
 		return SLURM_ERROR;
 	}
@@ -597,23 +713,25 @@ extern int create_static_blocks(List block_list)
 					bg_found_block_list);
 				while ((found_record = (bg_record_t*) 
 					list_next(itr_found)) != NULL) {
-					/*printf("%s %d %s %d\n",*/
+					/*info("%s %d %s %d\n",*/
 /* 					       bg_record->nodes, */
 /* 					       bg_record->quarter, */
 /* 					       found_record->nodes, */
 /* 					       found_record->quarter); */
 					
-					if ((!strcmp(bg_record->nodes, 
-						     found_record->nodes))
+					if ((bit_equal(bg_record->bitmap, 
+						       found_record->bitmap))
 					    && (bg_record->quarter ==
-						found_record->quarter)) {
+						found_record->quarter)
+					    && (bg_record->segment ==
+						found_record->segment)) {
 						/* don't reboot this one */
 						break;	
 					}
 				}
 				list_iterator_destroy(itr_found);
 			} else {
-				error("create_static_blocks: "
+				error("create_defined_blocks: "
 				      "no bg_found_block_list 1");
 			}
 			if(found_record == NULL) {
@@ -628,49 +746,340 @@ extern int create_static_blocks(List block_list)
 		}
 		list_iterator_destroy(itr);
 	} else {
-		error("create_static_blocks: no bg_list 2");
+		error("create_defined_blocks: no bg_list 2");
 		slurm_mutex_unlock(&block_state_mutex);
 		return SLURM_ERROR;
 	}
 #endif
+	last_bg_update = time(NULL);
+	slurm_mutex_unlock(&block_state_mutex);
+	create_full_system_block();
+
+	sort_bg_record_inc_size(bg_list);
+
+	
+#ifndef HAVE_BG_FILES
+	char tmp_char[256];
+	static int block_inx = 0;
+	if(bg_list) {
+		slurm_mutex_lock(&block_state_mutex);
+		itr = list_iterator_create(bg_list);
+		while ((bg_record = (bg_record_t*) list_next(itr))) {
+			if (bg_record->bg_block_id)
+				continue;
+			bg_record->bg_block_id = xmalloc(sizeof(char)*8);
+			snprintf(bg_record->bg_block_id, 8, "RMP%d", 
+				 block_inx++);
+			format_node_name(bg_record, tmp_char);
+			info("BG BlockID:%s Nodes:%s Conn:%s Mode:%s",
+			     bg_record->bg_block_id, tmp_char,
+			     convert_conn_type(bg_record->conn_type),
+			     convert_node_use(bg_record->node_use));
+		}
+		list_iterator_destroy(itr);
+	} else {
+		error("create_defined_blocks: no bg_list 4");
+		slurm_mutex_unlock(&block_state_mutex);
+		return SLURM_ERROR;
+	}
+	
+	slurm_mutex_unlock(&block_state_mutex);
+	
+#endif	/* not have HAVE_BG_FILES */
+	
+
+#ifdef _PRINT_BLOCKS_AND_EXIT
+	if(bg_list) {
+		itr = list_iterator_create(bg_list);
+		debug("\n\n");
+		while ((found_record = (bg_record_t *) list_next(itr)) 
+		       != NULL) {
+			print_bg_record(found_record);
+		}
+		list_iterator_destroy(itr);
+	} else {
+		error("create_defined_blocks: no bg_list 5");
+	}
+ 	exit(0);
+#endif	/* _PRINT_BLOCKS_AND_EXIT */
+	rc = SLURM_SUCCESS;
+	//exit(0);
+	return rc;
+}
+
+
+
+/*
+ * create_dynamic_block - create a new block to be used for a new
+ * job allocation.  This will be added to the booted and job bg_lists.
+ * RET - success of fitting configuration in the running system.
+ */
+extern int create_dynamic_block(ba_request_t *request, List my_block_list)
+{
+	int rc = SLURM_SUCCESS;
+	static int block_inx = 0;
+	
+	ListIterator itr;
+	bg_record_t *bg_record = NULL;
+	bg_record_t *found_record = NULL;
+	List results = list_create(NULL);
+	int num_quarter=0, num_segment=0;
+	char *name = NULL;
+	int geo[BA_SYSTEM_DIMENSIONS];
+	int proc_cnt=0;
+	int i;
+	
+	slurm_mutex_lock(&block_state_mutex);
+	reset_ba_system();
+		
+	if(my_block_list) {
+		itr = list_iterator_create(my_block_list);
+		while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) {
+			if(bg_record->bp_count>0 
+			   && (bg_record->cpus_per_bp == procs_per_node
+			       || (bg_record->quarter == 0 
+				   && bg_record->segment < 1))) {
+				for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) 
+					geo[i] = bg_record->geo[i];
+				debug2("adding %s %d%d%d %d%d%d",
+				       bg_record->nodes,
+				       bg_record->start[X],
+				       bg_record->start[Y],
+				       bg_record->start[Z],
+				       geo[X],
+				       geo[Y],
+				       geo[Z]);
+				name = set_bg_block(NULL,
+						    bg_record->start, 
+						    geo, 
+						    bg_record->conn_type);
+				if(!name) {
+					debug("I was unable to make the "
+					       "requested block.");
+					slurm_mutex_unlock(&block_state_mutex);
+					return SLURM_ERROR;
+				}
+				xfree(name);
+			} 
+		}
+		list_iterator_destroy(itr);
+	} else {
+		debug("No list was given");
+	}
+	
+	if(request->size==1 && request->procs < bluegene_bp_node_cnt) {
+		debug("proc count = %d size = %d",
+		     request->procs, request->size);
+		request->conn_type = SELECT_SMALL;
+		if(request->procs == (procs_per_node/16)) {
+			num_segment=4;
+			num_quarter=3;
+		} else {
+			num_quarter=4;
+		}
+		if(bg_list) {
+			itr = list_iterator_create(bg_list);
+			while ((bg_record = (bg_record_t *) list_next(itr)) 
+			       != NULL) {
+				if(bg_record->job_running != NO_VAL)
+					continue;
+				if(bg_record->state != RM_PARTITION_FREE)
+					continue;
+				proc_cnt = bg_record->bp_count * 
+					bg_record->cpus_per_bp;
+				if(proc_cnt == request->procs) {
+					debug2("found it here %s, %s",
+					       bg_record->bg_block_id,
+					       bg_record->nodes);
+					list_iterator_destroy(itr);
+					rc = SLURM_SUCCESS;
+					goto finished;
+				}
+				if(bg_record->node_cnt > bluegene_bp_node_cnt)
+					continue;
+				break;
+			}
+			if(bg_record || my_block_list) {
+				goto found_one;
+			}
+			list_iterator_destroy(itr);
+					
+			itr = list_iterator_create(bg_list);
+			while ((bg_record = (bg_record_t *) list_next(itr)) 
+			       != NULL) {
+				if(bg_record->job_running != NO_VAL)
+					continue;
+				proc_cnt = bg_record->bp_count * 
+					bg_record->cpus_per_bp;
+				if(proc_cnt == request->procs) {
+					debug2("found it here %s, %s",
+					       bg_record->bg_block_id,
+					       bg_record->nodes);
+					list_iterator_destroy(itr);
+					rc = SLURM_SUCCESS;
+					goto finished;
+				}
+				if(bg_record->node_cnt > bluegene_bp_node_cnt)
+					continue;
+				break;
+			}
+		found_one:
+			if(bg_record) {
+				debug("going to split %s, %s",
+				      bg_record->bg_block_id,
+				      bg_record->nodes);
+				if(_split_block(bg_record, request->procs,
+						&block_inx)
+				   == SLURM_SUCCESS) {
+					list_remove(itr);
+					destroy_bg_record(bg_record);
+				}
+				
+				list_iterator_destroy(itr);
+				rc = SLURM_SUCCESS;
+				goto finished;
+			}
+			list_iterator_destroy(itr);
+		}
+	}
+	
+	if(request->conn_type == SELECT_NAV)
+		request->conn_type = SELECT_TORUS;
+	
+	if(!new_ba_request(request)) {
+		error("Problems with request for size %d geo %dx%dx%d", 
+		      request->size,
+		      request->geometry[X], 
+		      request->geometry[Y], 
+		      request->geometry[Z]);
+		rc = SLURM_ERROR;
+		goto finished;
+	} 
+
+	if(!list_count(bg_list) || !my_block_list) {
+		bg_record = NULL;
+		goto no_list;
+	}
+
+	itr = list_iterator_create(bg_list);
+	while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) {
+		request->rotate_count = 0;
+		request->elongate_count = 1;
+
+		if((bg_record->job_running == NO_VAL)
+		   && (bg_record->cpus_per_bp == procs_per_node
+		       || (bg_record->quarter == 0 
+			   && bg_record->segment < 1))) {
+			for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) 
+				request->start[i] = bg_record->start[i];
+			request->start_req = 1;
+			rc = SLURM_SUCCESS;
+			if (!allocate_block(request, results)){
+				debug2("allocate failure for %dx%dx%d", 
+				       request->geometry[X], 
+				       request->geometry[Y], 
+				       request->geometry[Z]);
+				rc = SLURM_ERROR;
+			} else 
+				break;
+		}
+	}
+	list_iterator_destroy(itr);
+
+no_list:
+	if(!bg_record) {
+		request->start_req = 0;
+		rc = SLURM_SUCCESS;
+		if (!allocate_block(request, results)){
+			debug("allocate failure for %dx%dx%d", 
+			      request->geometry[X], 
+			      request->geometry[Y], 
+			      request->geometry[Z]);
+			rc = SLURM_ERROR;
+		}
+	}
+
+	if(rc == SLURM_ERROR || !my_block_list) {
+		goto finished;
+	}
+	/*set up bg_record(s) here */
+	list_destroy(results);
+	results = list_create(destroy_bg_record);
+	_add_bg_record(results, request->save_name, 
+		       request->conn_type, num_segment, num_quarter);
+
+	while((bg_record = (bg_record_t *) list_pop(results)) != NULL) {
+#ifdef HAVE_BG_FILES
+		if((rc = configure_block(bg_record)) == SLURM_ERROR) {
+			xfree(bg_record);
+			error("unable to configure block in api");
+			goto finished;
+		}
+#else
+		bg_record->bg_block_id = xmalloc(sizeof(char)*8);
+		snprintf(bg_record->bg_block_id, 8, "RMP%d", 
+			 block_inx++);
+#endif
+		list_push(bg_list, bg_record);
+		print_bg_record(bg_record);
+	}
+
+finished:
+	if(my_block_list)
+		xfree(request->save_name);
+	if(request->elongate_geos)
+		list_destroy(request->elongate_geos);
+	if(results)
+		list_destroy(results);
+	
+	last_bg_update = time(NULL);
+	slurm_mutex_unlock(&block_state_mutex);
+	sort_bg_record_inc_size(bg_list);
+	
+	return rc;
+}
+
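+/* Create a block spanning the entire machine, unless a block covering
+ * the same nodes is already defined or already exists on the system. */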
+extern int create_full_system_block()
+{
+	int rc = SLURM_SUCCESS;
+	ListIterator itr;
+	bg_record_t *bg_record = NULL;
+	char *name = NULL;
+	List records = NULL;
+	int geo[BA_SYSTEM_DIMENSIONS];
 	
 	/* Here we are adding a block that in for the entire machine 
 	   just in case it isn't in the bluegene.conf file.
 	*/
+	slurm_mutex_lock(&block_state_mutex);
 	
 	reset_ba_system();
 
-	bg_record = (bg_record_t*) xmalloc(sizeof(bg_record_t));
-	
+	bg_record = (bg_record_t*) xmalloc(sizeof(bg_record_t));	
 
 #ifdef HAVE_BG_FILES
-	bg_record->geo[X] = DIM_SIZE[X] - 1;
-	bg_record->geo[Y] = DIM_SIZE[Y] - 1;
-	bg_record->geo[Z] = DIM_SIZE[Z] - 1;
+	geo[X] = DIM_SIZE[X] - 1;
+	geo[Y] = DIM_SIZE[Y] - 1;
+	geo[Z] = DIM_SIZE[Z] - 1;
 #else
-	bg_record->geo[X] = max_dim[X];
-	bg_record->geo[Y] = max_dim[Y];
-	bg_record->geo[Z] = max_dim[Z];
+	geo[X] = max_dim[X];
+	geo[Y] = max_dim[Y];
+	geo[Z] = max_dim[Z];
 #endif
 	name = xmalloc(sizeof(char)*(10+strlen(slurmctld_conf.node_prefix)));
-	if((bg_record->geo[X] == 0) && (bg_record->geo[Y] == 0)
-	&& (bg_record->geo[Z] == 0))
+	if((geo[X] == 0) && (geo[Y] == 0) && (geo[Z] == 0))
 		sprintf(name, "%s000\0", slurmctld_conf.node_prefix);
-       	else
+	else
 		sprintf(name, "%s[000x%d%d%d]\0",
 			slurmctld_conf.node_prefix,
-			bg_record->geo[X], bg_record->geo[Y], 
-			bg_record->geo[Z]);
-	bg_record->nodes = xstrdup(name);
-	xfree(name);
-	bg_record->quarter = -1;
-	bg_record->full_block = 1;
-       	if(bg_found_block_list) {
+			geo[X], geo[Y], geo[Z]);
+	
+	if(bg_found_block_list) {
 		itr = list_iterator_create(bg_found_block_list);
-		while ((found_record = (bg_record_t *) list_next(itr)) 
+		while ((bg_record = (bg_record_t *) list_next(itr)) 
 		       != NULL) {
-			if (!strcmp(bg_record->nodes, found_record->nodes)) {
-				destroy_bg_record(bg_record);
+			if (!strcmp(name, bg_record->nodes)) {
+				xfree(name);
 				list_iterator_destroy(itr);
 				/* don't create total already there */
 				goto no_total;	
@@ -678,151 +1087,103 @@ extern int create_static_blocks(List block_list)
 		}
 		list_iterator_destroy(itr);
 	} else {
-		error("create_static_blocks: no bg_found_block_list 2");
+		error("create_full_system_block: no bg_found_block_list 2");
 	}
 	
 	if(bg_list) {
 		itr = list_iterator_create(bg_list);
-		while ((found_record = (bg_record_t *) list_next(itr)) 
+		while ((bg_record = (bg_record_t *) list_next(itr)) 
 		       != NULL) {
-			if (!strcmp(bg_record->nodes, found_record->nodes)) {
-				destroy_bg_record(bg_record);
+			if (!strcmp(name, bg_record->nodes)) {
+				xfree(name);
 				list_iterator_destroy(itr);
-				/* don't create total already defined */
+				/* don't create total already there */
 				goto no_total;	
 			}
 		}
 		list_iterator_destroy(itr);
 	} else {
-		error("create_static_blocks: no bg_list 3");
-		slurm_mutex_unlock(&block_state_mutex);
-		return SLURM_ERROR;
-	}
-	full_system_block = bg_record;
-	bg_record->bg_block_list = list_create(NULL);			
-	bg_record->hostlist = hostlist_create(NULL);
-	/* bg_record->boot_state = 0;		Implicit */
-	_process_nodes(bg_record);
-	list_append(bg_list, bg_record);
-	
-	bg_record->conn_type = SELECT_TORUS;
-	bg_record->user_name = xstrdup(slurmctld_conf.slurm_user_name);
-	bg_record->target_name = xstrdup(slurmctld_conf.slurm_user_name);
-	if((pw_ent = getpwnam(bg_record->user_name)) == NULL) {
-		error("getpwnam(%s): %m", bg_record->user_name);
-		slurm_mutex_unlock(&block_state_mutex);
-		return SLURM_ERROR;
-	} else {
-		bg_record->user_uid = pw_ent->pw_uid;
+		xfree(name);
+		error("create_overlapped_blocks: no bg_list 3");
+		rc = SLURM_ERROR;
+		goto no_total;
 	}
-	
-	name = set_bg_block(NULL,
-			    bg_record->start, 
-			    bg_record->geo, 
-			    bg_record->conn_type);
 
-	if(!name) {
-		error("I was unable to make the "
-		      "requested block.");
-		slurm_mutex_unlock(&block_state_mutex);
-		return SLURM_ERROR;
-	}
+	records = list_create(destroy_bg_record);
+	_add_bg_record(records, name, SELECT_TORUS, 0, 0);
 	xfree(name);
-	bg_record->node_use = SELECT_COPROCESSOR_MODE;
-	bg_record->cnodes_per_bp = procs_per_node;
+	
+	while((bg_record = (bg_record_t *) list_pop(records)) != NULL) {
+		name = set_bg_block(NULL,
+				    bg_record->start, 
+				    geo, 
+				    bg_record->conn_type);		
+		if(!name) {
+			error("I was unable to make the "
+			      "requested block.");
+			rc = SLURM_ERROR;
+			goto no_total;
+		}
+		xfree(name);
+		
 #ifdef HAVE_BG_FILES
-	if((rc = configure_block(bg_record)) == SLURM_ERROR) {
-		slurm_mutex_unlock(&block_state_mutex);
-		return rc;
-	}
-	print_bg_record(bg_record);
-
-#else
-	if(bg_list) {
-		itr = list_iterator_create(bg_list);
-		while ((bg_record = (bg_record_t*) list_next(itr))) {
-			if (bg_record->bg_block_id)
-				continue;
-			bg_record->bg_block_id = xmalloc(8);
-			bg_record->job_running = -1;
-			snprintf(bg_record->bg_block_id, 8, "RMP%d", 
-				 block_inx++);
-			info("BG BlockID:%s Nodes:%s Conn:%s Mode:%s",
-			     bg_record->bg_block_id, bg_record->nodes,
-			     convert_conn_type(bg_record->conn_type),
-			     convert_node_use(bg_record->node_use));
+		if((rc = configure_block(bg_record)) == SLURM_ERROR) {
+			error("unable to configure block in api");
+			goto no_total;
 		}
-		list_iterator_destroy(itr);
-	} else {
-		error("create_static_blocks: no bg_list 4");
-		slurm_mutex_unlock(&block_state_mutex);
-		return SLURM_ERROR;
-	}
 #endif	/* HAVE_BG_FILES */
-	
+		list_push(bg_list, bg_record);
+	}
 no_total:
-	if(bg_list) {
-		itr = list_iterator_create(bg_list);
-		while ((bg_record = (bg_record_t*) list_next(itr)) != NULL) {
-#ifdef HAVE_BG_FILES
-			if ((bg_record->geo[X] == DIM_SIZE[X])
-			&&  (bg_record->geo[Y] == DIM_SIZE[Y])
-			&&  (bg_record->geo[Z] == DIM_SIZE[Z])) {
-#else
-			if ((bg_record->geo[X] == max_dim[X]+1)
-			&&  (bg_record->geo[Y] == max_dim[Y]+1)
-			&&  (bg_record->geo[Z] == max_dim[Z]+1)) {
-#endif
-				debug("full partiton = %s.", 
-				      bg_record->bg_block_id);
-				bg_record->full_block = 1;
-				full_system_block = bg_record;
+	if(records)
+		list_destroy(records);
+	slurm_mutex_unlock(&block_state_mutex);
+	return rc;
+}
+
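+/* Remove the given bg_record from my_bg_list without destroying it.
+ * Returns SLURM_SUCCESS if the record was found and removed. */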
+extern int remove_from_bg_list(List my_bg_list, bg_record_t *bg_record)
+{
+	bg_record_t *found_record = NULL;
+	ListIterator itr;
+	int rc = SLURM_ERROR;
+
+	slurm_mutex_lock(&block_state_mutex);	
+	itr = list_iterator_create(my_bg_list);
+	while ((found_record = (bg_record_t *) list_next(itr)) != NULL) {
+		if(found_record && bg_record)
+			if(bg_record == found_record) {
+				list_remove(itr);
+				rc = SLURM_SUCCESS;
 				break;
 			}
-		}
-		list_iterator_destroy(itr);
-	} else {
-		error("create_static_blocks: no bg_list 5");
 	}
-	last_bg_update = time(NULL);
+	list_iterator_destroy(itr);
 	slurm_mutex_unlock(&block_state_mutex);
-#ifdef _PRINT_BLOCKS_AND_EXIT
-	if(bg_list) {
-		itr = list_iterator_create(bg_list);
-		debug("\n\n");
-		while ((found_record = (bg_record_t *) list_next(itr)) 
-		       != NULL) {
-			print_bg_record(found_record);
-		}
-		list_iterator_destroy(itr);
-	} else {
-		error("create_static_blocks: no bg_list 5");
-	}
- 	exit(0);
-#endif	/* _PRINT_BLOCKS_AND_EXIT */
-	rc = SLURM_SUCCESS;
-	//exit(0);
+
 	return rc;
 }
 
 extern int bg_free_block(bg_record_t *bg_record)
 {
-#ifdef HAVE_BG_FILES
 	int rc;
+	
 	if(!bg_record) {
 		error("bg_free_block: there was no bg_record");
 		return SLURM_ERROR;
 	}
+	
 	while (1) {
-		if (bg_record->state != -1
+		if (bg_record->state != NO_VAL
 		    && bg_record->state != RM_PARTITION_FREE 
 		    && bg_record->state != RM_PARTITION_DEALLOCATING) {
+#ifdef HAVE_BG_FILES
 			debug("pm_destroy %s",bg_record->bg_block_id);
 			if ((rc = pm_destroy_partition(
 				     bg_record->bg_block_id)) 
 			    != STATUS_OK) {
 				if(rc == PARTITION_NOT_FOUND) {
-					debug("block %s is not found");
+					debug("block %s is not found",
+					      bg_record->bg_block_id);
 					break;
 				}
 				error("pm_destroy_partition(%s): %s "
@@ -830,6 +1191,11 @@ extern int bg_free_block(bg_record_t *bg_record)
 				      bg_record->bg_block_id, 
 				      bg_err_str(rc), bg_record->state);
 			}
+#else
+			slurm_mutex_lock(&block_state_mutex);
+			bg_record->state = RM_PARTITION_FREE;	
+			slurm_mutex_unlock(&block_state_mutex);
+#endif
 		}
 		
 		if ((bg_record->state == RM_PARTITION_FREE)
@@ -837,55 +1203,163 @@ extern int bg_free_block(bg_record_t *bg_record)
 			break;
 		sleep(3);
 	}
-#endif
+	remove_from_bg_list(bg_booted_block_list, bg_record);
+	
 	return SLURM_SUCCESS;
 }
 
 /* Free multiple blocks in parallel */
 extern void *mult_free_block(void *args)
 {
-#ifdef HAVE_BG_FILES
-	bg_record_t *bg_record = (bg_record_t*) args;
+	bg_record_t *bg_record = NULL;
+	
+	/*
+	 * Don't just exit when there is no work left. Creating 
+	 * pthreads from within a dynamically linked object (plugin)
+	 * causes large memory leaks on some systems that seem 
+	 * unavoidable even from detached pthreads.
+	 */
+	while (!agent_fini) {
+		slurm_mutex_lock(&freed_cnt_mutex);
+		bg_record = list_dequeue(bg_free_block_list);
+		slurm_mutex_unlock(&freed_cnt_mutex);
+		if (!bg_record) {
+			usleep(100000);
+			continue;
+		}
+		debug("freeing the block %s.", bg_record->bg_block_id);
+		bg_free_block(bg_record);	
+		debug("done\n");
+		slurm_mutex_lock(&freed_cnt_mutex);
+		num_block_freed++;
+		slurm_mutex_unlock(&freed_cnt_mutex);
+	}
+	slurm_mutex_lock(&freed_cnt_mutex);
+	free_cnt--;
+	slurm_mutex_unlock(&freed_cnt_mutex);	
+	return NULL;
+}
+
+/* destroy multiple blocks in parallel */
+extern void *mult_destroy_block(void *args)
+{
+	bg_record_t *bg_record = NULL;
+	int rc;
 
-	debug("freeing the block %s.", bg_record->bg_block_id);
-	bg_free_block(bg_record);	
-	debug("done\n");
+	/*
+	 * Don't just exit when there is no work left. Creating 
+	 * pthreads from within a dynamically linked object (plugin)
+	 * causes large memory leaks on some systems that seem 
+	 * unavoidable even from detached pthreads.
+	 */
+	while (!agent_fini) {
+		slurm_mutex_lock(&freed_cnt_mutex);
+		bg_record = list_dequeue(bg_destroy_block_list);
+		slurm_mutex_unlock(&freed_cnt_mutex);
+		if (!bg_record) {
+			usleep(100000);
+			continue;
+		}
+		debug("removing the jobs on block %s\n",
+		      bg_record->bg_block_id);
+		term_jobs_on_block(bg_record->bg_block_id);
+		
+		debug2("destroying %s", (char *)bg_record->bg_block_id);
+		bg_free_block(bg_record);
+		
+#ifdef HAVE_BG_FILES
+		rc = rm_remove_partition(
+			bg_record->bg_block_id);
+		if (rc != STATUS_OK) {
+			error("rm_remove_partition(%s): %s",
+			      bg_record->bg_block_id,
+			      bg_err_str(rc));
+		} else
+			debug("done\n");
+#endif
+		slurm_mutex_lock(&freed_cnt_mutex);
+		num_block_freed++;
+		destroy_bg_record(bg_record);
+		slurm_mutex_unlock(&freed_cnt_mutex);
+	}
 	slurm_mutex_lock(&freed_cnt_mutex);
-	num_block_freed++;
-	slurm_mutex_unlock(&freed_cnt_mutex);
-#endif	
+	destroy_cnt--;
+	slurm_mutex_unlock(&freed_cnt_mutex);	
 	return NULL;
 }
 
-/* destroy multiple blocks in parallel */
-extern void *mult_destroy_block(void *args)
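+/* Queue every block on delete_list to be freed (or destroyed completely
+ * in dynamic layout mode) and spawn agent threads to do the work in
+ * parallel. */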
+extern int free_block_list(List delete_list)
 {
-#ifdef HAVE_BG_FILES
-	bg_record_t *bg_record = (bg_record_t*) args;
-	int rc;
-
-	debug("removing the jobs on block %s\n",
-	      bg_record->bg_block_id);
-	term_jobs_on_block(bg_record->bg_block_id);
-	
-	debug("destroying %s\n",
-	      (char *)bg_record->bg_block_id);
-	bg_free_block(bg_record);
+	bg_record_t *found_record = NULL;
+	int retries;
+	List *block_list = NULL;
+	int *count = NULL;
+	pthread_attr_t attr_agent;
+	pthread_t thread_agent;
 	
-	rc = rm_remove_partition(
-		bg_record->bg_block_id);
-	if (rc != STATUS_OK) {
-		error("rm_remove_partition(%s): %s",
-		      bg_record->bg_block_id,
-		      bg_err_str(rc));
-	} else
-		debug("done\n");
 	slurm_mutex_lock(&freed_cnt_mutex);
-	num_block_freed++;
+	/* set up which list to push onto */
+	if(bluegene_layout_mode == LAYOUT_DYNAMIC) {
+		block_list = &bg_destroy_block_list;
+		count = &destroy_cnt;
+	} else {
+		block_list = &bg_free_block_list;
+		count = &free_cnt;
+	}
+	if ((*block_list == NULL) 
+	    && ((*block_list = list_create(NULL)) == NULL))
+		fatal("malloc failure in free_block_list");
+	/* if we are already running MAX_AGENT_COUNT agents don't start
+	   more since they never end */
+	
+	while ((found_record = (bg_record_t*)list_pop(delete_list)) != NULL) {
+		/* push job onto queue in a FIFO */
+		if (list_push(*block_list, found_record) == NULL)
+			fatal("malloc failure in _block_op/list_push");
+		
+		if (*count > MAX_AGENT_COUNT) 
+			continue;
+		
+		(*count)++;
+		
+		slurm_attr_init(&attr_agent);
+		if (pthread_attr_setdetachstate(
+			    &attr_agent, 
+			    PTHREAD_CREATE_DETACHED))
+			error("pthread_attr_setdetachstate error %m");
+		retries = 0;
+		if(bluegene_layout_mode == LAYOUT_DYNAMIC) {
+			while (pthread_create(&thread_agent, 
+					      &attr_agent, 
+					      mult_destroy_block,
+					      NULL)) {
+				error("pthread_create "
+				      "error %m");
+				if (++retries > MAX_PTHREAD_RETRIES)
+					fatal("Can't create "
+					      "pthread");
+				/* sleep and retry */
+				usleep(1000);	
+			}
+		} else {
+			while (pthread_create(&thread_agent, 
+					      &attr_agent, 
+					      mult_free_block, 
+					      NULL)) {
+				error("pthread_create "
+				      "error %m");
+				if (++retries > MAX_PTHREAD_RETRIES)
+					fatal("Can't create "
+					      "pthread");
+				/* sleep and retry */
+				usleep(1000);	
+			}
+		}
+		pthread_attr_destroy(&attr_agent);
+	}
 	slurm_mutex_unlock(&freed_cnt_mutex);
-
-#endif	
-	return NULL;
+			
+	return SLURM_SUCCESS;
 }
 
 /*
@@ -977,11 +1451,24 @@ extern int read_bg_conf(void)
 		fatal("MloaderImage not configured in bluegene.conf");
 	if (!bluegene_ramdisk)
 		fatal("RamDiskImage not configured in bluegene.conf");
+	if (!bluegene_bp_node_cnt)
+		fatal("BasePartitionNodeCnt not configured in bluegene.conf "
+		      "make sure it is set before any BPs= line");
+	if (!bluegene_segment_node_cnt)
+		fatal("NodeCardNodeCnt not configured in bluegene.conf "
+		      "make sure it is set before any BPs= line");
+
+	if (bluegene_layout_mode == NO_VAL) {
+		info("Warning: LayoutMode was not specified in bluegene.conf "
+		     "defaulting to STATIC partitioning");
+		bluegene_layout_mode = LAYOUT_STATIC;
+	}
+	
 	if (!bridge_api_file)
 		info("BridgeAPILogFile not configured in bluegene.conf");
 	else
 		_reopen_bridge_log();	
-	if (!numpsets)
+	if (!bluegene_numpsets)
 		info("Warning: Numpsets not configured in bluegene.conf");
 //#if 0	
 	/* Check to see if the configs we have are correct */
@@ -990,14 +1477,23 @@ extern int read_bg_conf(void)
 	}
 //#endif
 	/* looking for blocks only I created */
-	if (create_static_blocks(NULL) == SLURM_ERROR) {
-		/* error in creating the static blocks, so
-		 * blocks referenced by submitted jobs won't
-		 * correspond to actual slurm blocks.
-		 */
-		fatal("Error, could not create the static blocks");
-		return SLURM_ERROR;
-	}
+	if(bluegene_layout_mode == LAYOUT_DYNAMIC) {
+		init_wires();
+		slurm_mutex_lock(&block_state_mutex);
+		last_bg_update = time(NULL);
+		slurm_mutex_unlock(&block_state_mutex);
+		info("No blocks created until jobs are submitted");
+	} else {
+		if (create_defined_blocks(bluegene_layout_mode) 
+		    == SLURM_ERROR) {
+			/* error in creating the static blocks, so
+			 * blocks referenced by submitted jobs won't
+			 * correspond to actual slurm blocks.
+			 */
+			fatal("Error, could not create the static blocks");
+			return SLURM_ERROR;
+		}
+	} 
 	debug("Blocks have finished being created.");
 	blocks_are_created = 1;
 	
@@ -1153,28 +1649,24 @@ static void _set_bg_lists()
 	bg_record_t *bg_record = NULL;
 	
 	slurm_mutex_lock(&block_state_mutex);
-	if (bg_found_block_list) {
-		while ((bg_record = list_pop(bg_found_block_list)) != NULL) {
-		}
-	} else
-		bg_found_block_list = list_create(NULL);
+	if (bg_found_block_list) 
+		list_destroy(bg_found_block_list);
+	bg_found_block_list = list_create(NULL);
+	if (bg_booted_block_list) 
+		list_destroy(bg_booted_block_list);
+	bg_booted_block_list = list_create(NULL);
+	if (bg_job_block_list) 
+		list_destroy(bg_job_block_list);
+	bg_job_block_list = list_create(NULL);	
+	if (bg_curr_block_list)
+		list_destroy(bg_curr_block_list);	
+	bg_curr_block_list = list_create(destroy_bg_record);
 	
-	if (bg_curr_block_list){
-		while ((bg_record = list_pop(bg_curr_block_list)) != NULL){
-			destroy_bg_record(bg_record);
-		}
-	} else
-		bg_curr_block_list = list_create(destroy_bg_record);
 	
-/* empty the old list before reading new data */
-	if (bg_list) {
-		while ((bg_record = list_pop(bg_list)) != NULL) {
-			destroy_bg_record(bg_record);		
-		}
-	} else
-		bg_list = list_create(destroy_bg_record);
-	slurm_mutex_unlock(&block_state_mutex);
-		
+	if (bg_list) 
+		list_destroy(bg_list);
+	bg_list = list_create(destroy_bg_record);
+	slurm_mutex_unlock(&block_state_mutex);		
 }
 
 /*
@@ -1187,12 +1679,12 @@ static int _validate_config_nodes(void)
 {
 	int rc = SLURM_ERROR;
 #ifdef HAVE_BG_FILES
-	bg_record_t* record = NULL;	
-	bg_record_t* init_record = NULL;
+	bg_record_t* bg_record = NULL;	
+	bg_record_t* init_bg_record = NULL;
 	ListIterator itr_conf;
 	ListIterator itr_curr;
 	rm_partition_mode_t node_use;
-	
+	char tmp_char[256];
 	/* read current bg block info into bg_curr_block_list */
 	if (read_bg_blocks() == SLURM_ERROR)
 		return SLURM_ERROR;
@@ -1202,7 +1694,7 @@ static int _validate_config_nodes(void)
 	
 	if(bg_list) {
 		itr_conf = list_iterator_create(bg_list);
-		while ((record = (bg_record_t*) list_next(itr_conf))) {
+		while ((bg_record = (bg_record_t*) list_next(itr_conf))) {
 			/* translate hostlist to ranged 
 			   string for consistent format
 			   search here 
@@ -1212,31 +1704,23 @@ static int _validate_config_nodes(void)
 			if(bg_curr_block_list) {
 				itr_curr = list_iterator_create(
 					bg_curr_block_list);	
-				while ((init_record = (bg_record_t*) 
+				while ((init_bg_record = (bg_record_t*) 
 					list_next(itr_curr)) 
 				       != NULL) {
-					if (strcasecmp(record->nodes, 
-						       init_record->nodes))
+					if (strcasecmp(bg_record->nodes, 
+						       init_bg_record->nodes))
 						continue; /* wrong nodes */
-					if (record->conn_type 
-					    != init_record->conn_type)
+					if (bg_record->conn_type 
+					    != init_bg_record->conn_type)
 						continue; /* wrong conn_type */
-					if(record->quarter !=
-					    init_record->quarter)
+					if(bg_record->quarter !=
+					    init_bg_record->quarter)
 						continue; /* wrong quart */
-					record->bg_block_id = xstrdup(
-						init_record->bg_block_id);
-					record->state = init_record->state;
-					record->node_use = 
-						init_record->node_use;
-					record->user_uid = 
-						init_record->user_uid;
-					record->user_name = xstrdup(
-						init_record->user_name);
-					record->target_name = xstrdup(
-						init_record->target_name);
-					record->boot_state = 
-						init_record->boot_state;
+					if(bg_record->segment !=
+					    init_bg_record->segment)
+						continue; /* wrong segment */
+					copy_bg_record(init_bg_record, 
+						       bg_record);
 					break;
 				}
 				list_iterator_destroy(itr_curr);
@@ -1244,93 +1728,63 @@ static int _validate_config_nodes(void)
 				error("_validate_config_nodes: "
 				      "no bg_curr_block_list");
 			}
-			if (!record->bg_block_id) {
+			if (!bg_record->bg_block_id) {
+				_format_node_name(bg_record, tmp_char);
+				
 				info("Block found in bluegene.conf to be "
 				     "created: Nodes:%s", 
-				     record->nodes);
+				     tmp_char);
 				rc = SLURM_ERROR;
 			} else {
-				list_append(bg_found_block_list, record);
+				list_append(bg_found_block_list, bg_record);
+				_format_node_name(bg_record, tmp_char);
+				
 				info("Found existing BG BlockID:%s "
-				     "Nodes:%s Conn:%s Mode:%s",
-				     record->bg_block_id, 
-				     record->nodes,
-				     convert_conn_type(record->conn_type),
-				     convert_node_use(record->node_use));
+				     "Nodes:%s Conn:%s",
+				     bg_record->bg_block_id, 
+				     tmp_char,
+				     convert_conn_type(bg_record->conn_type));
 			}
 		}		
 		list_iterator_destroy(itr_conf);
 		if(bg_curr_block_list) {
 			itr_curr = list_iterator_create(
 				bg_curr_block_list);
-			while ((init_record = (bg_record_t*) 
+			while ((init_bg_record = (bg_record_t*) 
 				list_next(itr_curr)) 
 			       != NULL) {
-				_process_nodes(init_record);
+				_process_nodes(init_bg_record);
 				debug3("%s %d %d%d%d %d%d%d",
-				       init_record->bg_block_id, 
-				       init_record->bp_count, 
-				       init_record->geo[X],
-				       init_record->geo[Y],
-				       init_record->geo[Z],
+				       init_bg_record->bg_block_id, 
+				       init_bg_record->bp_count, 
+				       init_bg_record->geo[X],
+				       init_bg_record->geo[Y],
+				       init_bg_record->geo[Z],
 				       DIM_SIZE[X],
 				       DIM_SIZE[Y],
 				       DIM_SIZE[Z]);
-				if ((init_record->geo[X] == DIM_SIZE[X])
-				    && (init_record->geo[Y] == DIM_SIZE[Y])
-				    && (init_record->geo[Z] == DIM_SIZE[Z])) {
-					record = (bg_record_t*) 
+				if ((init_bg_record->geo[X] == DIM_SIZE[X])
+				    && (init_bg_record->geo[Y] == DIM_SIZE[Y])
+				    && (init_bg_record->geo[Z] == DIM_SIZE[Z]))
+				{
+					bg_record = (bg_record_t*) 
 						xmalloc(sizeof(bg_record_t));
-					list_append(bg_list, record);
-					full_system_block = record;
-					record->full_block = 1;
-					record->bg_block_id = xstrdup(
-						init_record->bg_block_id);
-					debug("full system %s",
-					      record->bg_block_id);
-					record->nodes = xstrdup(
-						init_record->nodes);
-					record->state = init_record->state;
-					record->node_use =
-						init_record->node_use;
-					record->user_uid =
-						init_record->user_uid;
-					record->user_name = xstrdup(
-						init_record->user_name);
-					record->target_name = xstrdup(
-						init_record->target_name);
-					record->conn_type = 
-						init_record->conn_type;
-					record->node_use = 
-						init_record->node_use;
-					record->bp_count = 
-						init_record->bp_count;
-					record->boot_state = 
-						init_record->boot_state;
-					record->switch_count = 
-						init_record->switch_count;
-					record->cnodes_per_bp = 
-						init_record->cnodes_per_bp;
-					record->quarter = 
-						init_record->quarter;
-					if((record->bitmap = 
-					    bit_copy(init_record->bitmap)) 
-					   == NULL) {
-						error("Unable to copy "
-						      "bitmap for", 
-						      init_record->nodes);
-					}
+					list_append(bg_list, bg_record);
 					list_append(bg_found_block_list, 
-						    record);
+						    bg_record);
+					copy_bg_record(init_bg_record, 
+						       bg_record);
+					bg_record->full_block = 1;
+					debug("full system %s",
+					      bg_record->bg_block_id);
+					_format_node_name(bg_record, tmp_char);
 					info("Found existing BG "
 					     "BlockID:%s "
-					     "Nodes:%s Conn:%s Mode:%s",
-					     record->bg_block_id, 
-					     record->nodes,
+					     "Nodes:%s Conn:%s",
+					     bg_record->bg_block_id, 
+					     tmp_char,
 					     convert_conn_type(
-						     record->conn_type),
-					     convert_node_use(
-						     record->node_use));
+						     bg_record->conn_type));
 					break;
 				}
 			}
@@ -1359,14 +1813,29 @@ static int _validate_config_nodes(void)
  */
 static int _bg_record_cmpf_inc(bg_record_t* rec_a, bg_record_t* rec_b)
 {
-	int size_a = rec_a->bp_count * rec_a->cnodes_per_bp;
-	int size_b = rec_b->bp_count * rec_b->cnodes_per_bp;
+	int size_a = rec_a->node_cnt;
+	int size_b = rec_b->node_cnt;
 	if (size_a < size_b)
 		return -1;
 	else if (size_a > size_b)
 		return 1;
-	else
-		return 0;
+	size_a = strcmp(rec_a->nodes, rec_b->nodes);
+	if (size_a < 0)
+		return -1;
+	else if (size_a > 0)
+		return 1;
+	
+	if (rec_a->quarter < rec_b->quarter)
+		return -1;
+	else if (rec_a->quarter > rec_b->quarter)
+		return 1;
+
+	if(rec_a->segment < rec_b->segment)
+		return -1;
+	else if(rec_a->segment > rec_b->segment)
+		return 1;
+
+	return 0;
 }
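For context, this comparator uses the two-argument convention of SLURM's list API, so it can be handed straight to list_sort(); a minimal usage sketch (assuming the ListCmpF typedef from src/common/list.h) of how a sort routine such as sort_bg_record_inc_size() might apply it:

	/* order bg_list from smallest to largest block, then by
	 * nodes string, quarter and segment */
	list_sort(bg_list, (ListCmpF) _bg_record_cmpf_inc);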
 
 static int _delete_old_blocks(void)
@@ -1381,36 +1850,13 @@ static int _delete_old_blocks(void)
 
 	num_block_to_free = 0;
 	num_block_freed = 0;
-				
+
 	if(!bg_recover) {
 		if(bg_curr_block_list) {
 			itr_curr = list_iterator_create(bg_curr_block_list);
-			while ((init_record = (bg_record_t*) 
-				list_next(itr_curr))) {
-				slurm_attr_init(&attr_agent);
-				if (pthread_attr_setdetachstate(
-					    &attr_agent, 
-					    PTHREAD_CREATE_JOINABLE))
-					error("pthread_attr_setdetach"
-						      "state error %m");
-
+			while ((init_record = 
+				(bg_record_t*)list_next(itr_curr))) {
 				list_push(bg_destroy_list, init_record);
-				retries = 0;
-				while (pthread_create(&thread_agent, 
-						      &attr_agent, 
-						      mult_destroy_block, 
-						      (void *)
-						      init_record)) {
-					error("pthread_create "
-					      "error %m");
-					if (++retries 
-					    > MAX_PTHREAD_RETRIES)
-						fatal("Can't create "
-						      "pthread");
-					/* sleep and retry */
-					usleep(1000);	
-				}
-				num_block_to_free++;
 			}
 			list_iterator_destroy(itr_curr);
 		} else {
@@ -1446,33 +1892,8 @@ static int _delete_old_blocks(void)
 					return SLURM_ERROR;
 				}
 				if(found_record == NULL) {
-					debug("deleting %s",
-					      init_record->bg_block_id);
-					slurm_attr_init(&attr_agent);
-					if (pthread_attr_setdetachstate(
-						    &attr_agent, 
-						    PTHREAD_CREATE_JOINABLE))
-						error("pthread_attr_setdetach"
-						      "state error %m");
-				
 					list_push(bg_destroy_list, 
 						  init_record);
-					retries = 0;
-					while (pthread_create(
-						       &thread_agent, 
-						       &attr_agent, 
-						       mult_destroy_block, 
-						       (void *)init_record)) {
-						error("pthread_create "
-						      "error %m");
-						if (++retries 
-						    > MAX_PTHREAD_RETRIES)
-							fatal("Can't create "
-							      "pthread");
-						/* sleep and retry */
-						usleep(1000);	
-					}
-					num_block_to_free++;
 				}
 			}		
 			list_iterator_destroy(itr_curr);
@@ -1482,6 +1903,44 @@ static int _delete_old_blocks(void)
 			return SLURM_ERROR;
 		}
 	}
+
+	slurm_mutex_lock(&freed_cnt_mutex);
+	if ((bg_destroy_block_list == NULL) 
+	    && ((bg_destroy_block_list = list_create(NULL)) == NULL))
+		fatal("malloc failure in block_list");
+	
+	itr_curr = list_iterator_create(bg_destroy_list);
+	while ((init_record = (bg_record_t*) list_next(itr_curr))) {
+		list_push(bg_destroy_block_list, init_record);
+		num_block_to_free++;
+		if (destroy_cnt > MAX_AGENT_COUNT) 
+			continue;
+		
+		destroy_cnt++;
+
+		slurm_attr_init(&attr_agent);
+		if (pthread_attr_setdetachstate(&attr_agent, 
+						PTHREAD_CREATE_DETACHED))
+			error("pthread_attr_setdetachstate error %m");
+		
+		retries = 0;
+		while (pthread_create(&thread_agent, 
+				      &attr_agent, 
+				      mult_destroy_block, 
+				      NULL)) {
+			error("pthread_create "
+			      "error %m");
+			if (++retries > MAX_PTHREAD_RETRIES)
+				fatal("Can't create "
+				      "pthread");
+			/* sleep and retry */
+			usleep(1000);	
+		}
+		pthread_attr_destroy(&attr_agent);
+	}
+	list_iterator_destroy(itr_curr);
+	slurm_mutex_unlock(&freed_cnt_mutex);
+	
 	retries=30;
 	while(num_block_to_free != num_block_freed) {
 		_update_bg_record_state(bg_destroy_list);
@@ -1495,6 +1954,7 @@ static int _delete_old_blocks(void)
 		retries++;
 		sleep(1);
 	}
+	
 	list_destroy(bg_destroy_list);
 	
 #endif	
@@ -1537,6 +1997,255 @@ static void _strip_13_10(char *line)
 	}
 }
 
+static int _split_block(bg_record_t *bg_record, int procs, int *block_inx) 
+{
+	bg_record_t *found_record = NULL;
+	ba_node_t *ba_node = NULL;
+	bool full_bp = false; 
+	int small_count = 0;
+	int small_size = 0;
+	int num_segment = 0, num_quarter = 0;
+	int i;
+	int node_cnt = 0;
+	int quarter = 0;
+	int segment = 0;
+#ifdef HAVE_BG_FILES
+	int rc;		/* return code from configure_block() */
+#endif
+
+	if(bg_record->quarter == NO_VAL)
+		full_bp = true;
+	
+	if(procs == (procs_per_node/16)) {
+		num_segment=4;
+		if(full_bp)
+			num_quarter=3;
+	} else if(full_bp) {
+		num_quarter = 4;
+	} else {
+		error("_split_block: requested size is not smaller "
+		      "than the existing block");
+		return SLURM_ERROR;
+	}
+	debug("asking for %d nodecard blocks from a %d c-node block",
+	      num_segment, bg_record->node_cnt);
+	small_count = num_segment+num_quarter; 
+
+	ba_node = list_pop(bg_record->bg_block_list);
+	/* break base partition up into 16 parts */
+	small_size = bluegene_bp_node_cnt/bluegene_segment_node_cnt;
+	node_cnt = 0;
+	if(!full_bp)
+		quarter = bg_record->quarter;
+	else
+		quarter = 0;
+	segment = 0;
+	for(i=0; i<small_count; i++) {
+		if(i == num_segment) {
+			/* break base partition up into 4 parts */
+			small_size = 4;
+		}
+		
+		if(small_size == 4)
+			segment = NO_VAL;
+		else
+			segment = i%4; 
+		found_record = _create_small_record(bg_record,
+						    quarter,
+						    segment);
+#ifdef HAVE_BG_FILES
+		if((rc = configure_block(found_record)) == SLURM_ERROR) {
+			xfree(found_record);
+			slurm_mutex_unlock(&block_state_mutex);
+			error("unable to configure block in api");
+			return SLURM_ERROR;
+		}
+#else
+		found_record->bg_block_id = xmalloc(sizeof(char)*8);
+		snprintf(found_record->bg_block_id, 8, "RMP%d", 
+			 (*block_inx)++);
+#endif
+		list_push(bg_list, found_record);
+		print_bg_record(found_record);
+		node_cnt += bluegene_bp_node_cnt/small_size;
+		if(node_cnt == 128) {
+			node_cnt = 0;
+			quarter++;
+		}
+	}
+			
+	
+	return SLURM_SUCCESS;
+}
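To make the quarter/segment bookkeeping above easier to follow, here is a standalone sketch (editorial illustration, not part of the patch) that walks the same loop for an assumed 512-node base partition being split into 4 nodecards plus 3 quarters; the constants 512, 32 and 128 stand in for bluegene_bp_node_cnt, bluegene_segment_node_cnt and bluegene_quarter_node_cnt:

#include <stdio.h>

int main(void)
{
	int bp_node_cnt = 512, segment_node_cnt = 32;
	int num_segment = 4, num_quarter = 3;
	int small_count = num_segment + num_quarter;
	int small_size = bp_node_cnt / segment_node_cnt; /* 16-way split first */
	int node_cnt = 0, quarter = 0, segment, i;

	for (i = 0; i < small_count; i++) {
		if (i == num_segment)
			small_size = 4;	/* switch to quarter-sized blocks */
		segment = (small_size == 4) ? -1 : i % 4; /* -1 stands in for NO_VAL */
		printf("block %d: %3d c-nodes, quarter %d, segment %d\n",
		       i, bp_node_cnt / small_size, quarter, segment);
		node_cnt += bp_node_cnt / small_size;
		if (node_cnt == 128) {	/* a full quarter has been consumed */
			node_cnt = 0;
			quarter++;
		}
	}
	return 0;	/* 4 x 32 c-nodes + 3 x 128 c-nodes = 512 */
}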
+
+static bg_record_t *_create_small_record(bg_record_t *bg_record, 
+					 int quarter, int segment)
+{
+	bg_record_t *found_record = NULL;
+	int small_size = 4;
+	
+	found_record = (bg_record_t*) xmalloc(sizeof(bg_record_t));
+				
+	found_record->job_running = NO_VAL;
+	found_record->user_name = xstrdup(bg_record->user_name);
+	found_record->user_uid = bg_record->user_uid;
+	found_record->bg_block_list = list_create(NULL);
+	found_record->hostlist = hostlist_create(NULL);
+	found_record->nodes = xstrdup(bg_record->nodes);
+
+	_process_nodes(found_record);
+				
+	found_record->conn_type = SELECT_SMALL;
+				
+	found_record->node_use = SELECT_COPROCESSOR_MODE;
+	if(segment != NO_VAL)
+		small_size = 16;
+	found_record->cpus_per_bp = procs_per_node/small_size;
+	found_record->node_cnt = bluegene_bp_node_cnt/small_size;
+	found_record->quarter = quarter; 
+	found_record->segment = segment;
+			
+	return found_record;
+}
+
+static int _add_bg_record(List records, char *nodes, 
+			  rm_connection_type_t conn_type, 
+			  int num_segment, int num_quarter)
+{
+	bg_record_t *bg_record = NULL;
+	bg_record_t *found_record = NULL;
+	ba_node_t *ba_node = NULL;
+	ListIterator itr;
+	struct passwd *pw_ent = NULL;
+	int i, len;
+	int small_size = 0;
+	int small_count = 0;
+	int quarter = 0;
+	int segment = 0;
+	int node_cnt = 0;
+	
+	bg_record = (bg_record_t*) xmalloc(sizeof(bg_record_t));
+	
+	bg_record->user_name = 
+		xstrdup(slurmctld_conf.slurm_user_name);
+	bg_record->target_name = 
+		xstrdup(slurmctld_conf.slurm_user_name);
+	if((pw_ent = getpwnam(bg_record->user_name)) == NULL) {
+		error("getpwnam(%s): %m", bg_record->user_name);
+	} else {
+		bg_record->user_uid = pw_ent->pw_uid;
+	}
+	bg_record->bg_block_list = list_create(NULL);		
+	bg_record->hostlist = hostlist_create(NULL);
+	bg_record->quarter = NO_VAL;
+	bg_record->segment = NO_VAL;
+	/* bg_record->boot_state = 0; 	Implicit */
+	/* bg_record->state = 0;	Implicit */
+	debug2("asking for %s: %d quarters, %d nodecards",
+	       nodes, num_quarter, num_segment);
+	len = strlen(nodes);
+	i=0;
+	while((nodes[i] != '[' && (nodes[i] > 57 || nodes[i] < 48)) 
+	      && (i<len)) 		
+		i++;
+	
+	if(i<len) {
+		len -= i;
+		bg_record->nodes = xmalloc(sizeof(char)*
+					   (len
+					    +strlen(slurmctld_conf.node_prefix)
+					    +1));
+		
+		sprintf(bg_record->nodes, "%s%s", 
+			slurmctld_conf.node_prefix, nodes+i);
+	} else 
+		fatal("Nodes=%s is in a weird format", nodes); 
+	
+	_process_nodes(bg_record);
+	if(bg_list) {
+		itr = list_iterator_create(bg_list);
+		while ((found_record = 
+			(bg_record_t *) list_next(itr)) != NULL) {
+			if(bit_equal(bg_record->bitmap, found_record->bitmap)
+			   && (bg_record->quarter == found_record->quarter)
+			   && (bg_record->segment == found_record->segment)){
+				debug2("This block %s %d %d "
+				       "already exists here %s",
+				       bg_record->nodes,
+				       bg_record->quarter,
+				       bg_record->segment,
+				       found_record->bg_block_id);
+				list_iterator_destroy(itr);
+				destroy_bg_record(bg_record);
+				return SLURM_SUCCESS;
+			}
+		}
+		list_iterator_destroy(itr);
+	}
+	
+	bg_record->node_use = SELECT_COPROCESSOR_MODE;
+	bg_record->conn_type = conn_type;
+	bg_record->cpus_per_bp = procs_per_node;
+	bg_record->node_cnt = bluegene_bp_node_cnt * bg_record->bp_count;
+	bg_record->job_running = NO_VAL;
+		
+	if(bg_record->conn_type != SELECT_SMALL)
+		list_append(records, bg_record);
+	else {
+		if(num_segment==0 && num_quarter==0) {
+			info("No specs given for this small block, "
+			     "I am splitting this block into 4 quarters");
+			num_quarter=4;
+		}
+		if(((num_segment*bluegene_segment_node_cnt) + 
+		    (num_quarter*bluegene_quarter_node_cnt)) 
+		   != bluegene_bp_node_cnt)
+			fatal("There is an error in your bluegene.conf file.\n"
+			      "I am unable to request %d nodes in one "
+			      "base partition with %d nodes.", 
+			      ((num_segment*bluegene_segment_node_cnt) + 
+			       (num_quarter*bluegene_quarter_node_cnt)), 
+			      bluegene_bp_node_cnt);
+		small_count = num_segment+num_quarter; 
+		
+		/* If conn_type == SELECT_SMALL in bluegene.conf, split
+		 * each base partition listed into the requested number
+		 * of nodecard- and quarter-sized blocks (4 quarters by
+		 * default), doing the same thing for every node listed.
+		 */
+		itr = list_iterator_create(bg_record->bg_block_list);
+		while ((ba_node = list_next(itr)) != NULL) {
+			/* break base partition up into 16 parts */
+			small_size = 16;
+			node_cnt = 0;
+			quarter = 0;
+			segment = 0;
+			for(i=0; i<small_count; i++) {
+				if(i == num_segment) {
+					/* break base partition 
+					   up into 4 parts */
+					small_size = 4;
+				}
+				if(small_size == 4)
+					segment = NO_VAL;
+				else
+					segment = i%4; 
+				found_record = _create_small_record(bg_record,
+								    quarter,
+								    segment);
+								 
+				list_append(records, found_record);
+				node_cnt += bluegene_bp_node_cnt/small_size;
+				if(node_cnt == 128) {
+					node_cnt = 0;
+					quarter++;
+				}
+			}
+		}
+		list_iterator_destroy(itr);
+		destroy_bg_record(bg_record);
+	} 
+	return SLURM_SUCCESS;
+}
+
 /*
  *
  * _parse_bg_spec - parse the block specification, build table and 
@@ -1554,26 +2263,29 @@ static int _parse_bg_spec(char *in_line)
 	char *nodes = NULL, *conn_type = NULL;
 	char *blrts_image = NULL,   *linux_image = NULL;
 	char *mloader_image = NULL, *ramdisk_image = NULL;
-	char *api_file = NULL;
-	int pset_num=-1, api_verb=-1;
-	bg_record_t *bg_record = NULL;
-	bg_record_t *small_bg_record = NULL;
-	ba_node_t *ba_node = NULL;
-	struct passwd *pw_ent = NULL;
-	ListIterator itr;
-	int i=0;
-	
+	char *api_file = NULL, *layout = NULL;
+	int pset_num=-1, api_verb=-1, num_segment=0, num_quarter=0;
+	int bp_node_cnt = 0;
+	int nc_node_cnt = 0;
+	rm_connection_type_t send_conn;
+
 	//info("in_line = %s",in_line);
 	error_code = slurm_parser(in_line,
 				  "BlrtsImage=", 's', &blrts_image,
 				  "LinuxImage=", 's', &linux_image,
 				  "MloaderImage=", 's', &mloader_image,
-				  "Numpsets=", 'd', &pset_num,
 				  "BridgeAPIVerbose=", 'd', &api_verb,
 				  "BridgeAPILogFile=", 's', &api_file,
-				  "Nodes=", 's', &nodes,
 				  "RamDiskImage=", 's', &ramdisk_image,
+				  "Numpsets=", 'd', &pset_num,
+				  "BasePartitionNodeCnt=", 'd', &bp_node_cnt,
+				  "NodeCardNodeCnt=", 'd', &nc_node_cnt,
+				  "LayoutMode=", 's', &layout,
+				  "Nodes=", 's', &nodes,
+				  "BPs=", 's', &nodes,
 				  "Type=", 's', &conn_type,
+				  "Nodecards=", 'd', &num_segment,
+				  "Quarters=", 'd', &num_quarter,
 				  "END");
 
 	if (error_code)
@@ -1605,105 +2317,66 @@ static int _parse_bg_spec(char *in_line)
 		bridge_api_file = api_file;
 		api_file = NULL;	/* nothing left to xfree */
 	}
+	if (layout) {
+		if(!strcasecmp(layout,"STATIC")) 
+			bluegene_layout_mode = LAYOUT_STATIC;
+		else if(!strcasecmp(layout,"OVERLAP")) 
+			bluegene_layout_mode = LAYOUT_OVERLAP;
+		else if(!strcasecmp(layout,"DYNAMIC")) 
+			bluegene_layout_mode = LAYOUT_DYNAMIC;
+		else {
+			fatal("I don't understand this LayoutMode = %s", 
+			      layout);
+		}
+		xfree(layout);
+	}
+	
 	if (pset_num > 0) {
-		numpsets = pset_num;
+		bluegene_numpsets = pset_num;
 	}
 	if (api_verb >= 0) {
 		bridge_api_verb = api_verb;
 	}
-
+	if (bp_node_cnt > 0 && !bluegene_bp_node_cnt) {
+		bluegene_bp_node_cnt = bp_node_cnt;
+		bluegene_quarter_node_cnt = bp_node_cnt/4;		
+	}
+	if (nc_node_cnt > 0 && !bluegene_segment_node_cnt) {
+		bluegene_segment_node_cnt = nc_node_cnt;		
+	}
+	
 	/* Process node information */
 	if (!nodes)
 		return SLURM_SUCCESS;	/* not block line. */
 	
-	bg_record = (bg_record_t*) xmalloc(sizeof(bg_record_t));
+	if(bluegene_layout_mode == LAYOUT_DYNAMIC) {
+		xfree(nodes);
+		xfree(conn_type);
+		return SLURM_SUCCESS;
+	}
 	
-	bg_record->user_name = 
-		xstrdup(slurmctld_conf.slurm_user_name);
-	if((pw_ent = getpwnam(bg_record->user_name)) == NULL) {
-		error("getpwnam(%s): %m", bg_record->user_name);
-	} else {
-		bg_record->user_uid = pw_ent->pw_uid;
+	if (!bluegene_bp_node_cnt) {
+		error("BasePartitionNodeCnt not configured in bluegene.conf, "
+		      "defaulting to 512");
+		bluegene_bp_node_cnt = 512;
+		bluegene_quarter_node_cnt = 128;
 	}
-	bg_record->bg_block_list = list_create(NULL);		
-	bg_record->hostlist = hostlist_create(NULL);
-	/* bg_record->boot_state = 0; 	Implicit */
-	/* bg_record->state = 0;	Implicit */
-	api_verb = strlen(nodes);
-	i=0;
-	while((nodes[i] != '[' 
-	       && (nodes[i] > 57 || nodes[i] < 48)) 
-	      && (i<api_verb)) {
-		
-		i++;
+	if (!bluegene_segment_node_cnt) {
+		error("NodeCardNodeCnt not configured in bluegene.conf, "
+		      "defaulting to 32");
+		bluegene_segment_node_cnt = 32;
 	}
-	if(i<api_verb) {
-		api_verb -= i;
-		bg_record->nodes = xmalloc(sizeof(char)*
-					   (api_verb
-					    +strlen(slurmctld_conf.node_prefix)
-					    +1));
-		
-		sprintf(bg_record->nodes, "%s%s\0", 
-			slurmctld_conf.node_prefix, nodes+i);
-	} else 
-		fatal("Nodes=%s is in a weird format", nodes); 
-	xfree(nodes); 
-	
-	_process_nodes(bg_record);
 	if (!conn_type || !strcasecmp(conn_type,"TORUS"))
-		bg_record->conn_type = SELECT_TORUS;
+		send_conn = SELECT_TORUS;
 	else if(!strcasecmp(conn_type,"MESH"))
-		bg_record->conn_type = SELECT_MESH;
+		send_conn = SELECT_MESH;
 	else
-		bg_record->conn_type = SELECT_SMALL;
+		send_conn = SELECT_SMALL;
 	xfree(conn_type);
-	
-	bg_record->node_use = SELECT_COPROCESSOR_MODE;
-	bg_record->cnodes_per_bp = procs_per_node;
-	bg_record->quarter = -1;
-	
-	if(bg_record->conn_type != SELECT_SMALL)
-		list_append(bg_list, bg_record);
-	else {
-		/* Automatically create 4-way split if 
-		 * conn_type == SELECT_SMALL in bluegene.conf
-		 */
-		itr = list_iterator_create(bg_record->bg_block_list);
-		while ((ba_node = list_next(itr)) != NULL) {
-			for(i=0; i<4 ; i++) {
-				small_bg_record = (bg_record_t*) 
-					xmalloc(sizeof(bg_record_t));
-				list_append(bg_list, small_bg_record);
-				
-				small_bg_record->user_name = 
-					xstrdup(bg_record->user_name);
-				small_bg_record->user_uid = 
-					bg_record->user_uid;
-				small_bg_record->bg_block_list = 
-					list_create(NULL);
-				small_bg_record->hostlist = 
-					hostlist_create(NULL);
-				small_bg_record->nodes = 
-					xstrdup(bg_record->nodes);
-
-				_process_nodes(small_bg_record);
-				
-				small_bg_record->conn_type = 
-					SELECT_SMALL;
-				
-				small_bg_record->node_use = 
-					SELECT_COPROCESSOR_MODE;
-				
-				small_bg_record->cnodes_per_bp = 
-					procs_per_node/4;
-				small_bg_record->quarter = i; 
-			}
-		}
-		list_iterator_destroy(itr);
-		destroy_bg_record(bg_record);
-	} 
 
+	_add_bg_record(bg_list, nodes, send_conn, num_segment, num_quarter);
+	xfree(nodes);
+	
 	return SLURM_SUCCESS;
 }
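Taken together, the keywords parsed above admit a bluegene.conf along the following lines; this fragment is purely illustrative (node ranges are made up, and the mandatory BlrtsImage/LinuxImage/MloaderImage/RamDiskImage lines are omitted), but the small-block line does satisfy the (Nodecards * 32) + (Quarters * 128) = 512 check enforced by _add_bg_record():

	# system geometry -- must appear before any BPs= lines
	BasePartitionNodeCnt=512
	NodeCardNodeCnt=32
	LayoutMode=STATIC
	Numpsets=8

	# one torus block plus one base partition split into 4x32 + 3x128
	BPs=[000x333] Type=TORUS
	BPs=[000x000] Type=SMALL Nodecards=4 Quarters=3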
 
@@ -1818,6 +2491,8 @@ static void _process_nodes(bg_record_t *bg_record)
 		      bg_record->nodes);
 	}
 #endif
+	bg_record->node_cnt = bluegene_bp_node_cnt * bg_record->bp_count;
+	
 	return;
 }
 
diff --git a/src/plugins/select/bluegene/plugin/bluegene.h b/src/plugins/select/bluegene/plugin/bluegene.h
index 9141ed61e6c..13050214390 100644
--- a/src/plugins/select/bluegene/plugin/bluegene.h
+++ b/src/plugins/select/bluegene/plugin/bluegene.h
@@ -48,18 +48,28 @@ typedef int lifecycle_type_t;
 
 enum block_lifecycle {DYNAMIC, STATIC};
 
+typedef enum bg_layout_type {
+	LAYOUT_STATIC,  /* no overlaps except for the full system block;
+			   blocks never change */
+	LAYOUT_OVERLAP, /* overlaps permitted, must be defined in 
+			   bluegene.conf file */
+	LAYOUT_DYNAMIC	/* slurm creates blocks on demand as jobs arrive */
+} bg_layout_t;
+
 typedef struct bg_record {
+	pm_partition_id_t bg_block_id;	/* ID returned from MMCS	*/
 	char *nodes;			/* String of nodes in block */
 	char *user_name;		/* user using the block */
 	char *target_name;		/* when a block is freed this 
 					   is the name of the user we 
 					   want on the block */
+	int full_block;                 /* whether or not block is the full
+					   block */
 	uid_t user_uid;   		/* Owner of block uid	*/
-	pm_partition_id_t bg_block_id;	/* ID returned from MMCS	*/
 	lifecycle_type_t block_lifecycle;/* either STATIC or DYNAMIC	*/
 	rm_partition_state_t state;   	/* the allocated block   */
 	int start[BA_SYSTEM_DIMENSIONS];/* start node */
-	int geo[BA_SYSTEM_DIMENSIONS];  /* geometry */
+	uint16_t geo[BA_SYSTEM_DIMENSIONS];  /* geometry */
 	rm_connection_type_t conn_type;	/* Mesh or Torus or NAV */
 	rm_partition_mode_t node_use;	/* either COPROCESSOR or VIRTUAL */
 	rm_partition_t *bg_block;       /* structure to hold info from db2 */
@@ -74,13 +84,14 @@ typedef struct bg_record {
 	int boot_count;                 /* number of boot attempts */
 	bitstr_t *bitmap;               /* bitmap to check the name 
 					   of block */
-	int full_block;             /* wether or not block is the full
-					   block */
 	int job_running;                /* job id if there is a job running 
 					   on the block */
-	int cnodes_per_bp;              /* count of cnodes per Base block */
+	int cpus_per_bp;                /* count of cpus per base part */
+	int node_cnt;                   /* count of nodes per block */
 	int quarter;                    /* used for small blocks 
 					   determine quarter of BP */
+	int segment;                    /* used for small blocks to
+					   determine segment of a quarter */
 } bg_record_t;
 
 typedef struct {
@@ -107,21 +118,28 @@ extern char *bluegene_linux;
 extern char *bluegene_mloader;
 extern char *bluegene_ramdisk;
 extern char *bridge_api_file;
-extern int numpsets;
+extern bg_layout_t bluegene_layout_mode;
+extern int bluegene_numpsets;
+extern int bluegene_bp_node_cnt;
+extern int bluegene_segment_node_cnt;
+extern int bluegene_quarter_node_cnt;
 extern ba_system_t *ba_system_ptr;
 extern time_t last_bg_update;
+
 extern List bg_curr_block_list; 	/* Initial bg block state */
 extern List bg_list;			/* List of configured BG blocks */
+extern List bg_job_block_list;  	/* jobs running in these blocks */
+extern List bg_booted_block_list;  	/* blocks that are booted */
+
 extern bool agent_fini;
 extern pthread_mutex_t block_state_mutex;
 extern int num_block_to_free;
 extern int num_block_freed;
 extern int blocks_are_created;
 extern int procs_per_node;
-extern bg_record_t *full_system_block;
-
 
 #define MAX_PTHREAD_RETRIES  1
+#define MAX_AGENT_COUNT      30
 
 #include "bg_block_info.h"
 #include "bg_job_place.h"
@@ -138,8 +156,9 @@ extern int init_bg(void);
 extern void fini_bg(void);
 
 /* Log a bg_record's contents */
-extern void print_bg_record(bg_record_t* record);
-extern void destroy_bg_record(void* object);
+extern void print_bg_record(bg_record_t *record);
+extern void destroy_bg_record(void *object);
+extern void copy_bg_record(bg_record_t *fir_record, bg_record_t *sec_record);
 
 /* return bg_record from bg_list */
 extern bg_record_t *find_bg_record(char *bg_block_id);
@@ -148,6 +167,9 @@ extern bg_record_t *find_bg_record(char *bg_block_id);
    updated before call of function. 
 */
 extern int update_block_user(bg_record_t *bg_block_id); 
+extern int format_node_name(bg_record_t *bg_record, char tmp_char[]);
+extern bool blocks_overlap(bg_record_t *rec_a, bg_record_t *rec_b);
+
 
 /* remove all users from a block but what is in user_name */
 /* Note return codes */
@@ -177,17 +199,19 @@ extern void *bluegene_agent(void *args);
 extern char *bg_err_str(status_t inx);
 
 /*
- * create_static_blocks - create the static blocks that will be used
+ * create_*_block(s) - functions for creating blocks that will be used
  *   for scheduling.
- * IN/OUT block_list - (global, from slurmctld): SLURM's block 
- *   configurations. Fill in bg_block_id                 
  * RET - success of fitting all configurations
  */
-extern int create_static_blocks(List block_list);
+extern int create_defined_blocks(bg_layout_t overlapped);
+extern int create_dynamic_block(ba_request_t *request, List my_block_list);
+extern int create_full_system_block();
 
 extern int bg_free_block(bg_record_t *bg_record);
+extern int remove_from_bg_list(List my_bg_list, bg_record_t *bg_record);
 extern void *mult_free_block(void *args);
 extern void *mult_destroy_block(void *args);
+extern int free_block_list(List delete_list);
 extern int read_bg_conf(void);
 
 /* block_sys.c */
diff --git a/src/plugins/select/bluegene/plugin/select_bluegene.c b/src/plugins/select/bluegene/plugin/select_bluegene.c
index 7216f37c699..3f5638b3fc6 100644
--- a/src/plugins/select/bluegene/plugin/select_bluegene.c
+++ b/src/plugins/select/bluegene/plugin/select_bluegene.c
@@ -171,7 +171,6 @@ extern int fini ( void )
  */
  extern int select_p_block_init(List part_list)
 {
-	xassert(part_list);
 #ifdef HAVE_BG
 	if(read_bg_conf() == SLURM_ERROR) {
 		fatal("Error, could not read the file");
@@ -179,7 +178,7 @@ extern int fini ( void )
 	}
 #else
 	/*looking for blocks only I created */
-	if (create_static_blocks(part_list) == SLURM_ERROR) {
+	if (create_static_blocks() == SLURM_ERROR) {
 		/* error in creating the static blocks, so
 		 * blocks referenced by submitted jobs won't
 		 * correspond to actual slurm blocks.
@@ -188,8 +187,7 @@ extern int fini ( void )
 		return SLURM_ERROR;
 	}
 #endif
-	sort_bg_record_inc_size(bg_list);
-
+	
 	return SLURM_SUCCESS; 
 }
 
@@ -300,6 +298,7 @@ extern int select_p_pack_node_info(time_t last_query_time, Buf *buffer_ptr)
 		pack_time(last_bg_update, buffer);
 
 		if(bg_list) {
+			slurm_mutex_lock(&block_state_mutex);
 			itr = list_iterator_create(bg_list);
 			while ((bg_record = (bg_record_t *) list_next(itr)) 
 			       != NULL) {
@@ -309,6 +308,7 @@ extern int select_p_pack_node_info(time_t last_query_time, Buf *buffer_ptr)
 				blocks_packed++;
 			}
 			list_iterator_destroy(itr);
+			slurm_mutex_unlock(&block_state_mutex);
 		} else {
 			error("select_p_pack_node_info: no bg_list");
 			return SLURM_ERROR;
@@ -339,14 +339,158 @@ extern int select_p_update_nodeinfo (struct job_record *job_ptr,
 }
 
 extern int select_p_get_extra_jobinfo (struct node_record *node_ptr, 
-                                      struct job_record *job_ptr, 
+				       struct job_record *job_ptr, 
                                        enum select_data_info info,
                                        void *data)
 {
        return SLURM_SUCCESS;
 }
 
-extern int select_p_get_info_from_plugin (enum select_data_info info, void *data)
+extern int select_p_get_info_from_plugin (enum select_data_info info, 
+					  void *data)
 {
-       return SLURM_SUCCESS;
+	return SLURM_SUCCESS;
+}
+
+extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data)
+{
+	job_desc_msg_t *job_desc = (job_desc_msg_t *)data;
+	int *nodes = (int *)data;
+	int tmp;
+	ListIterator itr;
+	bg_record_t *bg_record = NULL;
+	bg_record_t *found_record = NULL;
+	
+	switch (type) {
+	case SELECT_GET_NODE_MIN_OFFSET:
+		if(bg_list) {
+			itr = list_iterator_create(bg_list);
+			bg_record = (bg_record_t *)list_next(itr);
+			list_iterator_destroy(itr);
+		}
+		
+		if(!bg_record || 
+		   (bg_record->cpus_per_bp == procs_per_node)) 
+			(*nodes) = bluegene_bp_node_cnt;
+		else 
+			(*nodes) = bg_record->node_cnt;		
+		break;
+	case SELECT_GET_NODE_MAX_OFFSET:
+		if(bg_list) {
+			itr = list_iterator_create(bg_list);
+			while ((bg_record = (bg_record_t *) 
+				list_next(itr)) != NULL) 
+				found_record = bg_record;
+			list_iterator_destroy(itr);
+		}
+		if(!found_record || 
+		   (found_record->cpus_per_bp == procs_per_node))
+			(*nodes) = bluegene_bp_node_cnt;
+		else
+			(*nodes) = found_record->node_cnt;		
+		break;
+	case SELECT_APPLY_NODE_MIN_OFFSET:
+		if(bg_list) {
+			itr = list_iterator_create(bg_list);
+			bg_record = (bg_record_t *)list_next(itr);
+			list_iterator_destroy(itr);
+		}
+		if(!bg_record || 
+		   (bg_record->cpus_per_bp == procs_per_node)) 
+			(*nodes) *= bluegene_bp_node_cnt;
+		else 
+			(*nodes) *= bg_record->node_cnt;		
+		break;
+	case SELECT_APPLY_NODE_MAX_OFFSET:
+		if(bg_list) {
+			itr = list_iterator_create(bg_list);
+			while ((bg_record = (bg_record_t *) 
+				list_next(itr)) != NULL) 
+				found_record = bg_record;
+			list_iterator_destroy(itr);
+		}
+		if(!found_record || 
+		   (found_record->cpus_per_bp == procs_per_node))
+			(*nodes) *= bluegene_bp_node_cnt;
+		else
+			(*nodes) *= found_record->node_cnt;		
+		break;
+	case SELECT_SET_NODE_CNT:
+		select_g_get_jobinfo(job_desc->select_jobinfo,
+				     SELECT_DATA_ALTERED, &tmp);
+		if(tmp == 1) {
+			return SLURM_SUCCESS;
+		}
+		tmp = 1;
+		select_g_set_jobinfo(job_desc->select_jobinfo,
+				     SELECT_DATA_ALTERED, &tmp);
+		tmp = NO_VAL;
+		select_g_set_jobinfo(job_desc->select_jobinfo,
+				     SELECT_DATA_MAX_PROCS, 
+				     &tmp);
+				
+		if(job_desc->min_nodes == NO_VAL)
+			return SLURM_SUCCESS;
+
+		/* See if min_nodes is greater than one base partition */
+		if(job_desc->min_nodes > bluegene_bp_node_cnt) {
+			/*
+			  if so, round it up to the next multiple of
+			  bluegene_bp_node_cnt
+			*/
+			tmp = job_desc->min_nodes % bluegene_bp_node_cnt;
+			if(tmp > 0)
+				job_desc->min_nodes += 
+					(bluegene_bp_node_cnt-tmp);
+		}
+		tmp = job_desc->min_nodes / bluegene_bp_node_cnt;
+		
+		/* this means it is greater or equal to one bp */
+		if(tmp > 0) 
+			job_desc->min_nodes = tmp;
+		else { 
+			/* this means it is either a quarter or smaller */
+			tmp = job_desc->min_nodes % bluegene_segment_node_cnt;
+			if(tmp > 0)
+				job_desc->min_nodes += 
+					(bluegene_segment_node_cnt-tmp);
+			tmp = bluegene_bp_node_cnt/job_desc->min_nodes;
+			job_desc->num_procs = procs_per_node/tmp;
+			job_desc->min_nodes = 1;
+		}
+		
+		if(job_desc->max_nodes == NO_VAL) 
+			return SLURM_SUCCESS;
+		
+		if(job_desc->max_nodes > bluegene_bp_node_cnt) {
+			tmp = job_desc->max_nodes % bluegene_bp_node_cnt;
+			if(tmp > 0)
+				job_desc->max_nodes += 
+					(bluegene_bp_node_cnt-tmp);
+		}
+		tmp = job_desc->max_nodes / bluegene_bp_node_cnt;
+		if(tmp > 0) {
+			job_desc->max_nodes = tmp;
+			tmp = NO_VAL;
+		} else {
+			tmp = job_desc->max_nodes % bluegene_segment_node_cnt;
+			if(tmp > 0)
+				job_desc->max_nodes += 
+					(bluegene_segment_node_cnt-tmp);
+			tmp = bluegene_bp_node_cnt/job_desc->max_nodes;
+			tmp = procs_per_node/tmp;
+			
+			select_g_set_jobinfo(job_desc->select_jobinfo,
+					     SELECT_DATA_MAX_PROCS, 
+					     &tmp);
+			job_desc->max_nodes = 1;
+		}
+		tmp = NO_VAL;
+			
+		break;
+	default:
+		error("unknown option %d for alter_node_cnt",type);
+	}
+	return SLURM_SUCCESS;
 }
diff --git a/src/plugins/select/bluegene/plugin/sfree.c b/src/plugins/select/bluegene/plugin/sfree.c
index 7d2d8a3295c..99d53a68f69 100644
--- a/src/plugins/select/bluegene/plugin/sfree.c
+++ b/src/plugins/select/bluegene/plugin/sfree.c
@@ -132,13 +132,13 @@ int main(int argc, char *argv[])
 		}
 		delete_record = xmalloc(sizeof(delete_record_t));
 		delete_record->bg_block_id = xstrdup(bg_block_id);
-		delete_record->state = -1;
+		delete_record->state = NO_VAL;
 		list_push(delete_record_list, delete_record);
 
 		slurm_attr_init(&attr_agent);
 		if (pthread_attr_setdetachstate(
 			    &attr_agent, 
-			    PTHREAD_CREATE_JOINABLE))
+			    PTHREAD_CREATE_DETACHED))
 			error("pthread_attr_setdetach"
 			      "state error %m");
 		
@@ -156,6 +156,7 @@ int main(int argc, char *argv[])
 			/* sleep and retry */
 			usleep(1000);	
 		}
+		pthread_attr_destroy(&attr_agent);
 		num_block_to_free++;
 	} else {
 		if ((rc = rm_get_partitions_info(block_state, &block_list))
@@ -221,13 +222,13 @@ int main(int argc, char *argv[])
 			
 			free(bg_block_id);
 			
-			delete_record->state = -1;
+			delete_record->state = NO_VAL;
 			list_push(delete_record_list, delete_record);
 
 			slurm_attr_init(&attr_agent);
 			if (pthread_attr_setdetachstate(
 				    &attr_agent, 
-				    PTHREAD_CREATE_JOINABLE))
+				    PTHREAD_CREATE_DETACHED))
 				error("pthread_attr_setdetach"
 				      "state error %m");
 			
@@ -245,6 +246,7 @@ int main(int argc, char *argv[])
 				/* sleep and retry */
 				usleep(1000);	
 			}
+			pthread_attr_destroy(&attr_agent);
 			num_block_to_free++;
 		}
 		if ((rc = rm_free_partition_list(block_list)) != STATUS_OK) {
@@ -264,13 +266,13 @@ int main(int argc, char *argv[])
 
 static int _free_block(delete_record_t *delete_record)
 {
-	int state=-1;
+	int state=NO_VAL;
 	int rc;
 	int i=0;
 	info("freeing bgblock %s", delete_record->bg_block_id);
 	_term_jobs_on_block(delete_record->bg_block_id);
 	while (1) {
-		if (delete_record->state != -1
+		if (delete_record->state != NO_VAL
 		    && delete_record->state != RM_PARTITION_FREE 
 		    && delete_record->state != RM_PARTITION_DEALLOCATING) {
 			info("pm_destroy %s",delete_record->bg_block_id);
@@ -320,7 +322,7 @@ static int _update_bg_record_state()
 	if ((rc = rm_get_data(block_list, RM_PartListSize, &num_blocks))
 	    != STATUS_OK) {
 		error("rm_get_data(RM_PartListSize): %s", _bg_err_str(rc));
-		state = -1;
+		state = NO_VAL;
 		num_blocks = 0;
 	}
 	
@@ -332,7 +334,7 @@ static int _update_bg_record_state()
 			    != STATUS_OK) {
 				error("rm_get_data(RM_PartListNextPart): %s",
 				      _bg_err_str(rc));
-				state = -1;
+				state = NO_VAL;
 				break;
 			}
 		} else {
@@ -342,7 +344,7 @@ static int _update_bg_record_state()
 			    != STATUS_OK) {
 				error("rm_get_data(RM_PartListFirstPart: %s",
 				      _bg_err_str(rc));
-				state = -1;
+				state = NO_VAL;
 				break;
 			}
 		}
@@ -352,7 +354,7 @@ static int _update_bg_record_state()
 		    != STATUS_OK) {
 			error("rm_get_data(RM_PartitionID): %s",
 			      _bg_err_str(rc));
-			state = -1;
+			state = NO_VAL;
 			break;
 		}
 		
@@ -370,13 +372,13 @@ static int _update_bg_record_state()
 				continue;
 			}
 		
-			if(state == -1)
+			if(state == NO_VAL)
 				goto clean_up;
 			else if(j>=num_blocks) {
 				error("This bgblock, %s, "
 				      "doesn't exist in MMCS",
 				      bg_block_id);
-				state = -1;
+				state = NO_VAL;
 				goto clean_up;
 			}
 			
diff --git a/src/plugins/select/cons_res/select_cons_res.c b/src/plugins/select/cons_res/select_cons_res.c
index 3c658d39a78..21c4c2d3c6f 100644
--- a/src/plugins/select/cons_res/select_cons_res.c
+++ b/src/plugins/select/cons_res/select_cons_res.c
@@ -1299,3 +1299,10 @@ extern int select_p_get_info_from_plugin(enum select_data_info info,
 	}
 	return rc;
 }
+
+extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data)
+{	
+	return SLURM_SUCCESS;
+}
+
+#undef __SELECT_CR_DEBUG
diff --git a/src/plugins/select/linear/select_linear.c b/src/plugins/select/linear/select_linear.c
index 2b366442756..8b7abe665bd 100644
--- a/src/plugins/select/linear/select_linear.c
+++ b/src/plugins/select/linear/select_linear.c
@@ -184,7 +184,7 @@ extern int select_p_block_init(List part_list)
  *	select_p_job_test is called
  */
 extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
-		int min_nodes, int max_nodes, bool test_only)
+			     int min_nodes, int max_nodes, bool test_only)
 {
 	int i, index, error_code = EINVAL, sufficient;
 	int *consec_nodes;	/* how many nodes we can add from this 
@@ -493,3 +493,8 @@ extern int select_p_get_info_from_plugin (enum select_data_info info, void *data
 {
        return SLURM_SUCCESS;
 }
+
+extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data)
+{
+	return SLURM_SUCCESS;
+}
diff --git a/src/scontrol/scontrol.c b/src/scontrol/scontrol.c
index 090bd7232c5..fe4d8d64aa1 100644
--- a/src/scontrol/scontrol.c
+++ b/src/scontrol/scontrol.c
@@ -1505,10 +1505,11 @@ _update_job (int argc, char *argv[])
 					(char **) NULL, 10);
 			update_cnt++;
 		}
-#if SYSTEM_DIMENSIONS
+#ifdef HAVE_BG
 		else if (strncasecmp(argv[i], "Geometry=", 9) == 0) {
 			char* token, *delimiter = ",x", *next_ptr;
 			int j, rc = 0;
+			uint16_t geo[SYSTEM_DIMENSIONS];
 			char* geometry_tmp = xstrdup(&argv[i][9]);
 			char* original_ptr = geometry_tmp;
 			token = strtok_r(geometry_tmp, delimiter, &next_ptr);
@@ -1519,8 +1520,8 @@ _update_job (int argc, char *argv[])
 					rc = -1;
 					break;
 				}
-				job_msg.geometry[j] = atoi(token);
-				if (job_msg.geometry[j] <= 0) {
+				geo[j] = (uint16_t) atoi(token);
+				if (geo[j] <= 0) {
 					error("invalid --geometry argument");
 					rc = -1;
 					break;
@@ -1538,36 +1539,47 @@ _update_job (int argc, char *argv[])
 				xfree(original_ptr);
 			if (rc != 0) {
 				for (j=0; j<SYSTEM_DIMENSIONS; j++)
-					job_msg.geometry[j] = (uint16_t) NO_VAL;
+					geo[j] = (uint16_t) NO_VAL;
 				exit_code = 1;
 			} else
 				update_cnt++;
+			select_g_set_jobinfo(&job_msg.select_jobinfo,
+					     SELECT_DATA_GEOMETRY,
+					     geo);			
 		}
-#endif
+
 		else if (strncasecmp(argv[i], "Rotate=", 7) == 0) {
+			int16_t rotate;
 			if (strcasecmp(&argv[i][7], "yes") == 0)
-				job_msg.rotate = 1;
+				rotate = 1;
 			else if (strcasecmp(&argv[i][7], "no") == 0)
-				job_msg.rotate = 0;
+				rotate = 0;
 			else
-				job_msg.rotate = 
-					(uint32_t) strtol(&argv[i][7], 
-						(char **) NULL, 10);
+				rotate = (uint16_t) strtol(&argv[i][7], 
+							   (char **) NULL, 10);
+			select_g_set_jobinfo(&job_msg.select_jobinfo,
+					     SELECT_DATA_ROTATE,
+					     rotate);
 			update_cnt++;
 		}
 		else if (strncasecmp(argv[i], "Connection=", 11) == 0) {
+			int16_t conn_type;
 			if (strcasecmp(&argv[i][11], "torus") == 0)
-				job_msg.conn_type = SELECT_TORUS;
+				conn_type = SELECT_TORUS;
 			else if (strcasecmp(&argv[i][11], "mesh") == 0)
-				job_msg.conn_type = SELECT_MESH;
+				conn_type = SELECT_MESH;
 			else if (strcasecmp(&argv[i][11], "nav") == 0)
-				job_msg.conn_type = SELECT_NAV;
+				conn_type = SELECT_NAV;
 			else
-				job_msg.conn_type = 
+				conn_type = 
 					(uint16_t) strtol(&argv[i][11], 
 							(char **) NULL, 10);
+			select_g_set_jobinfo(&job_msg.select_jobinfo,
+					     SELECT_DATA_CONN_TYPE,
+					     conn_type);
 			update_cnt++;
 		}
+#endif
 		else if (strncasecmp(argv[i], "StartTime=", 10) == 0) {
 			job_msg.begin_time = parse_time(&argv[i][10]);
 			update_cnt++;
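On BlueGene builds, the Geometry, Rotate and Connection options handled above map onto scontrol's job-update syntax; a usage sketch (the job id and values are illustrative only):

	# adjust BlueGene-specific attributes of a pending job
	scontrol update JobId=1234 Geometry=2x2x4 Connection=torus Rotate=no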
diff --git a/src/sinfo/print.c b/src/sinfo/print.c
index f0e7e851bab..47df33e8d94 100644
--- a/src/sinfo/print.c
+++ b/src/sinfo/print.c
@@ -83,9 +83,8 @@ int print_sinfo_entry(sinfo_data_t *sinfo_data)
 	sinfo_format_t *current;
 
 	while ((current = (sinfo_format_t *) list_next(i)) != NULL) {
-		if (current->
-		    function(sinfo_data, current->width, 
-					current->right_justify, current->suffix)
+		if (current->function(sinfo_data, current->width, 
+				      current->right_justify, current->suffix)
 		    != SLURM_SUCCESS)
 			return SLURM_ERROR;
 	}
@@ -160,15 +159,23 @@ static int _print_secs(long time, int width, bool right, bool cut_output)
 static int 
 _build_min_max_string(char *buffer, int buf_size, int min, int max, bool range)
 {
+	int i;
+	char tmp_min[7];
+	char tmp_max[7];
+	convert_to_kilo(min, tmp_min);
+	convert_to_kilo(max, tmp_max);
+	
 	if (max == min)
-		return snprintf(buffer, buf_size, "%d", max);
+		return snprintf(buffer, buf_size, "%s", tmp_max);
 	else if (range) {
 		if (max == INFINITE)
-			return snprintf(buffer, buf_size, "%d-infinite", min);
+			return snprintf(buffer, buf_size, "%s-infinite", 
+					tmp_min);
 		else
-			return snprintf(buffer, buf_size, "%d-%d", min, max);
+			return snprintf(buffer, buf_size, "%s-%s", 
+					tmp_min, tmp_max);
 	} else
-		return snprintf(buffer, buf_size, "%d+", min);
+		return snprintf(buffer, buf_size, "%s+", tmp_min);
 }
 
 int
@@ -337,8 +344,13 @@ int _print_node_list(sinfo_data_t * sinfo_data, int width,
 		hostlist_ranged_string(sinfo_data->nodes, 
 					sizeof(tmp), tmp);
 		_print_str(tmp, width, right_justify, true);
-	} else
-		_print_str("NODELIST", width, right_justify, true);
+	} else {
+#ifdef HAVE_BG
+		_print_str("BP_LIST", width, right_justify, false);
+#else
+		_print_str("NODELIST", width, right_justify, false);
+#endif
+	}
 
 	if (suffix)
 		printf("%s", suffix);
@@ -346,11 +358,14 @@ int _print_node_list(sinfo_data_t * sinfo_data, int width,
 }
 
 int _print_nodes_t(sinfo_data_t * sinfo_data, int width,
-			bool right_justify, char *suffix)
+		   bool right_justify, char *suffix)
 {
 	char id[FORMAT_STRING_SIZE];
+	char tmp[7];
 	if (sinfo_data) {
-		snprintf(id, FORMAT_STRING_SIZE, "%u", sinfo_data->nodes_tot);
+		convert_to_kilo(sinfo_data->nodes_tot, tmp);
+	
+		snprintf(id, FORMAT_STRING_SIZE, "%s", tmp);
 		_print_str(id, width, right_justify, true);
 	} else
 		_print_str("NODES", width, right_justify, true);
@@ -361,12 +376,17 @@ int _print_nodes_t(sinfo_data_t * sinfo_data, int width,
 }
 
 int _print_nodes_ai(sinfo_data_t * sinfo_data, int width,
-			bool right_justify, char *suffix)
+		    bool right_justify, char *suffix)
 {
 	char id[FORMAT_STRING_SIZE];
+	char tmpa[7];
+	char tmpi[7];
 	if (sinfo_data) {
-		snprintf(id, FORMAT_STRING_SIZE, "%u/%u", 
-		         sinfo_data->nodes_alloc, sinfo_data->nodes_idle);
+		convert_to_kilo(sinfo_data->nodes_alloc, tmpa);
+		convert_to_kilo(sinfo_data->nodes_idle, tmpi);
+	
+		snprintf(id, FORMAT_STRING_SIZE, "%s/%s", 
+		         tmpa, tmpi);
 		_print_str(id, width, right_justify, true);
 	} else
 		_print_str("NODES(A/I)", width, right_justify, true);
@@ -380,10 +400,17 @@ int _print_nodes_aiot(sinfo_data_t * sinfo_data, int width,
 			bool right_justify, char *suffix)
 {
 	char id[FORMAT_STRING_SIZE];
+	char tmpa[7];
+	char tmpi[7];
+	char tmpo[7];
+	char tmpt[7];
 	if (sinfo_data) {
-		snprintf(id, FORMAT_STRING_SIZE, "%u/%u/%u/%u", 
-		         sinfo_data->nodes_alloc, sinfo_data->nodes_idle,
-		         sinfo_data->nodes_other, sinfo_data->nodes_tot);
+		convert_to_kilo(sinfo_data->nodes_alloc, tmpa);
+		convert_to_kilo(sinfo_data->nodes_idle, tmpi);
+		convert_to_kilo(sinfo_data->nodes_other, tmpo);
+		convert_to_kilo(sinfo_data->nodes_tot, tmpt);
+		snprintf(id, FORMAT_STRING_SIZE, "%s/%s/%s/%s", 
+		         tmpa, tmpi, tmpo, tmpt);
 		_print_str(id, width, right_justify, true);
 	} else
 		_print_str("NODES(A/I/O/T)", width, right_justify, true);
@@ -493,9 +520,9 @@ int _print_size(sinfo_data_t * sinfo_data, int width,
 			    (sinfo_data->part_info->max_nodes > 0))
 				sinfo_data->part_info->min_nodes = 1;
 			_build_min_max_string(id, FORMAT_STRING_SIZE, 
-		                      sinfo_data->part_info->min_nodes, 
-		                      sinfo_data->part_info->max_nodes,
-		                      true);
+					      sinfo_data->part_info->min_nodes,
+					      sinfo_data->part_info->max_nodes,
+					      true);
 			_print_str(id, width, right_justify, true);
 		}
 	} else
diff --git a/src/sinfo/sinfo.c b/src/sinfo/sinfo.c
index 7926fd9e108..f62468557cf 100644
--- a/src/sinfo/sinfo.c
+++ b/src/sinfo/sinfo.c
@@ -54,7 +54,7 @@ static int  _build_sinfo_data(List sinfo_list,
 		partition_info_msg_t *partition_msg,
 		node_info_msg_t *node_msg);
 static void _create_sinfo(List sinfo_list, partition_info_t* part_ptr, 
-		uint16_t part_inx, node_info_t *node_ptr);
+			  uint16_t part_inx, node_info_t *node_ptr);
 static bool _filter_out(node_info_t *node_ptr);
 static void _sinfo_list_delete(void *data);
 static node_info_t *_find_node(char *node_name, node_info_msg_t *node_msg); 
@@ -182,9 +182,9 @@ static int _bg_report(void)
 	}
 
 	if (!params.no_header)
-		printf("BG_BLOCK        NODES        OWNER    STATE    CONNECTION USE\n");
+		printf("BG_BLOCK         NODES        OWNER    STATE    CONNECTION USE\n");
 /*                      1234567890123456 123456789012 12345678 12345678 1234567890 12345+ */
-/*                      RMP_22Apr1544018 bg[123x456] name     READY    TORUS      COPROCESSOR */
+/*                      RMP_22Apr1544018 bg[123x456]  name     READY    TORUS      COPROCESSOR */
 
 	for (i=0; i<new_bg_ptr->record_count; i++) {
 		printf("%-16.16s %-12.12s %-8.8s %-8.8s %-10.10s %s\n",
@@ -216,7 +216,7 @@ _query_server(partition_info_msg_t ** part_pptr,
 
 	if (params.all_flag)
 		show_flags |= SHOW_ALL;
-
+		
 	if (old_part_ptr) {
 		error_code =
 		    slurm_load_partitions(old_part_ptr->last_update,
@@ -238,7 +238,7 @@ _query_server(partition_info_msg_t ** part_pptr,
 
 	old_part_ptr = new_part_ptr;
 	*part_pptr = new_part_ptr;
-
+	
 	if (old_node_ptr) {
 		error_code =
 		    slurm_load_node(old_node_ptr->last_update,
@@ -265,13 +265,14 @@ _query_server(partition_info_msg_t ** part_pptr,
 /*
  * _build_sinfo_data - make a sinfo_data entry for each unique node 
  *	configuration and add it to the sinfo_list for later printing.
- * sinfo_list IN/OUT - list of unique sinfo_dataa records to report
+ * sinfo_list IN/OUT - list of unique sinfo_data records to report
  * partition_msg IN - partition info message
  * node_msg IN - node info message
  * RET zero or error code 
  */
 static int _build_sinfo_data(List sinfo_list, 
-		partition_info_msg_t *partition_msg, node_info_msg_t *node_msg)
+			     partition_info_msg_t *partition_msg, 
+			     node_info_msg_t *node_msg)
 {
 	node_info_t *node_ptr;
 	partition_info_t* part_ptr;
@@ -280,6 +281,7 @@ static int _build_sinfo_data(List sinfo_list,
 	hostlist_t hl;
 	sinfo_data_t *sinfo_ptr;
 	char *node_name = NULL;
+	int offset = 0;
 
 	/* by default every partition is shown, even if no nodes */
 	if ((!params.node_flag) && params.match_flags.partition_flag) {
@@ -288,13 +290,14 @@ static int _build_sinfo_data(List sinfo_list,
 			if ((!params.partition) || 
 			    (_strcmp(params.partition, part_ptr->name) == 0))
 				_create_sinfo(sinfo_list, part_ptr, 
-						(uint16_t) j, NULL);
+					      (uint16_t) j, NULL);
 		}
 	}
 
 	/* make sinfo_list entries for every node in every partition */
 	for (j=0; j<partition_msg->record_count; j++, part_ptr++) {
 		part_ptr = &(partition_msg->partition_array[j]);
+		
 		if (params.filtering && params.partition
 		&&  _strcmp(part_ptr->name, params.partition))
 			continue;
@@ -323,13 +326,13 @@ static int _build_sinfo_data(List sinfo_list,
 			/* if no match, create new sinfo_data entry */
 			if (sinfo_ptr == NULL) {
 				_create_sinfo(sinfo_list, part_ptr, 
-					(uint16_t) j, node_ptr);
+					      (uint16_t) j, node_ptr);
 			}
 			list_iterator_destroy(i);
 		}
 		hostlist_destroy(hl);
 	}
-
+		
 	_sort_hostlist(sinfo_list);
 	return SLURM_SUCCESS;
 }
@@ -483,7 +486,11 @@ static bool _match_part_data(sinfo_data_t *sinfo_ptr,
 static void _update_sinfo(sinfo_data_t *sinfo_ptr, node_info_t *node_ptr)
 {
 	uint16_t base_state;
+	int offset = 1;
 
+	if(sinfo_ptr->part_info->max_offset)
+		offset = sinfo_ptr->part_info->max_offset;
+	
 	if (sinfo_ptr->nodes_tot == 0) {	/* first node added */
 		sinfo_ptr->node_state = node_ptr->node_state;
 		sinfo_ptr->features   = node_ptr->features;
@@ -524,16 +531,15 @@ static void _update_sinfo(sinfo_data_t *sinfo_ptr, node_info_t *node_ptr)
 
 	base_state = node_ptr->node_state & NODE_STATE_BASE;
 	if (node_ptr->node_state & NODE_STATE_DRAIN)
-		sinfo_ptr->nodes_other++;
+		sinfo_ptr->nodes_other += offset;
 	else if ((base_state == NODE_STATE_ALLOCATED)
 	||       (node_ptr->node_state & NODE_STATE_COMPLETING))
-		sinfo_ptr->nodes_alloc++;
+		sinfo_ptr->nodes_alloc += offset;
 	else if (base_state == NODE_STATE_IDLE)
-		sinfo_ptr->nodes_idle++;
+		sinfo_ptr->nodes_idle += offset;
 	else 
-		sinfo_ptr->nodes_other++;
-	sinfo_ptr->nodes_tot++;
-
+		sinfo_ptr->nodes_other += offset;
+	sinfo_ptr->nodes_tot += offset;
 	hostlist_push(sinfo_ptr->nodes, node_ptr->name);
 }
 
@@ -545,28 +551,29 @@ static void _update_sinfo(sinfo_data_t *sinfo_ptr, node_info_t *node_ptr)
  * node_ptr IN       - pointer to node record to add
  */
 static void _create_sinfo(List sinfo_list, partition_info_t* part_ptr, 
-		uint16_t part_inx, node_info_t *node_ptr)
+			  uint16_t part_inx, node_info_t *node_ptr)
 {
 	sinfo_data_t *sinfo_ptr;
-
+	int offset = 1;
 	/* create an entry */
 	sinfo_ptr = xmalloc(sizeof(sinfo_data_t));
 
 	sinfo_ptr->part_info = part_ptr;
-
+	if(sinfo_ptr->part_info->max_offset) {
+		offset = sinfo_ptr->part_info->max_offset;
+	}
 	if (node_ptr) {
 		uint16_t base_state = node_ptr->node_state & 
 			NODE_STATE_BASE;
 		sinfo_ptr->node_state = node_ptr->node_state;
 		if ((base_state == NODE_STATE_ALLOCATED)
 		||  (node_ptr->node_state & NODE_STATE_COMPLETING))
-			sinfo_ptr->nodes_alloc++;
+			sinfo_ptr->nodes_alloc += offset;
 		else if (base_state == NODE_STATE_IDLE)
-			sinfo_ptr->nodes_idle++;
+			sinfo_ptr->nodes_idle += offset;
 		else 
-			sinfo_ptr->nodes_other++;
-		sinfo_ptr->nodes_tot++;
-
+			sinfo_ptr->nodes_other += offset;
+		sinfo_ptr->nodes_tot += offset;
 		sinfo_ptr->min_cpus = node_ptr->cpus;
 		sinfo_ptr->max_cpus = node_ptr->cpus;
 
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index 4ab9a3c98fe..ab2faf3e7b1 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -488,12 +488,12 @@ static void _dump_job_state(struct job_record *dump_job_ptr, Buf buffer)
         pack_time(dump_job_ptr->pre_sus_time, buffer);
 
 	pack16((uint16_t) dump_job_ptr->job_state, buffer);
-	pack16(dump_job_ptr->next_step_id, buffer);
-	pack16(dump_job_ptr->kill_on_node_fail, buffer);
-	pack16(dump_job_ptr->kill_on_step_done, buffer);
-	pack16(dump_job_ptr->batch_flag, buffer);
-	pack16(dump_job_ptr->port, buffer);
-	pack16(dump_job_ptr->mail_type, buffer);
+	pack16((uint16_t)dump_job_ptr->next_step_id, buffer);
+	pack16((uint16_t)dump_job_ptr->kill_on_node_fail, buffer);
+	pack16((uint16_t)dump_job_ptr->kill_on_step_done, buffer);
+	pack16((uint16_t)dump_job_ptr->batch_flag, buffer);
+	pack16((uint16_t)dump_job_ptr->port, buffer);
+	pack16((uint16_t)dump_job_ptr->mail_type, buffer);
 
 	packstr(dump_job_ptr->host, buffer);
 	packstr(dump_job_ptr->nodes, buffer);
@@ -839,14 +839,14 @@ static void _dump_job_step_state(struct step_record *step_ptr, Buf buffer)
 {
 	pack16((uint16_t) step_ptr->step_id, buffer);
 	pack16((uint16_t) step_ptr->cyclic_alloc, buffer);
-	pack16(step_ptr->port, buffer);
+	pack16((uint16_t)step_ptr->port, buffer);
 	pack32(step_ptr->num_tasks, buffer);
 	pack_time(step_ptr->start_time, buffer);
 	packstr(step_ptr->host,  buffer);
 	packstr(step_ptr->step_node_list,  buffer);
 	packstr(step_ptr->name, buffer);
 	packstr(step_ptr->network, buffer);
-	pack16(step_ptr->batch_step, buffer);
+	pack16((uint16_t)step_ptr->batch_step, buffer);
 	if (!step_ptr->batch_step)
 		switch_pack_jobinfo(step_ptr->switch_job, buffer);
 	checkpoint_pack_jobinfo(step_ptr->check_job, buffer);
@@ -1368,7 +1368,7 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate, int will_run,
 
 	no_alloc = test_only || too_fragmented || 
 			(!top_prio) || (!independent);
-
+	
 	error_code = select_nodes(job_ptr, no_alloc);
 	if ((error_code == ESLURM_NODES_BUSY) ||
 	    (error_code == ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE)) {
@@ -1668,6 +1668,9 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	bitstr_t *req_bitmap = NULL, *exc_bitmap = NULL;
 	bool super_user = false;
 	struct job_record *job_ptr;
+	uint16_t geo[SYSTEM_DIMENSIONS];
+
+	select_g_alter_node_cnt(SELECT_SET_NODE_CNT, job_desc);
 
 	*job_pptr = (struct job_record *) NULL;
 	if ((error_code = _validate_job_desc(job_desc, allocate, submit_uid)))
@@ -1761,11 +1764,14 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	if (job_desc->min_nodes == NO_VAL)
 		job_desc->min_nodes = 1;
 #if SYSTEM_DIMENSIONS
-	if ((job_desc->geometry[0] != (uint16_t) NO_VAL)
-	&&  (job_desc->geometry[0] != 0)) {
+	select_g_get_jobinfo(job_desc->select_jobinfo,
+			     SELECT_DATA_GEOMETRY,
+			     &geo);
+	if ((geo[0] != (uint16_t) NO_VAL) && (geo[0] != 0)) {
 		int i, tot = 1;
-		for (i=0; i<SYSTEM_DIMENSIONS; i++)
-			tot *= job_desc->geometry[i];
+		for (i=0; i<SYSTEM_DIMENSIONS; i++) {
+			tot *= geo[i];
+		}
 		if (job_desc->min_nodes > tot) {
 			info("MinNodes(%d) > GeometryNodes(%d)", 
 				job_desc->min_nodes, tot);
@@ -2283,22 +2289,8 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 	if (job_desc->work_dir)
 		detail_ptr->work_dir = xstrdup(job_desc->work_dir);
 	detail_ptr->begin_time = job_desc->begin_time;
-
-	if (select_g_alloc_jobinfo(&job_ptr->select_jobinfo))
-		return SLURM_ERROR;
-#if SYSTEM_DIMENSIONS
-	select_g_set_jobinfo(job_ptr->select_jobinfo,
-		SELECT_DATA_GEOMETRY, 
-		job_desc->geometry);
-#endif
-	select_g_set_jobinfo(job_ptr->select_jobinfo,
-		SELECT_DATA_CONN_TYPE, 
-		&job_desc->conn_type);
-	if (job_desc->conn_type == (uint16_t) NO_VAL)
-		job_desc->conn_type = SELECT_NAV;
-	select_g_set_jobinfo(job_ptr->select_jobinfo,
-		SELECT_DATA_ROTATE, 
-		&job_desc->rotate);
+	job_ptr->select_jobinfo = 
+		select_g_copy_jobinfo(job_desc->select_jobinfo);
 
 	*job_rec_ptr = job_ptr;
 	return SLURM_SUCCESS;
@@ -2415,7 +2407,7 @@ static void _job_timed_out(struct job_record *job_ptr)
  */
 static int _validate_job_desc(job_desc_msg_t * job_desc_msg, int allocate, 
 			      uid_t submit_uid)
-{
+{	
 	if ((job_desc_msg->num_procs == NO_VAL) &&
 	    (job_desc_msg->min_nodes == NO_VAL) &&
 	    (job_desc_msg->req_nodes == NULL)) {
@@ -2490,19 +2482,7 @@ static int _validate_job_desc(job_desc_msg_t * job_desc_msg, int allocate,
 		job_desc_msg->shared = 0;	/* default not shared nodes */
 	if (job_desc_msg->min_procs == NO_VAL)
 		job_desc_msg->min_procs = 1;	/* default 1 cpu per node */
-
-#if SYSTEM_DIMENSIONS
-	if (job_desc_msg->geometry[0] == (uint16_t) NO_VAL) {
-		int i;
-		for (i=0; i<SYSTEM_DIMENSIONS; i++)
-			job_desc_msg->geometry[i] = 0;
-	}
-#endif
-	if (job_desc_msg->conn_type == (uint16_t) NO_VAL)
-		job_desc_msg->conn_type = SELECT_NAV;  /* try TORUS, then MESH */
-	if (job_desc_msg->rotate == (uint16_t) NO_VAL)
-		job_desc_msg->rotate = true;    /* default to allow rotate */
-
+	
 	return SLURM_SUCCESS;
 }
 
@@ -2673,17 +2653,17 @@ void pack_job(struct job_record *dump_job_ptr, Buf buffer)
 {
 	struct job_details *detail_ptr;
 
-	pack32(dump_job_ptr->job_id, buffer);
-	pack32(dump_job_ptr->user_id, buffer);
-	pack32(dump_job_ptr->group_id, buffer);
+	pack32((uint32_t)dump_job_ptr->job_id, buffer);
+	pack32((uint32_t)dump_job_ptr->user_id, buffer);
+	pack32((uint32_t)dump_job_ptr->group_id, buffer);
 
 	pack16((uint16_t) dump_job_ptr->job_state, buffer);
 	pack16((uint16_t) dump_job_ptr->batch_flag, buffer);
-	pack32(dump_job_ptr->alloc_sid, buffer);
+	pack32((uint32_t)dump_job_ptr->alloc_sid, buffer);
 	if ((dump_job_ptr->time_limit == NO_VAL) && dump_job_ptr->part_ptr)
-		pack32(dump_job_ptr->part_ptr->max_time, buffer);
+		pack32((uint32_t)dump_job_ptr->part_ptr->max_time, buffer);
 	else
-		pack32(dump_job_ptr->time_limit, buffer);
+		pack32((uint32_t)dump_job_ptr->time_limit, buffer);
 
 	if (dump_job_ptr->details)
 		 pack_time(dump_job_ptr->details->submit_time, buffer);
@@ -2701,18 +2681,18 @@ void pack_job(struct job_record *dump_job_ptr, Buf buffer)
 	pack_time(dump_job_ptr->suspend_time, buffer);
 	pack_time(dump_job_ptr->pre_sus_time, buffer);
 
-	pack32(dump_job_ptr->priority, buffer);
+	pack32((uint32_t)dump_job_ptr->priority, buffer);
 
 	packstr(dump_job_ptr->nodes, buffer);
 	packstr(dump_job_ptr->partition, buffer);
 	packstr(dump_job_ptr->account, buffer);
 	packstr(dump_job_ptr->network, buffer);
-	pack32(dump_job_ptr->dependency, buffer);
+	pack32((uint32_t)dump_job_ptr->dependency, buffer);
 
 	packstr(dump_job_ptr->name, buffer);
 	packstr(dump_job_ptr->alloc_node, buffer);
 	pack_bit_fmt(dump_job_ptr->node_bitmap, buffer);
-	pack32(dump_job_ptr->num_procs, buffer);
+	pack32((uint32_t)dump_job_ptr->num_procs, buffer);
 	
 	select_g_pack_jobinfo(dump_job_ptr->select_jobinfo, buffer);
 
@@ -2735,7 +2715,9 @@ void pack_job(struct job_record *dump_job_ptr, Buf buffer)
 /* pack job details for "get_job_info" RPC */
 static void _pack_job_details(struct job_details *detail_ptr, Buf buffer)
 {
-	if (detail_ptr) {
+	uint32_t altered;
+	
+	if (detail_ptr) {		
 		pack32((uint32_t) detail_ptr->min_nodes, buffer);
 		pack16((uint16_t) detail_ptr->shared, buffer);
 		pack16((uint16_t) detail_ptr->contiguous, buffer);
@@ -3002,6 +2984,12 @@ void reset_job_priority(void)
  */
 static bool _top_priority(struct job_record *job_ptr)
 {
+#ifdef HAVE_BG
+	/* On BlueGene, all jobs run ASAP. 
+	 * Priority only matters within a specific job size. */
+	return true;
+
+#else
 	struct job_details *detail_ptr = job_ptr->details;
 	bool top;
 
@@ -3037,6 +3025,7 @@ static bool _top_priority(struct job_record *job_ptr)
 			detail_ptr->wait_reason = WAIT_PRIORITY;
 	}
 	return top;
+#endif
 }
 
 
@@ -3332,40 +3321,6 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 			error_code = ESLURM_DISABLED;
 	}
 
-#if SYSTEM_DIMENSIONS
-	if (job_specs->geometry[0] != (uint16_t) NO_VAL) {
-		int i, tot = 1;
-		for (i=0; i<SYSTEM_DIMENSIONS; i++)
-			tot *= job_specs->geometry[i];
-		detail_ptr->min_nodes = tot;
-		detail_ptr->max_nodes = tot;
-		select_g_set_jobinfo(job_ptr->select_jobinfo,
-			SELECT_DATA_GEOMETRY,
-			job_specs->geometry);
-		 info("update_job: setting geometry to %ux%ux%u for job_id %u",
-			job_specs->geometry[0], job_specs->geometry[1], 
-			job_specs->geometry[2], job_ptr->job_id);
-	}
-#endif
-
-#ifdef HAVE_BG
-	if (job_specs->conn_type != (uint16_t) NO_VAL) {
-		select_g_set_jobinfo(job_ptr->select_jobinfo,
-			SELECT_DATA_CONN_TYPE,
-			&job_specs->conn_type);
-		info("update_job: setting conn_type to %u for job_id %u",
-			job_specs->conn_type, job_ptr->job_id);
-	}
-
-	if (job_specs->rotate != (uint16_t) NO_VAL) {
-		select_g_set_jobinfo(job_ptr->select_jobinfo,
-			SELECT_DATA_ROTATE,
-			&job_specs->rotate);
-		info("update_job: setting rotate to %u for job_id %u",
-			job_specs->rotate, job_ptr->job_id);
-	}
-#endif
-
 	return error_code;
 }
 
@@ -3527,8 +3482,8 @@ kill_job_on_node(uint32_t job_id, struct job_record *job_ptr,
 	kill_req = xmalloc(sizeof(kill_job_msg_t));
 	kill_req->job_id	= job_id;
 	if (job_ptr) {  /* NULL if unknown */
-		kill_req->select_jobinfo = select_g_copy_jobinfo(
-			job_ptr->select_jobinfo);
+		kill_req->select_jobinfo = 
+			select_g_copy_jobinfo(job_ptr->select_jobinfo);
 	}
 
 	agent_info = xmalloc(sizeof(agent_arg_t));
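
Note: two patterns recur in the job_mgr.c hunks above: every pack16()/pack32()
call now casts its argument to the exact packed width, and the BlueGene-specific
job attributes (geometry, connection type, rotate) are no longer copied field by
field but travel inside the opaque select_jobinfo, duplicated with
select_g_copy_jobinfo(). A minimal sketch of the packing convention, using field
names that appear in the hunks (illustrative only, not part of the patch):

	/* Cast to the wire width so the packed size is explicit even
	 * when the struct member is declared as a wider type. */
	pack16((uint16_t) job_ptr->port,     buffer);
	pack32((uint32_t) job_ptr->priority, buffer);
	packstr(job_ptr->name, buffer);
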
diff --git a/src/slurmctld/node_scheduler.c b/src/slurmctld/node_scheduler.c
index 62db52fe68a..15db316f91b 100644
--- a/src/slurmctld/node_scheduler.c
+++ b/src/slurmctld/node_scheduler.c
@@ -67,8 +67,8 @@ struct node_set {		/* set of nodes with same configuration */
 };
 
 static int _add_node_set_info(struct node_set *node_set_ptr, 
-			       bitstr_t ** node_bitmap, 
-			       int *node_cnt, int *cpu_cnt, int cr_enabled);
+			      bitstr_t ** node_bitmap, 
+			      int *node_cnt, int *cpu_cnt, int cr_enabled);
 static int  _build_node_list(struct job_record *job_ptr, 
 			     struct node_set **node_set_pptr,
 			     int *node_set_size);
@@ -79,15 +79,15 @@ static int _nodes_in_sets(bitstr_t *req_bitmap,
 			  struct node_set * node_set_ptr, 
 			  int node_set_size);
 static void _node_load_bitmaps(bitstr_t * bitmap, bitstr_t ** no_load_bit, 
-				bitstr_t ** light_load_bit, 
-				bitstr_t ** heavy_load_bit);
+			       bitstr_t ** light_load_bit, 
+			       bitstr_t ** heavy_load_bit);
 static int _pick_best_load(struct job_record *job_ptr, bitstr_t * bitmap, 
-			int min_nodes, int max_nodes);
+			   int min_nodes, int max_nodes, bool test_only);
 static int _pick_best_nodes(struct node_set *node_set_ptr, 
-			int node_set_size, bitstr_t ** select_bitmap, 
-			struct job_record *job_ptr, uint32_t min_nodes, 
-			uint32_t max_nodes, int shared, 
-			uint32_t node_lim);
+			    int node_set_size, bitstr_t ** select_bitmap, 
+			    struct job_record *job_ptr, uint32_t min_nodes, 
+			    uint32_t max_nodes, int shared, 
+			    uint32_t node_lim);
 static int _valid_features(char *requested, char *available);
 
 
@@ -271,7 +271,7 @@ static int _match_feature(char *seek, char *available)
  */
 static int
 _pick_best_load(struct job_record *job_ptr, bitstr_t * bitmap, 
-		int min_nodes, int max_nodes)
+		int min_nodes, int max_nodes, bool test_only)
 {
 	bitstr_t *no_load_bit, *light_load_bit, *heavy_load_bit;
 	int error_code;
@@ -289,13 +289,15 @@ _pick_best_load(struct job_record *job_ptr, bitstr_t * bitmap,
 		bit_or(bitmap, job_ptr->details->req_node_bitmap);
 	
 	error_code = select_g_job_test(job_ptr, bitmap, 
-			min_nodes, max_nodes, false);
+				       min_nodes, max_nodes, 
+				       test_only);
 
 	/* now try to use idle and lightly loaded nodes */
 	if (error_code) {
 		bit_or(bitmap, light_load_bit);
 		error_code = select_g_job_test(job_ptr, bitmap, 
-				min_nodes, max_nodes, false);
+					       min_nodes, max_nodes, 
+					       test_only);
 	} 
 	FREE_NULL_BITMAP(light_load_bit);
 
@@ -303,7 +305,8 @@ _pick_best_load(struct job_record *job_ptr, bitstr_t * bitmap,
 	if (error_code) {
 		bit_or(bitmap, heavy_load_bit);
 		error_code = select_g_job_test(job_ptr, bitmap, 
-				min_nodes, max_nodes, false);
+					       min_nodes, max_nodes, 
+					       test_only);
 	}
 	FREE_NULL_BITMAP(heavy_load_bit);
 
@@ -438,7 +441,7 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
                 }
         }
 
-	if (job_ptr->details->req_node_bitmap) {	/* specific nodes required */
+	if (job_ptr->details->req_node_bitmap) {  /* specific nodes required */
 		/* we have already confirmed that all of these nodes have a
 		 * usable configuration and are in the proper partition */
 		if (min_nodes != 0)
@@ -446,12 +449,14 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 				job_ptr->details->req_node_bitmap);
 		if (job_ptr->num_procs != 0) {
 			if (cr_enabled) {
-				error_code = select_g_get_extra_jobinfo (NULL, 
-								 job_ptr, 
-                                                                 SELECT_CR_CPU_COUNT, 
-                                                                 &total_cpus);
+				error_code = select_g_get_extra_jobinfo (
+					NULL, 
+					job_ptr, 
+					SELECT_CR_CPU_COUNT, 
+					&total_cpus);
 				if (error_code != SLURM_SUCCESS) {
-					FREE_NULL_BITMAP(partially_idle_node_bitmap);
+					FREE_NULL_BITMAP(
+						partially_idle_node_bitmap);
 					return error_code;
 			}
                   } else 
@@ -600,13 +605,15 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 				pick_code = _pick_best_load(job_ptr, 
 							    avail_bitmap, 
 							    min_nodes, 
-							    max_nodes);
-			} else 
+							    max_nodes,
+							    false);
+			} else {
 				pick_code = select_g_job_test(job_ptr, 
 							      avail_bitmap, 
 							      min_nodes, 
-							      max_nodes, false);
-			
+							      max_nodes,
+							      false);
+			}
 			if (pick_code == SLURM_SUCCESS) {
 				if ((node_lim != INFINITE) && 
 				    (bit_set_count(avail_bitmap) > node_lim)) {
@@ -628,13 +635,15 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 		    (avail_nodes >= min_nodes) &&
 		    (avail_nodes <  max_nodes)) {
 			pick_code = select_g_job_test(job_ptr, avail_bitmap, 
-						min_nodes, max_nodes, false);
+						      min_nodes, max_nodes,
+						      false);
 			if ((pick_code == SLURM_SUCCESS) &&
 			    ((node_lim == INFINITE) ||
 			     (bit_set_count(avail_bitmap) <= node_lim))) {
 				FREE_NULL_BITMAP(total_bitmap);
                                 if (cr_enabled) 
-					FREE_NULL_BITMAP(partially_idle_node_bitmap);
+					FREE_NULL_BITMAP(
+						partially_idle_node_bitmap);
 				*select_bitmap = avail_bitmap;
 				return SLURM_SUCCESS;
 			}
@@ -644,12 +653,12 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 		 * nodes available) */
 
 		if ((!runable_ever || !runable_avail)
-		&&  (total_nodes >= min_nodes)
-		&&  ((slurmctld_conf.fast_schedule == 0) ||
-		     (total_cpus >= job_ptr->num_procs))
-		&&  ((job_ptr->details->req_node_bitmap == NULL) ||
-		     (bit_super_set(job_ptr->details->req_node_bitmap, 
-				total_bitmap)))) {
+		    &&  (total_nodes >= min_nodes)
+		    &&  ((slurmctld_conf.fast_schedule == 0) ||
+			 (total_cpus >= job_ptr->num_procs))
+		    &&  ((job_ptr->details->req_node_bitmap == NULL) ||
+			 (bit_super_set(job_ptr->details->req_node_bitmap, 
+					total_bitmap)))) {
 			if (!runable_avail) {
 				FREE_NULL_BITMAP(avail_bitmap);
 				avail_bitmap = bit_copy(total_bitmap);
@@ -659,7 +668,10 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 				pick_code = select_g_job_test(job_ptr, 
 							      avail_bitmap, 
 							      min_nodes, 
-							      max_nodes, true);
+							      max_nodes,
+							      true);
+                                if (cr_enabled)
+                                        job_ptr->cr_enabled = 1;
 				if (pick_code == SLURM_SUCCESS) {
 					runable_ever  = true;
 					if ((node_lim == INFINITE) ||
@@ -672,7 +684,10 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 				pick_code = select_g_job_test(job_ptr, 
 							      total_bitmap, 
 							      min_nodes, 
-							      max_nodes, true);
+							      max_nodes,
+							      true);
+                                if (cr_enabled)
+                                        job_ptr->cr_enabled = 1;
 				if (pick_code == SLURM_SUCCESS)
 					runable_ever = true;
 			}
@@ -694,6 +709,7 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 		error_code = ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
 		info("_pick_best_nodes: job never runnable");
 	}
+
 	if (error_code == SLURM_SUCCESS)
 		error_code = ESLURM_NODES_BUSY;
 	return error_code;
@@ -859,7 +875,7 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only)
 		shared = 0;
 	else
 		shared = job_ptr->details->shared;
-
+	
 	error_code = _pick_best_nodes(node_set_ptr, node_set_size,
 				      &select_bitmap, job_ptr,
 				      min_nodes, max_nodes,
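
Note: the node_scheduler.c hunks above thread a new test_only flag from
_pick_best_nodes() through _pick_best_load() into select_g_job_test(), so the
select plugin can tell a feasibility probe from a real allocation. A hedged
sketch of the load-ordered cascade using the symbols shown above (illustrative
only):

	/* Try idle nodes first, then lightly loaded, then heavily
	 * loaded, forwarding test_only unchanged at each stage. */
	rc = select_g_job_test(job_ptr, bitmap, min_nodes, max_nodes,
			       test_only);
	if (rc) {
		bit_or(bitmap, light_load_bit);
		rc = select_g_job_test(job_ptr, bitmap, min_nodes,
				       max_nodes, test_only);
	}
	if (rc) {
		bit_or(bitmap, heavy_load_bit);
		rc = select_g_job_test(job_ptr, bitmap, min_nodes,
				       max_nodes, test_only);
	}
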
diff --git a/src/slurmctld/partition_mgr.c b/src/slurmctld/partition_mgr.c
index ffe4d94f3ec..d64964b2558 100644
--- a/src/slurmctld/partition_mgr.c
+++ b/src/slurmctld/partition_mgr.c
@@ -358,12 +358,12 @@ static void _dump_part_state(struct part_record *part_ptr, Buf buffer)
 	pack32(part_ptr->max_nodes, buffer);
 	pack32(part_ptr->min_nodes, buffer);
 
-	pack16(default_part_flag, buffer);
-	pack16(part_ptr->hidden, buffer);
-	pack16(part_ptr->root_only, buffer);
-	pack16(part_ptr->shared, buffer);
+	pack16((uint16_t)default_part_flag, buffer);
+	pack16((uint16_t)part_ptr->hidden, buffer);
+	pack16((uint16_t)part_ptr->root_only, buffer);
+	pack16((uint16_t)part_ptr->shared, buffer);
 
-	pack16(part_ptr->state_up, buffer);
+	pack16((uint16_t)part_ptr->state_up, buffer);
 	packstr(part_ptr->allow_groups, buffer);
 	packstr(part_ptr->nodes, buffer);
 }
@@ -697,6 +697,7 @@ void pack_part(struct part_record *part_ptr, Buf buffer)
 {
 	uint16_t default_part_flag;
 	char node_inx_ptr[BUFFER_SIZE];
+	uint32_t altered;
 
 	if (default_part_loc == part_ptr)
 		default_part_flag = 1;
@@ -704,18 +705,32 @@ void pack_part(struct part_record *part_ptr, Buf buffer)
 		default_part_flag = 0;
 
 	packstr(part_ptr->name, buffer);
-	pack32(part_ptr->max_time, buffer);
-	pack32(part_ptr->max_nodes, buffer);
-	pack32(part_ptr->min_nodes, buffer);
-	pack32(part_ptr->total_nodes, buffer);
-
-	pack32(part_ptr->total_cpus, buffer);
-	pack16(default_part_flag, buffer);
-	pack16(part_ptr->hidden, buffer);
-	pack16(part_ptr->root_only, buffer);
-	pack16(part_ptr->shared, buffer);
-
-	pack16(part_ptr->state_up, buffer);
+	pack32((uint32_t)part_ptr->max_time, buffer);
+	altered = part_ptr->max_nodes;
+	select_g_alter_node_cnt(SELECT_APPLY_NODE_MAX_OFFSET, 
+				&altered);
+	pack32((uint32_t)altered, buffer);
+	altered = part_ptr->min_nodes;
+	select_g_alter_node_cnt(SELECT_APPLY_NODE_MIN_OFFSET, 
+				&altered);
+	pack32((uint32_t)altered, buffer);
+	altered = part_ptr->total_nodes;
+	select_g_alter_node_cnt(SELECT_APPLY_NODE_MAX_OFFSET, 
+				&altered);
+	pack32((uint32_t)altered, buffer);
+	select_g_alter_node_cnt(SELECT_GET_NODE_MAX_OFFSET, 
+				&part_ptr->max_offset);
+	pack32((uint32_t)part_ptr->max_offset, buffer);
+	select_g_alter_node_cnt(SELECT_GET_NODE_MIN_OFFSET, 
+				&part_ptr->min_offset);
+	pack32((uint32_t)part_ptr->min_offset, buffer);
+	pack32((uint32_t)part_ptr->total_cpus, buffer);
+	pack16((uint16_t)default_part_flag, buffer);
+	pack16((uint16_t)part_ptr->hidden, buffer);
+	pack16((uint16_t)part_ptr->root_only, buffer);
+	pack16((uint16_t)part_ptr->shared, buffer);
+
+	pack16((uint16_t)part_ptr->state_up, buffer);
 	packstr(part_ptr->allow_groups, buffer);
 	packstr(part_ptr->nodes, buffer);
 	if (part_ptr->node_bitmap) {
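
Note: pack_part() now runs node counts through select_g_alter_node_cnt() before
packing, so on BlueGene the client-visible numbers are scaled by the select
plugin (c-nodes rather than base partitions), and the new min_offset/max_offset
fields are packed alongside. A minimal sketch of the conversion step, assuming
the enum values introduced by this patch:

	/* Scale a stored count into the value the plugin wants clients
	 * to see, then pack the adjusted copy. */
	uint32_t altered = part_ptr->total_nodes;
	select_g_alter_node_cnt(SELECT_APPLY_NODE_MAX_OFFSET, &altered);
	pack32(altered, buffer);
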
diff --git a/src/slurmctld/proc_req.c b/src/slurmctld/proc_req.c
index 30d2d1b0e77..dc3e48f3ee5 100644
--- a/src/slurmctld/proc_req.c
+++ b/src/slurmctld/proc_req.c
@@ -452,7 +452,8 @@ static void _slurm_rpc_allocate_resources(slurm_msg_t * msg)
 		do_unlock = true;
 		lock_slurmctld(job_write_lock);
 		error_code = job_allocate(job_desc_msg,
-				immediate, false, true, uid, &job_ptr);
+					  immediate, false, 
+					  true, uid, &job_ptr);
 		/* unlock after finished using the job structure data */
 		END_TIMER;
 	}
@@ -483,7 +484,8 @@ static void _slurm_rpc_allocate_resources(slurm_msg_t * msg)
 		alloc_msg.node_cnt       = job_ptr->node_cnt;
 		alloc_msg.node_list      = xstrdup(job_ptr->nodes);
 		alloc_msg.num_cpu_groups = job_ptr->num_cpu_groups;
-		alloc_msg.select_jobinfo = select_g_copy_jobinfo(job_ptr->select_jobinfo);
+		alloc_msg.select_jobinfo = 
+			select_g_copy_jobinfo(job_ptr->select_jobinfo);
 		unlock_slurmctld(job_write_lock);
 
 		response_msg.msg_type = RESPONSE_RESOURCE_ALLOCATION;
@@ -554,7 +556,7 @@ static void _slurm_rpc_allocate_and_run(slurm_msg_t * msg)
 
 	lock_slurmctld(job_write_lock);
 	error_code = job_allocate(job_desc_msg, 
-			immediate, false, true, uid, &job_ptr);
+				  immediate, false, true, uid, &job_ptr);
 
 	/* return result */
 	if (error_code) {
diff --git a/src/slurmctld/slurmctld.h b/src/slurmctld/slurmctld.h
index b326112d639..c75dd383336 100644
--- a/src/slurmctld/slurmctld.h
+++ b/src/slurmctld/slurmctld.h
@@ -207,6 +207,8 @@ struct part_record {
 	uint32_t min_nodes;	/* per job */
 	uint32_t total_nodes;	/* total number of nodes in the partition */
 	uint32_t total_cpus;	/* total number of cpus in the partition */
+	uint32_t min_offset;	/* select plugin min offset */
+	uint32_t max_offset;	/* select plugin max offset */
 	uint16_t root_only;	/* 1 if allocate/submit RPC can only be 
 				   issued by user root */
 	uint16_t shared;	/* 1 if job can share a node,
diff --git a/src/slurmctld/step_mgr.c b/src/slurmctld/step_mgr.c
index 7e4a157a9e7..a33fbb77de9 100644
--- a/src/slurmctld/step_mgr.c
+++ b/src/slurmctld/step_mgr.c
@@ -606,13 +606,15 @@ step_create ( job_step_create_request_msg_t *step_specs,
 	if (nodeset == NULL)
 		return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE ;
 	node_count = bit_set_count(nodeset);
-	
+	//select_g_alter_node_cnt(SELECT_APPLY_NODE_MAX_OFFSET, &node_count);
+
 	if (step_specs->num_tasks == NO_VAL) {
 		if (step_specs->cpu_count != NO_VAL)
 			step_specs->num_tasks = step_specs->cpu_count;
 		else
 			step_specs->num_tasks = node_count;
 	}
+	
 	if ((step_specs->num_tasks < 1)
 	|| (step_specs->num_tasks > (node_count*MAX_TASKS_PER_NODE)))
 		return ESLURM_BAD_TASK_COUNT;
diff --git a/src/slurmd/common/stepd_api.c b/src/slurmd/common/stepd_api.c
index 1a66ea14c7c..60e5054c42a 100644
--- a/src/slurmd/common/stepd_api.c
+++ b/src/slurmd/common/stepd_api.c
@@ -105,7 +105,7 @@ stepd_connect(char *directory, char *nodename, uint32_t jobid, uint32_t stepid)
 
 	/* Pack the auth credential */
 	rc = g_slurm_auth_pack(auth_cred, buffer);
-	(void) g_slurm_auth_destroy(auth_cred);
+	(void) slurm_free_cred(auth_cred);
 	if (rc) {
 		error("Packing authentication credential: %s",
 		      g_slurm_auth_errstr(g_slurm_auth_errno(auth_cred)));
diff --git a/src/slurmd/slurmstepd/mgr.c b/src/slurmd/slurmstepd/mgr.c
index a3b690690e7..af8da7f42ea 100644
--- a/src/slurmd/slurmstepd/mgr.c
+++ b/src/slurmd/slurmstepd/mgr.c
@@ -594,7 +594,8 @@ _fork_all_tasks(slurmd_job_t *job)
 			error ("exec_all_tasks: pipe: %m");
 			return SLURM_ERROR;
 		}
-		debug3("New fdpair[0] = %d, fdpair[1] = %d", fdpair[0], fdpair[1]);
+		debug3("New fdpair[0] = %d, fdpair[1] = %d", 
+		       fdpair[0], fdpair[1]);
 		fd_set_close_on_exec(fdpair[0]);
 		fd_set_close_on_exec(fdpair[1]);
 		readfds[i] = fdpair[0];
diff --git a/src/slurmd/slurmstepd/req.c b/src/slurmd/slurmstepd/req.c
index 5ebb66271e7..76552e9d985 100644
--- a/src/slurmd/slurmstepd/req.c
+++ b/src/slurmd/slurmstepd/req.c
@@ -317,7 +317,7 @@ _handle_accept(void *arg)
 	if (rc != SLURM_SUCCESS) {
 		error("Verifying authentication credential: %s",
 		      g_slurm_auth_errstr(g_slurm_auth_errno(auth_cred)));
-		(void) g_slurm_auth_destroy(auth_cred);
+		(void) slurm_free_cred(auth_cred);
 		free_buf(buffer);
 		goto fail;
 	}
@@ -326,7 +326,7 @@ _handle_accept(void *arg)
 	uid = g_slurm_auth_get_uid(auth_cred);
 	gid = g_slurm_auth_get_gid(auth_cred);
 	debug3("  Identity: uid=%d, gid=%d", uid, gid);
-	g_slurm_auth_destroy(auth_cred);
+	slurm_free_cred(auth_cred);
 	free_buf(buffer);
 
 	rc = SLURM_SUCCESS;
diff --git a/src/smap/configure_functions.c b/src/smap/configure_functions.c
index 517216432b2..5453652a051 100644
--- a/src/smap/configure_functions.c
+++ b/src/smap/configure_functions.c
@@ -38,6 +38,9 @@ typedef struct {
 
 static void	_delete_allocated_blocks(List allocated_blocks);
 static allocated_block_t *_make_request(ba_request_t *request);
+static int      _set_layout(char *com);
+static int      _set_base_part_cnt(char *com);
+static int      _set_nodecard_cnt(char *com);
 static int	_create_allocation(char *com, List allocated_blocks);
 static int	_resolve(char *com);
 static int	_change_state_all_bps(char *com, int state);
@@ -52,6 +55,9 @@ static void	_print_header_command(void);
 static void	_print_text_command(allocated_block_t *allocated_block);
 
 char error_string[255];
+int base_part_node_cnt = 512;
+int nodecard_node_cnt = 32;
+char *layout_mode = "STATIC";
 
 static void _delete_allocated_blocks(List allocated_blocks)
 {
@@ -80,7 +86,10 @@ static allocated_block_t *_make_request(ba_request_t *request)
 			  request->geometry[2]);
 		return NULL;
 	} else {
-				
+		if(request->passthrough)
+			sprintf(error_string, "There are passthroughs in "
+				"this allocation!");
+		
 		allocated_block = (allocated_block_t *)xmalloc(
 			sizeof(allocated_block_t));
 		allocated_block->request = request;
@@ -98,42 +107,121 @@ static allocated_block_t *_make_request(ba_request_t *request)
 
 }
 
-static int _create_allocation(char *com, List allocated_blocks)
+static int _set_layout(char *com)
 {
-	int i=6, geoi=-1, starti=-1, i2=0;
+	int i=0;
 	int len = strlen(com);
 	
-	allocated_block_t *allocated_block = NULL;
-	ba_request_t *request = (ba_request_t*) xmalloc(sizeof(ba_request_t)); 
+	while(i<len) {
+		if(!strncasecmp(com+i, "dynamic", 7)) {
+			layout_mode = "DYNAMIC";
+			break;
+		} else if(!strncasecmp(com+i, "static", 6)) {
+			layout_mode = "STATIC";
+			break;
+		} else if(!strncasecmp(com+i, "overlap", 7)) {
+			layout_mode = "OVERLAP";
+			break;
+		} else {
+			i++;
+		}
+	}
+	if(i>=len) {
+		sprintf(error_string, 
+			"Unrecognized layout mode.\n"
+			"Please use (STATIC, OVERLAP, or DYNAMIC)\n");
+		return 0;
+	}
+	sprintf(error_string, 
+		"LayoutMode set to %s\n", layout_mode);
 	
-	request->geometry[0] = -1;
-	request->conn_type=TORUS;
-	request->rotate = false;
-	request->elongate = false;
-	request->force_contig = false;
-	request->start_req=0;
-			
+}
+
+static int _set_base_part_cnt(char *com)
+{
+	int i=0;
+	int len = strlen(com);
+
 	while(i<len) {
+		if(com[i] < 58 && com[i] > 47) {
+			break;
+		} else {
+			i++;
+		}
+	}
+	if(i>=len) {		
+		sprintf(error_string, 
+			"No numeric value found in the command\n");
+		return 0;
+	}
+	base_part_node_cnt = atoi(&com[i]);
+	sprintf(error_string, 
+		"BasePartitionNodeCnt set to %d\n", base_part_node_cnt);
 		
-		while(com[i-1]!=' ' && i<len) {
+	return 1;
+}
+
+static int _set_nodecard_cnt(char *com)
+{
+	int i=0;
+	int len = strlen(com);
+
+	while(i<len) {
+		if(com[i] < 58 && com[i] > 47) {
+			break;
+		} else {
 			i++;
 		}
+	}
+	if(i>=len) {		
+		sprintf(error_string, 
+			"No numeric value found in the command\n");
+		return 0;
+	}
+	nodecard_node_cnt = atoi(&com[i]);
+	sprintf(error_string, 
+		"NodeCardNodeCnt set to %d\n", nodecard_node_cnt);
 		
+	return 1;
+}
+
+static int _create_allocation(char *com, List allocated_blocks)
+{
+	int i=6, geoi=-1, starti=-1, i2=0, nodecards=-1, quarters=-1;
+	int len = strlen(com);
+	char *temp = NULL;
+	allocated_block_t *allocated_block = NULL;
+	ba_request_t *request = (ba_request_t*) xmalloc(sizeof(ba_request_t)); 
+	
+	request->geometry[0] = (uint16_t)NO_VAL;
+	request->conn_type=SELECT_TORUS;
+	request->rotate = false;
+	request->elongate = false;
+	request->start_req=0;
+	request->size = 0;
+	request->nodecards = 0;
+	request->quarters = 0;
+	request->passthrough = false;
+	
+	while(i<len) {				
 		if(!strncasecmp(com+i, "mesh", 4)) {
-			request->conn_type=MESH;
+			request->conn_type=SELECT_MESH;
 			i+=4;
 		} else if(!strncasecmp(com+i, "small", 5)) {
-			request->conn_type = SMALL;
+			request->conn_type = SELECT_SMALL;
 			i+=5;
+		} else if(!strncasecmp(com+i, "nodecard", 8)) {
+			nodecards=0;
+			i+=5;
+		} else if(!strncasecmp(com+i, "quarter", 7)) {
+			quarters=0;
+			i+=6;
 		} else if(!strncasecmp(com+i, "rotate", 6)) {
 			request->rotate=true;
 			i+=6;
 		} else if(!strncasecmp(com+i, "elongate", 8)) {
 			request->elongate=true;
 			i+=8;
-		} else if(!strncasecmp(com+i, "force", 5)) {
-			request->force_contig=true;
-			i+=5;
 		} else if(!strncasecmp(com+i, "start", 5)) {
 			request->start_req=1;
 			i+=5;					
@@ -142,6 +230,12 @@ static int _create_allocation(char *com, List allocated_blocks)
 			  && (com[i] < 58 && com[i] > 47)) {
 			starti=i;
 			i++;
+		} else if(nodecards == 0 && (com[i] < 58 && com[i] > 47)) {
+			nodecards=i;
+			i++;
+		} else if(quarters == 0 && (com[i] < 58 && com[i] > 47)) {
+			quarters=i;
+			i++;
 		} else if(geoi<0 && (com[i] < 58 && com[i] > 47)) {
 			geoi=i;
 			i++;
@@ -150,14 +244,49 @@ static int _create_allocation(char *com, List allocated_blocks)
 		}
 		
 	}		
+	
+	if(request->conn_type == SELECT_SMALL) {
+		if(nodecards > 0) {
+			request->nodecards = atoi(&com[nodecards]);
+			nodecards = request->nodecards/4;
+			request->nodecards = nodecards*4;
+		}
 
-	if(geoi<0) {
+		request->quarters = 4;
+		
+		if(request->nodecards > 0)
+			request->quarters -= nodecards;
+
+		if(request->quarters > 4) {
+			request->quarters = 4;
+			request->nodecards = 0;
+		} else if(request->nodecards > 16) {
+			request->quarters = 0;
+			request->nodecards = 16;
+		}
+		
+		quarters = request->quarters*4;
+		nodecards = request->nodecards;
+		if((quarters+nodecards) > 16) {
+			sprintf(error_string, 
+				"please specify a complete split of a "
+				"Base Partition\n"
+				"(i.e. nodecards=4)");
+			geoi = -1;
+		}
+		request->size = 1;
+				
+	}
+
+	if(geoi<0 && !request->size) {
 		memset(error_string,0,255);
 		sprintf(error_string, 
 			"No size or dimension specified, please re-enter");
 	} else {
 		i2=geoi;
 		while(i2<len) {
+			if(request->size)
+				break;
 			if(com[i2]==' ' || i2==(len-1)) {
 				/* for size */
 				request->size = atoi(&com[geoi]);
@@ -204,6 +333,9 @@ static int _create_allocation(char *com, List allocated_blocks)
 			request->start[Z] = atoi(&com[starti]);
 		}
 	start_request:
+		if(!strcasecmp(layout_mode,"OVERLAP"))
+			reset_ba_system();
+	
 		/*
 		  Here is where we do the allocating of the partition. 
 		  It will send a request back which we will throw into
@@ -440,12 +572,13 @@ error_message:
 	memset(error_string,0,255);
 #ifdef HAVE_BG
 	sprintf(error_string, 
-		"Problem with nodes specified range was %d%d%dx%d%d%d",
+		"Problem with base partitions, "
+		"specified range was %d%d%dx%d%d%d",
 		start[X],start[Y],start[Z],
 		end[X],end[Y],end[Z]);
 #else
 	sprintf(error_string, 
-		"Problem with nodes specified range was %d-%d",
+		"Problem with nodes, specified range was %d-%d",
 		start[X],end[X]);
 #endif	
 	return 0;
@@ -481,10 +614,11 @@ static int _remove_allocation(char *com, List allocated_blocks)
 		while((allocated_block = list_next(results_i)) != NULL) {
 			if(found) {
 				if(redo_block(allocated_block->nodes, 
-					     allocated_block->request->geometry,
-					     allocated_block->
-					     request->conn_type, 
-					     color_count) == SLURM_ERROR) {
+					      allocated_block->
+					      request->geometry,
+					      allocated_block->
+					      request->conn_type, 
+					      color_count) == SLURM_ERROR) {
 					memset(error_string,0,255);
 					sprintf(error_string, 
 						"problem redoing the part.");
@@ -497,7 +631,8 @@ static int _remove_allocation(char *com, List allocated_blocks)
 				
 			} else if(allocated_block->letter == letter) {
 				found=1;
-				remove_block(allocated_block->nodes,color_count);
+				remove_block(allocated_block->nodes,
+					     color_count);
 				list_destroy(allocated_block->nodes);
 				delete_ba_request(allocated_block->request);
 				list_remove(results_i);
@@ -513,11 +648,10 @@ static int _remove_allocation(char *com, List allocated_blocks)
 
 static int _alter_allocation(char *com, List allocated_blocks)
 {
-	int torus=TORUS, i=5, i2=0;
+	int torus=SELECT_TORUS, i=5, i2=0;
 	int len = strlen(com);
 	bool rotate = false;
 	bool elongate = false;
-	bool force_contig = false;
 		
 	while(i<len) {
 		
@@ -525,7 +659,7 @@ static int _alter_allocation(char *com, List allocated_blocks)
 			i++;
 		}
 		if(!strncasecmp(com+i, "mesh", 4)) {
-			torus=MESH;
+			torus=SELECT_MESH;
 			i+=4;
 		} else if(!strncasecmp(com+i, "rotate", 6)) {
 			rotate=true;
@@ -533,9 +667,6 @@ static int _alter_allocation(char *com, List allocated_blocks)
 		} else if(!strncasecmp(com+i, "elongate", 8)) {
 			elongate=true;
 			i+=8;
-		} else if(!strncasecmp(com+i, "force", 5)) {
-			force_contig=true;				
-			i+=5;
 		} else if(i2<0 && (com[i] < 58 && com[i] > 47)) {
 			i2=i;
 			i++;
@@ -609,7 +740,8 @@ static int _copy_allocation(char *com, List allocated_blocks)
 		request->conn_type=allocated_block->request->conn_type;
 		request->rotate =allocated_block->request->rotate;
 		request->elongate = allocated_block->request->elongate;
-		request->force_contig = allocated_block->request->force_contig;
+		request->nodecards = allocated_block->request->nodecards;
+		request->quarters = allocated_block->request->quarters;
 				
 		request->rotate_count= 0;
 		request->elongate_count = 0;
@@ -651,6 +783,7 @@ static int _save_allocation(char *com, List allocated_blocks)
 	FILE *file_ptr = NULL;
 	char *conn_type = NULL;
 	char *mode_type = NULL;
+	char extra[20];
 
 	ListIterator results_i;		
 	
@@ -691,20 +824,33 @@ static int _save_allocation(char *com, List allocated_blocks)
 		       file_ptr);
 		fputs ("Numpsets=8\n", file_ptr);
 		fputs ("BridgeAPIVerbose=0\n", file_ptr);
-
+		sprintf(save_string, "BasePartitionNodeCnt=%d\n",
+			base_part_node_cnt);
+		fputs (save_string,file_ptr);
+		sprintf(save_string, "NodeCardNodeCnt=%d\n",
+			nodecard_node_cnt);
+		fputs (save_string,file_ptr);
+		sprintf(save_string, "LayoutMode=%s\n",
+			layout_mode);
+		fputs (save_string,file_ptr);
 		results_i = list_iterator_create(allocated_blocks);
 		while((allocated_block = list_next(results_i)) != NULL) {
 			memset(save_string,0,255);
-			if(allocated_block->request->conn_type == TORUS)
+			memset(extra,0,20);
+			if(allocated_block->request->conn_type == SELECT_TORUS)
 				conn_type = "TORUS";
-			else if(allocated_block->request->conn_type == MESH)
+			else if(allocated_block->request->conn_type 
+				== SELECT_MESH)
 				conn_type = "MESH";
-			else
+			else {
 				conn_type = "SMALL";
-			
-			sprintf(save_string, "Nodes=%s Type=%s\n", 
+				sprintf(extra, " NodeCards=%d Quarters=%d",
+					allocated_block->request->nodecards,
+					allocated_block->request->quarters);
+			}
+			sprintf(save_string, "BPs=%s Type=%s%s\n", 
 				allocated_block->request->save_name, 
-				conn_type);
+				conn_type, extra);
 			fputs (save_string,file_ptr);
 		}
 		fclose (file_ptr);
@@ -737,6 +883,8 @@ static int _parse_bg_spec(char *in_line, List allocated_blocks)
 	int start1[BA_SYSTEM_DIMENSIONS];
 	int end1[BA_SYSTEM_DIMENSIONS];
 	int geo[BA_SYSTEM_DIMENSIONS];
+	char *layout = NULL;
+	int pset_num=-1, api_verb=-1, num_segment=0, num_quarter=0;
 	char com[255];
 	int j = 0, number;
 	int len = 0;
@@ -751,9 +899,19 @@ static int _parse_bg_spec(char *in_line, List allocated_blocks)
 	end1[Z] = -1;
 	
 	error_code = slurm_parser(in_line,
+				  "Numpsets=", 'd', &pset_num,
+				  "BasePartitionNodeCnt=", 'd', 
+				  &base_part_node_cnt,
+				  "NodeCardNodeCnt=", 'd', &nodecard_node_cnt,
+				  "LayoutMode=", 's', &layout,
+				  "BPs=", 's', &nodes,
 				  "Nodes=", 's', &nodes,
 				  "Type=", 's', &conn_type,
+				  "NodeCards=", 'd', &num_segment,
+				  "Quarters=", 'd', &num_quarter,
 				  "END");
+	if(layout)
+		_set_layout(layout);
 	if(!nodes)
 		return SLURM_SUCCESS;
 	len = strlen(nodes);
@@ -829,9 +987,11 @@ static int _parse_bg_spec(char *in_line, List allocated_blocks)
 		j++;
 	}
 	memset(com,0,255);
-	sprintf(com,"create %dx%dx%d %s start %dx%dx%d",
+	sprintf(com,"create %dx%dx%d %s start %dx%dx%d "
+		"nodecards=%d quarters=%d",
 		geo[X], geo[Y], geo[Z], conn_type, 
-		start1[X], start1[Y], start1[Z]);
+		start1[X], start1[Y], start1[Z],
+		num_segment, num_quarter);
 	_create_allocation(com, allocated_blocks);
 #endif
 	return SLURM_SUCCESS;
@@ -868,6 +1028,7 @@ static int _load_configuration(char *com, List allocated_blocks)
 				}
 			}
 		}
+		
 	if(filename[0]=='\0') {
 		sprintf(filename,"bluegene.conf");
 	}
@@ -926,20 +1087,33 @@ static void _print_header_command(void)
 	mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
 		  ba_system_ptr->xcord, "TYPE");
 	ba_system_ptr->xcord += 7;
-	mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
-		  ba_system_ptr->xcord, "CONTIG");
-	ba_system_ptr->xcord += 7;
 	mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
 		  ba_system_ptr->xcord, "ROTATE");
 	ba_system_ptr->xcord += 7;
 	mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
 		  ba_system_ptr->xcord, "ELONG");
 	ba_system_ptr->xcord += 7;
+#ifdef HAVE_BG
+	mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
+		  ba_system_ptr->xcord, "BP_COUNT");
+#else
 	mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
 		  ba_system_ptr->xcord, "NODES");
-	ba_system_ptr->xcord += 7;
+#endif
+	ba_system_ptr->xcord += 10;
+	mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
+		  ba_system_ptr->xcord, "NODECARDS");
+	ba_system_ptr->xcord += 11;
+	mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
+		  ba_system_ptr->xcord, "QUARTERS");
+	ba_system_ptr->xcord += 10;
+#ifdef HAVE_BG
+	mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
+		  ba_system_ptr->xcord, "BP_LIST");
+#else
 	mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
 		  ba_system_ptr->xcord, "NODELIST");
+#endif
 	ba_system_ptr->xcord = 1;
 	ba_system_ptr->ycord++;
 }
@@ -952,10 +1126,10 @@ static void _print_text_command(allocated_block_t *allocated_block)
 	mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
 		  ba_system_ptr->xcord, "%c",allocated_block->letter);
 	ba_system_ptr->xcord += 4;
-	if(allocated_block->request->conn_type==TORUS) 
+	if(allocated_block->request->conn_type==SELECT_TORUS) 
 		mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
 			  ba_system_ptr->xcord, "TORUS");
-	else if (allocated_block->request->conn_type==MESH)
+	else if (allocated_block->request->conn_type==SELECT_MESH)
 		mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
 			  ba_system_ptr->xcord, "MESH");
 	else 
@@ -963,14 +1137,6 @@ static void _print_text_command(allocated_block_t *allocated_block)
 			  ba_system_ptr->xcord, "SMALL");
 	ba_system_ptr->xcord += 7;
 				
-	if(allocated_block->request->force_contig)
-		mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
-			  ba_system_ptr->xcord, "Y");
-	else
-		mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
-			  ba_system_ptr->xcord, "N");
-	ba_system_ptr->xcord += 7;
-				
 	if(allocated_block->request->rotate)
 		mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
 			  ba_system_ptr->xcord, "Y");
@@ -989,7 +1155,21 @@ static void _print_text_command(allocated_block_t *allocated_block)
 
 	mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
 		  ba_system_ptr->xcord, "%d",allocated_block->request->size);
-	ba_system_ptr->xcord += 7;
+	ba_system_ptr->xcord += 10;
+	
+	if(allocated_block->request->conn_type == SELECT_SMALL) {
+		mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
+			  ba_system_ptr->xcord, "%d", 
+			  allocated_block->request->nodecards);
+		ba_system_ptr->xcord += 11;
+		mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
+			  ba_system_ptr->xcord, "%d", 
+			  allocated_block->request->quarters);
+		ba_system_ptr->xcord += 10;
+		
+	} else
+		ba_system_ptr->xcord += 21;
+	
 	mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
 		  ba_system_ptr->xcord, "%s",
 		  allocated_block->request->save_name);
@@ -1090,6 +1270,12 @@ void get_command(void)
 			exit(0);
 		} if (!strcmp(com, "quit")) {
 			break;
+		} else if (!strncasecmp(com, "layout", 6)) {
+			_set_layout(com);
+		} else if (!strncasecmp(com, "basepartition", 13)) {
+			_set_base_part_cnt(com);
+		} else if (!strncasecmp(com, "nodecard", 8)) {
+			_set_nodecard_cnt(com);
 		} else if (!strncasecmp(com, "resolve", 7) ||
 			   !strncasecmp(com, "r ", 2)) {
 			_resolve(com);
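
Note: smap's configure mode gains layout, basepartition, and nodecard commands,
and _save_allocation() now writes the corresponding bluegene.conf keywords
together with per-block NodeCards/Quarters counts for SMALL blocks. An
illustrative fragment of the saved file (the BPs ranges are placeholders, not
taken from the patch; the other values are the defaults declared above):

	BasePartitionNodeCnt=512
	NodeCardNodeCnt=32
	LayoutMode=STATIC
	BPs=[000x133] Type=TORUS
	BPs=[200x200] Type=SMALL NodeCards=4 Quarters=3
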
diff --git a/src/smap/grid_functions.c b/src/smap/grid_functions.c
index 4eb7eb86b46..d8c85e3d08a 100644
--- a/src/smap/grid_functions.c
+++ b/src/smap/grid_functions.c
@@ -62,10 +62,8 @@ extern int set_grid(int start, int end, int count)
 		    ||  (ba_system_ptr->grid[x].state & NODE_STATE_DRAIN))
 			continue;
 
-		ba_system_ptr->grid[x].letter = 
-			letters[count%62];
-		ba_system_ptr->grid[x].color = 
-			colors[count%6];
+		ba_system_ptr->grid[x].letter = letters[count%62];
+		ba_system_ptr->grid[x].color = colors[count%6];
 	}
 #endif
 	return 1;
@@ -92,14 +90,18 @@ extern int set_grid_bg(int *start, int *end, int count, int set)
 	for (x = start[X]; x <= end[X]; x++) {
 		for (y = start[Y]; y <= end[Y]; y++) {
 			for (z = start[Z]; z <= end[Z]; z++) {
-				if(!set) {
-					if(ba_system_ptr->grid[x][y][z].letter != '#') {
-						ba_system_ptr->grid[x][y][z].letter = 
+				if(set 
+				   || ((ba_system_ptr->grid[x][y][z].letter
+					== '.')
+				       && (ba_system_ptr->grid[x][y][z].letter 
+					   != '#'))) {
+						ba_system_ptr->
+							grid[x][y][z].letter = 
 							letters[count%62];
-						ba_system_ptr->grid[x][y][z].color = 
+						ba_system_ptr->
+							grid[x][y][z].color = 
 							colors[count%6];
-					}
-				}
+				} 
 				i++;
 			}
 		}
diff --git a/src/smap/job_functions.c b/src/smap/job_functions.c
index 8da3c6cbe37..ad70c8506b7 100644
--- a/src/smap/job_functions.c
+++ b/src/smap/job_functions.c
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  job_functions.c - Functions related to job display mode of smap.
  *****************************************************************************
- *  Copyright (C) 2002 The Regents of the University of California.
+ *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *
@@ -39,10 +39,12 @@ extern void get_job()
 	static int count = 0;
 	static job_info_msg_t *job_info_ptr = NULL, *new_job_ptr = NULL;
 	job_info_t job;
+	uint16_t show_flags = 0;
 
+	show_flags |= SHOW_ALL;
 	if (job_info_ptr) {
 		error_code = slurm_load_jobs(job_info_ptr->last_update,
-				&new_job_ptr, 0);
+				&new_job_ptr, show_flags);
 		if (error_code == SLURM_SUCCESS)
 			slurm_free_job_info_msg(job_info_ptr);
 		else if (slurm_get_errno() == SLURM_NO_CHANGE_IN_DATA) {
@@ -50,7 +52,8 @@ extern void get_job()
 			new_job_ptr = job_info_ptr;
 		}
 	} else
-		error_code = slurm_load_jobs((time_t) NULL, &new_job_ptr, 0);
+		error_code = slurm_load_jobs((time_t) NULL, &new_job_ptr, 
+					     show_flags);
 
 	if (error_code) {
 		if (quiet_flag != 1) {
@@ -78,9 +81,10 @@ extern void get_job()
 	for (i = 0; i < recs; i++) {
 		job = new_job_ptr->job_array[i];
 		
-		if ((job.job_state == JOB_COMPLETE)
-		    || (job.job_state == JOB_END)
-		    || (job.job_state == JOB_FAILED))
+		if ((job.job_state != JOB_PENDING)
+		    &&  (job.job_state != JOB_RUNNING)
+		    &&  (job.job_state != JOB_SUSPENDED)
+		    &&  ((job.job_state & JOB_COMPLETING) == 0))
 			continue;	/* job has completed */
 
 		if (job.node_inx[0] != -1) {
@@ -197,8 +201,13 @@ static void _print_header_job(void)
 		mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
 			  ba_system_ptr->xcord, "NODES");
 		ba_system_ptr->xcord += 6;
+#ifdef HAVE_BG
+		mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
+			  ba_system_ptr->xcord, "BP_LIST");
+#else
 		mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
 			  ba_system_ptr->xcord, "NODELIST");
+#endif
 		ba_system_ptr->xcord = 1;
 		ba_system_ptr->ycord++;
 	} else {
@@ -212,7 +221,11 @@ static void _print_header_job(void)
 		printf("ST ");
 		printf("      TIME ");
 		printf("NODES ");
+#ifdef HAVE_BG
+		printf("BP_LIST\n");
+#else
 		printf("NODELIST\n");
+#endif
 	}
 }
 
@@ -225,13 +238,28 @@ static int _print_text_job(job_info_t * job_ptr)
 	int i = 0;
 	int width = 0;
 	char time_buf[20];
-	int quarter = -1;
-
+	char tmp_cnt[7];
+	uint32_t node_cnt = 0;
+	uint16_t quarter = (uint16_t) NO_VAL;
+	uint16_t segment = (uint16_t) NO_VAL;
+	
 #ifdef HAVE_BG
 	select_g_get_jobinfo(job_ptr->select_jobinfo, 
 			     SELECT_DATA_QUARTER, 
 			     &quarter);
+	select_g_get_jobinfo(job_ptr->select_jobinfo, 
+			     SELECT_DATA_SEGMENT, 
+			     &segment);
+	select_g_get_jobinfo(job_ptr->select_jobinfo, 
+			     SELECT_DATA_NODE_CNT, 
+			     &node_cnt);
+	if(!strcasecmp(job_ptr->nodes,"waiting...")) 
+		quarter = (uint16_t) NO_VAL;
+#else
+	node_cnt = job_ptr->num_nodes;
 #endif
+	convert_to_kilo(node_cnt, tmp_cnt);
+
 	if(!params.commandline) {
 		mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
 			  ba_system_ptr->xcord, "%c", job_ptr->num_procs);
@@ -274,21 +302,15 @@ static int _print_text_job(job_info_t * job_ptr)
 			  time_buf);
 		ba_system_ptr->xcord += 11;
 
-		if(quarter != -1)
-			mvwprintw(ba_system_ptr->text_win, 
-				  ba_system_ptr->ycord,
-				  ba_system_ptr->xcord, "%5s", 
-				  "0.25");
-		else
-			mvwprintw(ba_system_ptr->text_win, 
-				  ba_system_ptr->ycord,
-				  ba_system_ptr->xcord, "%5d", 
-				  job_ptr->num_nodes);
+		mvwprintw(ba_system_ptr->text_win, 
+			  ba_system_ptr->ycord,
+			  ba_system_ptr->xcord, "%5s", tmp_cnt);
+		
 		ba_system_ptr->xcord += 6;
 
 		tempxcord = ba_system_ptr->xcord;
 		
-
+		i=0;
 		while (job_ptr->nodes[i] != '\0') {
 			if ((printed = mvwaddch(ba_system_ptr->text_win,
 						ba_system_ptr->ycord, 
@@ -307,12 +329,21 @@ static int _print_text_job(job_info_t * job_ptr)
 			}
 			i++;
 		}
-		if(quarter != -1) {
-			mvwprintw(ba_system_ptr->text_win, 
-				  ba_system_ptr->ycord,
-				  ba_system_ptr->xcord, ".%d", 
-				  quarter);		
-			ba_system_ptr->xcord += 2;
+		if(quarter != (uint16_t) NO_VAL) {
+			if(segment != (uint16_t) NO_VAL) {
+				mvwprintw(ba_system_ptr->text_win, 
+					  ba_system_ptr->ycord,
+					  ba_system_ptr->xcord, ".%d.%d", 
+					  quarter,
+					  segment);
+				ba_system_ptr->xcord += 4;
+			} else {
+				mvwprintw(ba_system_ptr->text_win, 
+					  ba_system_ptr->ycord,
+					  ba_system_ptr->xcord, ".%d", 
+					  quarter);
+				ba_system_ptr->xcord += 2;
+			}
 		}
 
 		ba_system_ptr->xcord = 1;
@@ -339,15 +370,15 @@ static int _print_text_job(job_info_t * job_ptr)
 		}
 		
 		printf("%10.10s ", time_buf);
-		
-		if(quarter != -1)
-			printf("%5s ", "0.25");
-		else
-			printf("%5d ", job_ptr->num_nodes);
+
+		printf("%5s ", tmp_cnt);
 		
 		printf("%s", job_ptr->nodes);
-		if(quarter != -1) {
-			printf(".%d",quarter);
+		if(quarter != (uint16_t) NO_VAL) {
+			if(segment != (uint16_t) NO_VAL)
+				printf(".%d.%d", quarter, segment);
+			else
+				printf(".%d", quarter);
 		}
 
 		printf("\n");
diff --git a/src/smap/partition_functions.c b/src/smap/partition_functions.c
index 29882a2fe7d..03b3e2fd6f8 100644
--- a/src/smap/partition_functions.c
+++ b/src/smap/partition_functions.c
@@ -2,7 +2,7 @@
  *  partition_functions.c - Functions related to partition display 
  *  mode of smap.
  *****************************************************************************
- *  Copyright (C) 2004 The Regents of the University of California.
+ *  Copyright (C) 2004-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *
@@ -44,7 +44,9 @@ typedef struct {
 	int letter_num;
 	List nodelist;
 	int size;
-	int quarter;	
+	uint16_t quarter;	
+	uint16_t segment;	
+	int node_cnt;	
 	bool printed;
 
 } db2_block_info_t;
@@ -163,6 +165,7 @@ extern void get_bg_part()
 	partition_info_t part;
 	int number, start[BA_SYSTEM_DIMENSIONS], end[BA_SYSTEM_DIMENSIONS];
 	db2_block_info_t *block_ptr = NULL;
+	db2_block_info_t *found_block = NULL;
 	ListIterator itr;
 	List nodelist = NULL;
 
@@ -240,8 +243,7 @@ extern void get_bg_part()
 	
 	for (i=0; i<new_bg_ptr->record_count; i++) {
 		block_ptr = xmalloc(sizeof(db2_block_info_t));
-		list_append(block_list, block_ptr);
-		
+			
 		block_ptr->bg_block_name 
 			= xstrdup(new_bg_ptr->bg_info_array[i].bg_block_id);
 		block_ptr->nodes 
@@ -259,15 +261,31 @@ extern void get_bg_part()
 			= new_bg_ptr->bg_info_array[i].node_use;
 		block_ptr->quarter 
 			= new_bg_ptr->bg_info_array[i].quarter;
-		if(block_ptr->quarter < 1) {
+		block_ptr->segment 
+			= new_bg_ptr->bg_info_array[i].segment;
+		block_ptr->node_cnt 
+			= new_bg_ptr->bg_info_array[i].node_cnt;
+	       
+		itr = list_iterator_create(block_list);
+		while ((found_block = (db2_block_info_t*)list_next(itr)) 
+		       != NULL) {
+			if(!strcmp(block_ptr->nodes, found_block->nodes)) {
+				block_ptr->letter_num = 
+					found_block->letter_num;
+				break;
+			}
+		}
+		list_iterator_destroy(itr);
+
+		if(!found_block) {
 			last_count++;
 			_marknodes(block_ptr, last_count);
-		} else 
-			block_ptr->letter_num = last_count;
+		}
 		
 		if(block_ptr->bg_conn_type == SELECT_SMALL)
 			block_ptr->size = 0;
-		
+
+		list_append(block_list, block_ptr);
 	}
 	
 	if (!params.no_header)
@@ -357,22 +375,16 @@ static int _marknodes(db2_block_info_t *block_ptr, int count)
 			end[Z] = (number % 10);
 			j += 3;
 			
-			if(start[X] == 0
-			   && start[Y] == 0
-			   && start[Z] == 0
-			   && end[X] == (DIM_SIZE[X]-1) 
-			   && end[Y] == (DIM_SIZE[Y]-1)
-			   && end[Z] == (DIM_SIZE[Z]-1) 
-			   && block_ptr->state == RM_PARTITION_FREE) 
+			if(block_ptr->state != RM_PARTITION_FREE) 
 				block_ptr->size += set_grid_bg(start,
-								end,
-								count,
-								1);
+							       end,
+							       count,
+							       1);
 			else
 				block_ptr->size += set_grid_bg(start, 
-								end, 
-								count, 
-								0);
+							       end, 
+							       count, 
+							       0);
 			if(block_ptr->nodes[j] != ',')
 				break;
 			j--;
@@ -441,8 +453,13 @@ static void _print_header_part(void)
 		mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
 			  ba_system_ptr->xcord, "NODES");
 		ba_system_ptr->xcord += 7;
+#ifdef HAVE_BG
+		mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
+			  ba_system_ptr->xcord, "BP_LIST");
+#else
 		mvwprintw(ba_system_ptr->text_win, ba_system_ptr->ycord,
 			  ba_system_ptr->xcord, "NODELIST");
+#endif
 		ba_system_ptr->xcord = 1;
 		ba_system_ptr->ycord++;
 	} else {
@@ -459,7 +476,11 @@ static void _print_header_part(void)
 		}
 
 		printf("NODES ");
+#ifdef HAVE_BG
+		printf("BP_LIST\n");
+#else
 		printf("NODELIST\n");	
+#endif
 	}	
 }
 
@@ -499,21 +520,15 @@ static int _print_text_part(partition_info_t *part_ptr,
 	int i = 0;
 	int width = 0;
 	char *nodes = NULL, time_buf[20];
+	char tmp_cnt[7];
+
+	convert_to_kilo(part_ptr->total_nodes, tmp_cnt);
 
 	if(!params.commandline) {
-		if((params.display == BGPART) 
-		   && db2_info_ptr->quarter != -1) {
-			mvwprintw(ba_system_ptr->text_win, 
-				  ba_system_ptr->ycord,
-				  ba_system_ptr->xcord, "%c.%d", 
-				  part_ptr->root_only, 
-				  db2_info_ptr->quarter);
-		} else {
-			mvwprintw(ba_system_ptr->text_win, 
-				  ba_system_ptr->ycord,
-				  ba_system_ptr->xcord, "%c", 
-				  part_ptr->root_only);
-		}
+		mvwprintw(ba_system_ptr->text_win, 
+			  ba_system_ptr->ycord,
+			  ba_system_ptr->xcord, "%c", 
+			  part_ptr->root_only);
 		ba_system_ptr->xcord += 4;
 
 		if (part_ptr->name) {
@@ -612,16 +627,10 @@ static int _print_text_part(partition_info_t *part_ptr,
 				ba_system_ptr->xcord += 10;
 			}
 		}
-		if(part_ptr->total_nodes == 0)
-			mvwprintw(ba_system_ptr->text_win, 
-				  ba_system_ptr->ycord,
-				  ba_system_ptr->xcord, "%5s", 
-				  "0.25");
-		else	
-			mvwprintw(ba_system_ptr->text_win, 
-				  ba_system_ptr->ycord,
-				  ba_system_ptr->xcord, "%5d", 
-				  part_ptr->total_nodes);
+		mvwprintw(ba_system_ptr->text_win, 
+			  ba_system_ptr->ycord,
+			  ba_system_ptr->xcord, "%5s", tmp_cnt);
+			  
 		ba_system_ptr->xcord += 7;
 
 		tempxcord = ba_system_ptr->xcord;
@@ -630,6 +639,7 @@ static int _print_text_part(partition_info_t *part_ptr,
 			nodes = part_ptr->allow_groups;
 		else
 			nodes = part_ptr->nodes;
+		i=0;
 		prefixlen = i;
 		while (nodes && nodes[i]) {
 			width = ba_system_ptr->text_win->_maxx 
@@ -659,11 +669,19 @@ static int _print_text_part(partition_info_t *part_ptr,
 			i++;
 		}
 		if((params.display == BGPART) 
-		   && (db2_info_ptr->quarter != -1)) {
-			mvwprintw(ba_system_ptr->text_win, 
-				  ba_system_ptr->ycord,
-				  ba_system_ptr->xcord, ".%d",
-				  db2_info_ptr->quarter);
+		   && (db2_info_ptr->quarter != (uint16_t) NO_VAL)) {
+			if(db2_info_ptr->segment != (uint16_t) NO_VAL) {
+				mvwprintw(ba_system_ptr->text_win, 
+					  ba_system_ptr->ycord,
+					  ba_system_ptr->xcord, ".%d.%d", 
+					  db2_info_ptr->quarter,
+					  db2_info_ptr->segment);
+			} else {
+				mvwprintw(ba_system_ptr->text_win, 
+					  ba_system_ptr->ycord,
+					  ba_system_ptr->xcord, ".%d", 
+					  db2_info_ptr->quarter);
+			}
 		}
 			
 		ba_system_ptr->xcord = 1;
@@ -709,10 +727,7 @@ static int _print_text_part(partition_info_t *part_ptr,
 			} 
 		}
 		
-		if(part_ptr->total_nodes == 0)
-			printf("%5s ", "0.25");
-		else	
-			printf("%5d ", part_ptr->total_nodes);
+		printf("%5s ", tmp_cnt);
 		
 		tempxcord = ba_system_ptr->xcord;
 		
@@ -722,9 +737,15 @@ static int _print_text_part(partition_info_t *part_ptr,
 			nodes = part_ptr->nodes;
 		
 		if((params.display == BGPART) 
-		   && (db2_info_ptr->quarter != -1))
-			printf("%s.%d\n", nodes, db2_info_ptr->quarter);
-		else
+		   && (db2_info_ptr->quarter != (uint16_t) NO_VAL)) {
+			if(db2_info_ptr->segment != (uint16_t) NO_VAL)
+				printf("%s.%d.%d\n", nodes, 
+				       db2_info_ptr->quarter,
+				       db2_info_ptr->segment);
+			else 
+				printf("%s.%d\n", nodes, 
+				       db2_info_ptr->quarter);
+		} else
 			printf("%s\n",nodes);
 	}
 	return printed;
@@ -800,10 +821,11 @@ static int _print_rest(db2_block_info_t *block_ptr)
 {
 	partition_info_t part;
 	db2_block_info_t *db2_info_ptr = NULL;
-	ListIterator itr;
 	int set = 0;
 		
-	part.total_nodes = block_ptr->size;
+	if(block_ptr->node_cnt == 0)
+		block_ptr->node_cnt = block_ptr->size;
+	part.total_nodes = block_ptr->node_cnt;
 	if(block_ptr->slurm_part_name)
 		part.name = block_ptr->slurm_part_name;
 	else
diff --git a/src/smap/smap.c b/src/smap/smap.c
index eff93c9ed6e..9245b4ae270 100644
--- a/src/smap/smap.c
+++ b/src/smap/smap.c
@@ -72,9 +72,11 @@ int main(int argc, char *argv[])
 #ifdef HAVE_BG
 	int mapset = 0;	
 #endif
-	//char *name;	
+	//char *name;
+	
 	log_init(xbasename(argv[0]), opts, SYSLOG_FACILITY_DAEMON, NULL);
 	parse_command_line(argc, argv);
+
 	while (slurm_load_node((time_t) NULL, &new_node_ptr, SHOW_ALL)) { 
 		error_code = slurm_get_errno();
 		printf("slurm_load_node: %s\n", slurm_strerror(error_code));
@@ -86,8 +88,9 @@ int main(int argc, char *argv[])
 			exit(1);
 		sleep(10);	/* keep trying to reconnect */
 	}
-	ba_init(new_node_ptr);
 	
+	ba_init(new_node_ptr);
+		
 	if(params.partition) {
 			
 #ifdef HAVE_BG_FILES
diff --git a/src/squeue/print.c b/src/squeue/print.c
index 978f4615e15..a64304162d7 100644
--- a/src/squeue/print.c
+++ b/src/squeue/print.c
@@ -530,9 +530,13 @@ int _print_job_priority(job_info_t * job, int width, bool right, char* suffix)
 
 int _print_job_nodes(job_info_t * job, int width, bool right, char* suffix)
 {
-	if (job == NULL)        /* Print the Header instead */
+	if (job == NULL) {       /* Print the Header instead */
+#ifdef HAVE_BG
+		_print_str("BP_LIST", width, right, false);
+#else
 		_print_str("NODELIST", width, right, false);
-	else
+#endif
+	} else
 		_print_nodes(job->nodes, width, right, false);
 
 	if (suffix)
@@ -543,12 +547,17 @@ int _print_job_nodes(job_info_t * job, int width, bool right, char* suffix)
 int _print_job_reason_list(job_info_t * job, int width, bool right, 
 		char* suffix)
 {
-	int quarter = -1;
-	char tmp_char[3];
+	uint16_t quarter = (uint16_t) NO_VAL;
+	uint16_t segment = (uint16_t) NO_VAL;
+	char tmp_char[6];
 	
-	if (job == NULL)	/* Print the Header instead */
+	if (job == NULL) {	/* Print the Header instead */
+#ifdef HAVE_BG
+		_print_str("BP_LIST(REASON)", width, right, false);
+#else
 		_print_str("NODELIST(REASON)", width, right, false);
-	else if (job->job_state == JOB_PENDING) {
+#endif
+	} else if (job->job_state == JOB_PENDING) {
 		char id[FORMAT_STRING_SIZE];
 		snprintf(id, FORMAT_STRING_SIZE, "(%s)", 
 			job_reason_string(job->wait_reason));
@@ -558,11 +567,17 @@ int _print_job_reason_list(job_info_t * job, int width, bool right,
 		select_g_get_jobinfo(job->select_jobinfo, 
 				     SELECT_DATA_QUARTER, 
 				     &quarter);
+		select_g_get_jobinfo(job->select_jobinfo, 
+				     SELECT_DATA_SEGMENT, 
+				     &segment);
 #endif
 		
 		_print_nodes(job->nodes, width, right, false);
-		if(quarter != -1) {
-			sprintf(tmp_char,".%d",quarter);
+		if(quarter != (uint16_t) NO_VAL) {
+			if(segment != (uint16_t) NO_VAL) 
+				sprintf(tmp_char,".%d.%d",quarter,segment);
+			else
+				sprintf(tmp_char,".%d",quarter);
 			_print_str(tmp_char, width, right, false);
 		}
 	}
@@ -594,10 +609,13 @@ int _print_job_node_inx(job_info_t * job, int width, bool right, char* suffix)
 
 int _print_job_num_procs(job_info_t * job, int width, bool right, char* suffix)
 {
+	char tmp_char[6];
 	if (job == NULL)	/* Print the Header instead */
 		_print_str("CPUS", width, right, true);
-	else
-		_print_int(job->num_procs, width, right, true);
+	else {
+		convert_to_kilo(job->num_procs, tmp_char);
+		_print_str(tmp_char, width, right, true);
+	}
 	if (suffix)
 		printf("%s", suffix);
 	return SLURM_SUCCESS;
@@ -606,21 +624,22 @@ int _print_job_num_procs(job_info_t * job, int width, bool right, char* suffix)
 int _print_job_num_nodes(job_info_t * job, int width, bool right_justify, 
 			 char* suffix)
 {
-	int quarter = -1;
+	uint32_t node_cnt = 0;
+	char tmp_char[6];
+	int i=0;
+
 	if (job == NULL)	/* Print the Header instead */
 		_print_str("NODES", width, right_justify, true);
 	else {
 #ifdef HAVE_BG
 		select_g_get_jobinfo(job->select_jobinfo, 
-				     SELECT_DATA_QUARTER, 
-				     &quarter);
+				     SELECT_DATA_NODE_CNT, 
+				     &node_cnt);
 #endif
-		
-		if(quarter != -1)
-			_print_str("0.25", width, right_justify, true);
-		else
-			_print_int(_get_node_cnt(job), width, 
-				   right_justify, true);
+		if(node_cnt == 0)
+			node_cnt = _get_node_cnt(job);
+		convert_to_kilo(node_cnt, tmp_char);
+		_print_str(tmp_char, width, right_justify, true);
 	}
 	if (suffix)
 		printf("%s", suffix);
@@ -667,10 +686,14 @@ static bool _node_in_list(char *node_name, char *node_list)
 int _print_job_shared(job_info_t * job, int width, bool right_justify, 
 		      char* suffix)
 {
+	char tmp_char[6];
+
 	if (job == NULL)	/* Print the Header instead */
 		_print_str("SHARED", width, right_justify, true);
-	else
-		_print_int(job->shared, width, right_justify, true);
+	else {
+		convert_to_kilo(job->shared, tmp_char);
+		_print_str(tmp_char, width, right_justify, true);
+	}
 	if (suffix)
 		printf("%s", suffix);
 	return SLURM_SUCCESS;
@@ -679,10 +702,15 @@ int _print_job_shared(job_info_t * job, int width, bool right_justify,
 int _print_job_contiguous(job_info_t * job, int width, bool right_justify, 
 			  char* suffix)
 {
+	char tmp_char[6];
+
 	if (job == NULL)	/* Print the Header instead */
 		_print_str("CONTIGUOUS", width, right_justify, true);
-	else
-		_print_int(job->contiguous, width, right_justify, true);
+	else {
+		convert_to_kilo(job->contiguous, tmp_char);
+		_print_str(tmp_char, width, right_justify, true);
+	}
+	
 	if (suffix)
 		printf("%s", suffix);
 	return SLURM_SUCCESS;
@@ -691,10 +719,15 @@ int _print_job_contiguous(job_info_t * job, int width, bool right_justify,
 int _print_job_min_procs(job_info_t * job, int width, bool right_justify, 
 			 char* suffix)
 {
+	char tmp_char[6];
+	
 	if (job == NULL)	/* Print the Header instead */
 		_print_str("MIN_PROCS", width, right_justify, true);
-	else
-		_print_int(job->min_procs, width, right_justify, true);
+	else {
+		convert_to_kilo(job->min_procs, tmp_char);
+		_print_str(tmp_char, width, right_justify, true);
+	}
+		
 	if (suffix)
 		printf("%s", suffix);
 	return SLURM_SUCCESS;
@@ -703,10 +736,15 @@ int _print_job_min_procs(job_info_t * job, int width, bool right_justify,
 int _print_job_min_memory(job_info_t * job, int width, bool right_justify, 
 			  char* suffix)
 {
+	char tmp_char[6];
+	
 	if (job == NULL)	/* Print the Header instead */
 		_print_str("MIN_MEMORY", width, right_justify, true);
-	else
-		_print_int(job->min_memory, width, right_justify, true);
+	else {
+		convert_to_kilo(job->min_memory, tmp_char);
+		_print_str(tmp_char, width, right_justify, true);
+	}
+	
 	if (suffix)
 		printf("%s", suffix);
 	return SLURM_SUCCESS;
@@ -716,10 +754,15 @@ int
 _print_job_min_tmp_disk(job_info_t * job, int width, bool right_justify, 
 			char* suffix)
 {
+	char tmp_char[6];
+	
 	if (job == NULL)	/* Print the Header instead */
 		_print_str("MIN_TMP_DISK", width, right_justify, true);
-	else
-		_print_int(job->min_tmp_disk, width, right_justify, true);
+	else {
+		convert_to_kilo(job->min_tmp_disk, tmp_char);
+		_print_str(tmp_char, width, right_justify, true);
+	}
+		
 	if (suffix)
 		printf("%s", suffix);
 	return SLURM_SUCCESS;
@@ -1011,9 +1054,13 @@ int _print_step_name(job_step_info_t * step, int width, bool right,
 int _print_step_nodes(job_step_info_t * step, int width, bool right, 
 		      char* suffix)
 {
-	if (step == NULL)	/* Print the Header instead */
+	if (step == NULL) {	/* Print the Header instead */
+#ifdef HAVE_BG
+		_print_str("BP_LIST", width, right, false);
+#else
 		_print_str("NODELIST", width, right, false);
-	else 
+#endif
+	} else 
 		_print_nodes(step->nodes, width, right, false);
 	if (suffix)
 		printf("%s", suffix);
diff --git a/src/srun/allocate.c b/src/srun/allocate.c
index 8f212e7a080..5294ef74484 100644
--- a/src/srun/allocate.c
+++ b/src/srun/allocate.c
@@ -371,8 +371,10 @@ job_desc_msg_create_from_opts (char *script)
 {
 	extern char **environ;
 	job_desc_msg_t *j = xmalloc(sizeof(*j));
+	uint16_t tmp = 0;
 
 	slurm_init_job_desc_msg(j);
+	select_g_alloc_jobinfo (&j->select_jobinfo);
 
 	j->contiguous     = opt.contiguous;
 	j->features       = opt.constraints;
@@ -404,19 +406,20 @@ job_desc_msg_create_from_opts (char *script)
 	if (opt.jobid != NO_VAL)
 		j->job_id	= opt.jobid;
 
-#if SYSTEM_DIMENSIONS
-	if (opt.geometry[0] > 0) {
-		int i;
-		for (i=0; i<SYSTEM_DIMENSIONS; i++)
-			j->geometry[i] = opt.geometry[i];
+	if (opt.geometry[0] > 0) 
+		select_g_set_jobinfo(j->select_jobinfo,
+				     SELECT_DATA_GEOMETRY,
+				     opt.geometry);
+	if (opt.conn_type != -1) 
+		select_g_set_jobinfo(j->select_jobinfo,
+				     SELECT_DATA_CONN_TYPE,
+				     &opt.conn_type);
+	if (opt.no_rotate) {
+		tmp = 0;
+		select_g_set_jobinfo(j->select_jobinfo,
+				     SELECT_DATA_ROTATE,
+				     &tmp);	
 	}
-#endif
-
-	if (opt.conn_type > -1)
-		j->conn_type = opt.conn_type;
-	if (opt.no_rotate)
-		j->rotate = 0;
-
 	if (opt.max_nodes)
 		j->max_nodes    = opt.max_nodes;
 	if (opt.mincpus > -1)
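The change above stops writing geometry, conn_type and rotate into fixed job_desc_msg_t fields and instead pushes them through the opaque select_jobinfo handle with select_g_set_jobinfo(), so the generic srun code no longer needs SYSTEM_DIMENSIONS-guarded members. A minimal standalone sketch of that opaque-handle pattern follows; the enum keys, struct and function names are hypothetical and do not use the real SLURM plugin API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical keys, loosely mirroring SELECT_DATA_GEOMETRY etc. */
enum opt_key { KEY_GEOMETRY, KEY_CONN_TYPE, KEY_ROTATE };

/* Opaque per-job blob owned by a (pretend) selection plugin. */
struct jobinfo {
	uint16_t geometry[3];
	int16_t  conn_type;
	uint16_t rotate;
};

static int set_jobinfo(struct jobinfo *ji, enum opt_key key, const void *data)
{
	switch (key) {
	case KEY_GEOMETRY:
		memcpy(ji->geometry, data, sizeof(ji->geometry));
		return 0;
	case KEY_CONN_TYPE:
		ji->conn_type = *(const int16_t *) data;
		return 0;
	case KEY_ROTATE:
		ji->rotate = *(const uint16_t *) data;
		return 0;
	}
	return -1;
}

int main(void)
{
	struct jobinfo *ji = calloc(1, sizeof(*ji));
	uint16_t geo[3] = {2, 1, 1};
	uint16_t no_rotate = 0;

	set_jobinfo(ji, KEY_GEOMETRY, geo);
	set_jobinfo(ji, KEY_ROTATE, &no_rotate);
	printf("%ux%ux%u rotate=%u\n",
	       ji->geometry[0], ji->geometry[1], ji->geometry[2], ji->rotate);
	free(ji);
	return 0;
}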
diff --git a/src/srun/opt.c b/src/srun/opt.c
index 0e39ed58052..11243e4cec2 100644
--- a/src/srun/opt.c
+++ b/src/srun/opt.c
@@ -164,7 +164,7 @@ static enum  task_dist_states _verify_dist_type(const char *arg);
 static bool  _verify_node_count(const char *arg, int *min, int *max);
 static int   _verify_cpu_bind(const char *arg, char **cpu_bind,
 					cpu_bind_type_t *cpu_bind_type);
-static int   _verify_geometry(const char *arg, int *geometry);
+static int   _verify_geometry(const char *arg, uint16_t *geometry);
 static int   _verify_conn_type(const char *arg);
 
 /*---[ end forward declarations of static functions ]---------------------*/
@@ -274,7 +274,7 @@ static int _verify_conn_type(const char *arg)
  * verify geometry arguments, must have proper count
  * returns -1 on error, 0 otherwise
  */
-static int _verify_geometry(const char *arg, int *geometry)
+static int _verify_geometry(const char *arg, uint16_t *geometry)
 {
 	char* token, *delimiter = ",x", *next_ptr;
 	int i, rc = 0;
@@ -288,8 +288,8 @@ static int _verify_geometry(const char *arg, int *geometry)
 			rc = -1;
 			break;
 		}
-		geometry[i] = atoi(token);
-		if (geometry[i] <= 0) {
+		geometry[i] = (uint16_t)atoi(token);
+		if (geometry[i] == 0 || geometry[i] == (uint16_t)NO_VAL) {
 			error("invalid --geometry argument");
 			rc = -1;
 			break;
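The _verify_geometry() hunk above only retypes the output array to uint16_t. For readers unfamiliar with the option format, the sketch below parses a --geometry string such as "2x1x1" in the same general way (',' or 'x' separators, every dimension required and non-zero). It is a standalone approximation with an illustrative name, not the srun code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DIMS 3

/* Parse "AxBxC" (or comma separated) into geometry[DIMS].
 * Returns 0 on success, -1 on error. */
static int parse_geometry(const char *arg, uint16_t *geometry)
{
	char *copy = strdup(arg), *save = NULL, *tok;
	int i, rc = 0;

	if (copy == NULL)
		return -1;
	tok = strtok_r(copy, ",x", &save);
	for (i = 0; i < DIMS; i++) {
		if (tok == NULL) {
			rc = -1;	/* too few dimensions */
			break;
		}
		geometry[i] = (uint16_t) atoi(tok);
		if (geometry[i] == 0) {
			rc = -1;	/* zero or invalid dimension */
			break;
		}
		tok = strtok_r(NULL, ",x", &save);
	}
	free(copy);
	return rc;
}

int main(void)
{
	uint16_t geo[DIMS];
	if (parse_geometry("2x1x1", geo) == 0)
		printf("%u %u %u\n", geo[0], geo[1], geo[2]);
	return 0;
}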
@@ -412,17 +412,27 @@ _verify_node_count(const char *arg, int *min_nodes, int *max_nodes)
 {
 	char *end_ptr;
 	int val1, val2;
-
+	
 	val1 = strtol(arg, &end_ptr, 10);
-	if (end_ptr[0] == '\0') {
+	if (end_ptr[0] == 'k' || end_ptr[0] == 'K') {
+		val1 *= 1024;
+		end_ptr++;
+	}
+
+	if (end_ptr[0] == '\0') {
 		*min_nodes = val1;
 		return true;
 	}
-
+	
 	if (end_ptr[0] != '-')
 		return false;
 
 	val2 = strtol(&end_ptr[1], &end_ptr, 10);
+	if (end_ptr[0] == 'k' || end_ptr[0] == 'K') {
+		val2 *= 1024;
+		end_ptr++;
+	}
+
 	if (end_ptr[0] == '\0') {
 		*min_nodes = val1;
 		*max_nodes = val2;
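_verify_node_count() now accepts a k/K suffix so that BlueGene-sized ranges such as 32-32k stay short on the command line. Below is a self-contained sketch of the same parsing rule with illustrative helper names; unlike the srun routine, it also fills *max for a single value so the demo output is defined.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Read an integer with an optional k/K (x1024) suffix, advancing *s. */
static long read_count(const char **s)
{
	char *end;
	long val = strtol(*s, &end, 10);

	if (end[0] == 'k' || end[0] == 'K') {
		val *= 1024;
		end++;
	}
	*s = end;
	return val;
}

/* Parse "N" or "N-M" into a min/max node count, as in "32-32k". */
static bool parse_node_count(const char *arg, long *min, long *max)
{
	*min = read_count(&arg);
	if (arg[0] == '\0') {
		*max = *min;
		return true;
	}
	if (arg[0] != '-')
		return false;
	arg++;
	*max = read_count(&arg);
	return arg[0] == '\0';
}

int main(void)
{
	long lo, hi;
	if (parse_node_count("32-32k", &lo, &hi))
		printf("min=%ld max=%ld\n", lo, hi);	/* min=32 max=32768 */
	return 0;
}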
@@ -613,7 +623,7 @@ static void _opt_default()
 	opt.msg_timeout     = 5;  /* Default launch msg timeout           */
 
 	for (i=0; i<SYSTEM_DIMENSIONS; i++)
-		opt.geometry[i]	    = -1;
+		opt.geometry[i]	    = (uint16_t) NO_VAL;
 	opt.no_rotate	    = false;
 	opt.conn_type	    = -1;
 
@@ -722,57 +732,57 @@ _process_env_var(env_vars_t *e, const char *val)
 
 	switch (e->type) {
 	case OPT_STRING:
-	    *((char **) e->arg) = xstrdup(val);
-	    break;
+		*((char **) e->arg) = xstrdup(val);
+		break;
 	case OPT_INT:
-	    if (val != NULL) {
-		    *((int *) e->arg) = (int) strtol(val, &end, 10);
-		    if (!(end && *end == '\0')) 
-			    error("%s=%s invalid. ignoring...", e->var, val);
-	    }
-	    break;
+		if (val != NULL) {
+			*((int *) e->arg) = (int) strtol(val, &end, 10);
+			if (!(end && *end == '\0')) 
+				error("%s=%s invalid. ignoring...", e->var, val);
+		}
+		break;
 
 	case OPT_DEBUG:
-	    if (val != NULL) {
-		    _verbose = (int) strtol(val, &end, 10);
-		    if (!(end && *end == '\0')) 
-			    error("%s=%s invalid", e->var, val);
-	    }
-	    break;
+		if (val != NULL) {
+			_verbose = (int) strtol(val, &end, 10);
+			if (!(end && *end == '\0')) 
+				error("%s=%s invalid", e->var, val);
+		}
+		break;
 
 	case OPT_DISTRIB:
-	    dt = _verify_dist_type(val);
-	    if (dt == SLURM_DIST_UNKNOWN) {
-		    error("\"%s=%s\" -- invalid distribution type. " 
-		          "ignoring...", e->var, val);
-	    } else 
-		    opt.distribution = dt;
-	    break;
+		dt = _verify_dist_type(val);
+		if (dt == SLURM_DIST_UNKNOWN) {
+			error("\"%s=%s\" -- invalid distribution type. " 
+			      "ignoring...", e->var, val);
+		} else 
+			opt.distribution = dt;
+		break;
 
 	case OPT_CPU_BIND:
-	    if (_verify_cpu_bind(val, &opt.cpu_bind,
-					    &opt.cpu_bind_type))
-		    exit(1);
-	    break;
+		if (_verify_cpu_bind(val, &opt.cpu_bind,
+				     &opt.cpu_bind_type))
+			exit(1);
+		break;
 
 	case OPT_NODES:
-	    opt.nodes_set = _verify_node_count( val, 
-			                        &opt.min_nodes, 
-					        &opt.max_nodes );
-	    if (opt.nodes_set == false) {
-		    error("\"%s=%s\" -- invalid node count. ignoring...",
-			  e->var, val);
-	    }
-	    break;
+		opt.nodes_set = _verify_node_count( val, 
+						    &opt.min_nodes, 
+						    &opt.max_nodes );
+		if (opt.nodes_set == false) {
+			error("\"%s=%s\" -- invalid node count. ignoring...",
+			      e->var, val);
+		}
+		break;
 
 	case OPT_OVERCOMMIT:
-	    opt.overcommit = true;
-	    break;
+		opt.overcommit = true;
+		break;
 
 	case OPT_CORE:
-	    opt.core_type = core_format_type (val);
-	    break;
-
+		opt.core_type = core_format_type (val);
+		break;
+	    
 	case OPT_CONN_TYPE:
 		opt.conn_type = _verify_conn_type(val);
 		break;
@@ -784,7 +794,7 @@ _process_env_var(env_vars_t *e, const char *val)
 	case OPT_GEOMETRY:
 		if (_verify_geometry(val, opt.geometry)) {
 			error("\"%s=%s\" -- invalid geometry, ignoring...",
-				e->var, val);
+			      e->var, val);
 		}
 		break;
 
@@ -797,8 +807,8 @@ _process_env_var(env_vars_t *e, const char *val)
 		break;
 
 	default:
-	    /* do nothing */
-	    break;
+		/* do nothing */
+		break;
 	}
 }
 
@@ -1007,7 +1017,6 @@ void set_options(const int argc, char **argv, int first)
 		case (int)'g':
 			if(!first && opt.geometry)
 				break;
-						
 			if (_verify_geometry(optarg, opt.geometry))
 				exit(1);
 			break;
@@ -1708,7 +1717,7 @@ print_geometry()
 	char buf[32], *rc = NULL;
 
 	if ((SYSTEM_DIMENSIONS == 0)
-	||  (opt.geometry[0] < 0))
+	||  (opt.geometry[0] == (uint16_t)NO_VAL))
 		return NULL;
 
 	for (i=0; i<SYSTEM_DIMENSIONS; i++) {
diff --git a/src/srun/opt.h b/src/srun/opt.h
index 86cf0b867ac..b617911f533 100644
--- a/src/srun/opt.h
+++ b/src/srun/opt.h
@@ -161,9 +161,9 @@ typedef struct srun_options {
 	char *network;		/* --network=			*/
         bool exclusive;         /* --exclusive                  */
 
-	int geometry[SYSTEM_DIMENSIONS]; /* --geometry, -g	*/
+	uint16_t geometry[SYSTEM_DIMENSIONS]; /* --geometry, -g	*/
 	bool no_rotate;		/* --no_rotate, -R		*/
-	int conn_type;		/* --conn-type 			*/
+	int16_t conn_type;	/* --conn-type 			*/
 	char *prolog;           /* --prolog                     */
 	char *epilog;           /* --epilog                     */
 	time_t begin;		/* --begin			*/
diff --git a/testsuite/expect/globals.example b/testsuite/expect/globals.example
index 5bfe91a84a5..2b920a36c08 100755
--- a/testsuite/expect/globals.example
+++ b/testsuite/expect/globals.example
@@ -391,6 +391,36 @@ proc test_front_end { } {
 	return $bluegene
 }
 
+################################################################
+#
+# Proc: test_bluegene
+#
+# Purpose: Determine if the system is a bluegene system
+#
+# Returns 1 if the system is a bluegene, 0 otherwise
+#
+################################################################
+
+proc test_bluegene { } {
+	global scontrol bin_bash bin_grep
+
+	log_user 0
+	set bluegene 0
+	spawn -noecho $bin_bash -c "exec $scontrol show config | $bin_grep SelectType"
+	expect {
+		"select/bluegene" {
+			set bluegene 1
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+	log_user 1
+	
+	return $bluegene
+}
+
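The new test_bluegene helper above simply greps the output of "scontrol show config" for a SelectType of select/bluegene. The same check expressed in C, as a rough sketch (assumes scontrol is in PATH and keeps error handling minimal):

#include <stdio.h>
#include <string.h>

/* Return 1 if the SelectType reported by scontrol is select/bluegene. */
static int is_bluegene(void)
{
	FILE *fp = popen("scontrol show config 2>/dev/null", "r");
	char line[256];
	int rc = 0;

	if (fp == NULL)
		return 0;
	while (fgets(line, sizeof(line), fp)) {
		if (strstr(line, "SelectType") &&
		    strstr(line, "select/bluegene")) {
			rc = 1;
			break;
		}
	}
	pclose(fp);
	return rc;
}

int main(void)
{
	printf("bluegene system: %s\n", is_bluegene() ? "yes" : "no");
	return 0;
}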
 ################################################################
 #
 # Proc: test_super_user
diff --git a/testsuite/expect/test1.13 b/testsuite/expect/test1.13
index e7a49077e31..54d35f95430 100755
--- a/testsuite/expect/test1.13
+++ b/testsuite/expect/test1.13
@@ -46,7 +46,7 @@ print_header $test_id
 #
 spawn $srun --immediate --hold -t1 $bin_pwd
 expect {
-	-re "Immediate execution impossible" {
+	-re "Unable to allocate resources" {
 		send_user "This error is expected, no worries\n"
 		incr matches
 		exp_continue
diff --git a/testsuite/expect/test1.19 b/testsuite/expect/test1.19
index e241d128c94..28fcb330b8d 100755
--- a/testsuite/expect/test1.19
+++ b/testsuite/expect/test1.19
@@ -157,7 +157,7 @@ if {$file_cnt != 1} {
 #  file name and confirm it is created
 #
 set job_id   0
-spawn $srun --output=$file_out_J -N1-4 -O -v -t1 $bin_hostname
+spawn $srun --output=$file_out_J -N1 -v -t1 $bin_hostname
 expect {
 	-re "jobid ($number).*" {
 		set job_id $expect_out(1,string)
@@ -271,8 +271,16 @@ for {set step_id 0} {$step_id < 4} {incr step_id} {
 	exec $bin_rm -f $file_out_s_glob
 }
 
+if { [test_bluegene] } {
+	set node_cnt 32-2048
+	set task_cnt 32
+} else {
+	set node_cnt 1-4
+	set task_cnt 4
+}
+
 set job_id   0
-spawn $srun --batch --output=/dev/null -N1-4 -n4 -O -t1 $file_in
+spawn $srun --batch --output=/dev/null -N$node_cnt -n$task_cnt -O -t1 $file_in
 expect {
 	-re "jobid ($number) submitted" {
 		set job_id $expect_out(1,string)
diff --git a/testsuite/expect/test1.22 b/testsuite/expect/test1.22
index 2ff22833198..7a76d1d0de0 100755
--- a/testsuite/expect/test1.22
+++ b/testsuite/expect/test1.22
@@ -43,7 +43,13 @@ print_header $test_id
 #
 set host_0      ""
 set timeout $max_job_delay
-spawn $srun -N1-64 -l --threads=1 -t1 $bin_hostname  
+if { [test_bluegene] } {
+	set node_cnt 32-32k
+} else {
+	set node_cnt 1-64
+}
+
+spawn $srun -N$node_cnt -l --threads=1 -t1 $bin_hostname  
 expect {
 	-re "0: ($alpha_numeric)" {
 		set host_0 $expect_out(1,string)
@@ -72,7 +78,7 @@ if {[string compare $host_0 ""] == 0} {
 #
 set host_0      ""
 set timeout $max_job_delay
-spawn $srun -N1-64 -l --threads=32 -t1 $bin_hostname  
+spawn $srun -N$node_cnt -l --threads=32 -t1 $bin_hostname  
 expect {
 	-re "0: ($alpha_numeric)" {
 		set host_0 $expect_out(1,string)
diff --git a/testsuite/expect/test1.25 b/testsuite/expect/test1.25
index 7b325bee119..31f3f64e6bd 100755
--- a/testsuite/expect/test1.25
+++ b/testsuite/expect/test1.25
@@ -44,7 +44,13 @@ print_header $test_id
 #
 set host_0      ""
 set timeout [expr $max_job_delay + $sleep_time]
-spawn $srun -N1-64 --no-kill -t1 $bin_sleep $sleep_time  
+if { [test_bluegene] } {
+	set node_cnt 32-32k
+} else {
+	set node_cnt 1-64
+}
+
+spawn $srun -N$node_cnt --no-kill -t1 $bin_sleep $sleep_time  
 expect {
 	-re "error" {
 		send_user "\nFAILURE: some error occurred\n"
diff --git a/testsuite/expect/test1.31 b/testsuite/expect/test1.31
index f7fdad65dbc..23d17052558 100755
--- a/testsuite/expect/test1.31
+++ b/testsuite/expect/test1.31
@@ -38,11 +38,18 @@ set env_flag_debug   0
 set env_name_debug   "SLURM_DEBUG"
 set env_valu_debug   1
 set env_name_nnodes  "SLURM_NNODES"
-set min_nodes        1
-set max_nodes        2
+
+if { [test_bluegene] } {
+	set min_nodes        1
+	set max_nodes        1024
+	set env_valu_nprocs  5
+} else {
+	set min_nodes        1
+	set max_nodes        2
+	set env_valu_nprocs  5
+}
 set env_valu_nnodes  "$min_nodes-$max_nodes"
 set env_name_nprocs  "SLURM_NPROCS"
-set env_valu_nprocs  5
 set env_name_stdout  "SLURM_STDOUTMODE"
 set env_valu_stdout  "test$test_id.output"
 set env_name_overcom "SLURM_OVERCOMMIT"
@@ -61,6 +68,7 @@ print_header $test_id
 #
 set env($env_name_debug)   $env_valu_debug
 set env($env_name_nnodes)  $env_valu_nnodes
+send_user "$env_valu_nnodes $env_valu_nprocs\n"
 set env($env_name_nprocs)  $env_valu_nprocs
 set env($env_name_stdout)  $env_valu_stdout
 set env($env_name_overcom) $env_valu_overcom
diff --git a/testsuite/expect/test1.35 b/testsuite/expect/test1.35
index fe076592b9d..81e0b64c878 100755
--- a/testsuite/expect/test1.35
+++ b/testsuite/expect/test1.35
@@ -69,7 +69,14 @@ exec $bin_chmod 700 $file_in
 # Spawn a srun batch job that uses stdout/err and confirm their contents
 #
 set timeout $max_job_delay
-spawn $srun --batch -N1-4 --output=$file_out --error=$file_err -t1 $file_in
+
+if { [test_bluegene] } {
+	set node_cnt 32-2048
+} else {
+	set node_cnt 1-4
+}
+
+spawn $srun --batch -N$node_cnt --output=$file_out --error=$file_err -t1 $file_in
 expect {
 	-re "jobid ($number) submitted" {
 		set job_id $expect_out(1,string)
diff --git a/testsuite/expect/test1.36 b/testsuite/expect/test1.36
index 2038b51d7fb..f5578d8a077 100755
--- a/testsuite/expect/test1.36
+++ b/testsuite/expect/test1.36
@@ -35,8 +35,8 @@ set test_id     "1.36"
 
 set exit_code    0
 set mult         4
-set task_cnt     4
 set task_output  0
+set task_cnt     4
 
 print_header $test_id
 
@@ -44,7 +44,14 @@ print_header $test_id
 # Spawn srun with $task_cnt tasks each of which runs a $mult way /bin/id
 #
 set timeout $max_job_delay
-spawn $srun -N1-4 -n$task_cnt -O -t1 $srun -l -n$mult -O $bin_id
+
+if { [test_bluegene] } {
+	set node_cnt 1-2048
+} else {
+	set node_cnt 1-4
+}
+
+spawn $srun -N$node_cnt -n$task_cnt -O -t1 $srun -l -n$mult -O $bin_id
 expect {
 	-re "($number): uid=" {
 		incr task_output
diff --git a/testsuite/expect/test1.41 b/testsuite/expect/test1.41
index 5ad33f1b0a0..10ee3aa9b7a 100755
--- a/testsuite/expect/test1.41
+++ b/testsuite/expect/test1.41
@@ -48,7 +48,14 @@ print_header $test_id
 #	KillWait (120 secs on BlueGene per IBM recommendation)
 #
 set timeout [expr $max_job_delay + 60 + 60 + 120]
-spawn $srun -N1-2 -n$task_cnt --overcommit --debugger-test -t1 $bin_id  
+
+if { [test_bluegene] } {
+	set node_cnt 1-1024
+} else {
+	set node_cnt 1-2
+}
+
+spawn $srun -N$node_cnt -n$task_cnt --overcommit --debugger-test -t1 $bin_id  
 expect {
 	-re "uid=" {
 		send_user "\nFAILURE: task not stopped\n"
diff --git a/testsuite/expect/test1.42 b/testsuite/expect/test1.42
index 24f246630cd..ae76da66356 100755
--- a/testsuite/expect/test1.42
+++ b/testsuite/expect/test1.42
@@ -53,7 +53,7 @@ set env(SLURM_ACCOUNT) QA_ACCT
 #
 exec $bin_rm -f $file_in
 exec echo "#!$bin_bash"     >$file_in
-exec echo "$bin_sleep 5"  >>$file_in
+exec echo "$bin_sleep 5"   >>$file_in
 exec $bin_chmod 700 $file_in
 
 #
@@ -86,13 +86,13 @@ if {$job_id1 == 0} {
 #
 set match_acct  0
 set match_state 0
-spawn $srun -v --dependency=$job_id1 -t1 $scontrol show job $job_id1
+spawn $srun -v --dependency=$job_id1 $scontrol show job $job_id1
 expect {
 	-re "launching ($number).0" {
 		set job_id2 $expect_out(1,string)
 		exp_continue
 	}
-	-re "JobState=COMPLETE" {
+	-re "JobState=COMPLETED" {
 		set match_state 1
 		exp_continue
 	}
@@ -123,7 +123,7 @@ if {$match_state == 0} {
 # Confirm dependency info within second job as well as environment 
 # variable based SLURM account
 #
-set match_acct
+set match_acct 0
 set match_jobid 0
 spawn $scontrol show job $job_id2
 expect {
@@ -132,7 +132,7 @@ expect {
 		exp_continue
 	}
 	-re "Account=QA_ACCT" {
-		set match_acct
+		set match_acct 1
 		exp_continue
 	}
 	timeout {
diff --git a/testsuite/expect/test1.45 b/testsuite/expect/test1.45
index 3a860a62a0f..d7382e73fe8 100755
--- a/testsuite/expect/test1.45
+++ b/testsuite/expect/test1.45
@@ -56,7 +56,13 @@ exec $bin_chmod 700 $file_in
 # Spawn a srun batch job that uses stdout/err and confirm their contents
 #
 set timeout $max_job_delay
-spawn $srun -N1-4 -A -v
+if { [test_bluegene] } {
+	set node_cnt 1-2048
+} else {
+	set node_cnt 1-4
+}
+
+spawn $srun -N$node_cnt -A -v
 expect {
 	-re "jobid ($number):" {
 		set job_id_1 $expect_out(1,string)
diff --git a/testsuite/expect/test1.47 b/testsuite/expect/test1.47
index df55a54d2ec..86da1eff529 100755
--- a/testsuite/expect/test1.47
+++ b/testsuite/expect/test1.47
@@ -109,7 +109,7 @@ if {$matches != 2} {
 #
 exec $bin_rm -f $file_in
 exec echo "#!$bin_bash"              >$file_in
-exec echo "#SLURM -N65000"          >>$file_in
+exec echo "#SLURM -N1000000k"          >>$file_in
 exec echo "$bin_sleep $delay"       >>$file_in
 exec $bin_chmod 700 $file_in
 
diff --git a/testsuite/expect/test1.49 b/testsuite/expect/test1.49
index 0c97743b627..aabe6e8a5a6 100755
--- a/testsuite/expect/test1.49
+++ b/testsuite/expect/test1.49
@@ -71,7 +71,13 @@ exec $bin_chmod 700 $task_prolog $task_epilog $file_in
 #
 set matches 0
 set timeout $max_job_delay
-spawn $srun -N1-2 -n$tasks -O -t1 --task-prolog=$task_prolog --task-epilog=$task_epilog $file_in
+if { [test_bluegene] } {
+	set node_cnt 1-1024
+} else {
+	set node_cnt 1-2
+}
+
+spawn $srun -N$node_cnt -n$tasks -O -t1 --task-prolog=$task_prolog --task-epilog=$task_epilog $file_in
 expect {
 	-re "TEST==prolog_qa" {
 		incr matches
diff --git a/testsuite/expect/test1.81 b/testsuite/expect/test1.81
index 2dd31f70fde..4a25cf7a718 100755
--- a/testsuite/expect/test1.81
+++ b/testsuite/expect/test1.81
@@ -74,10 +74,11 @@ if {$have_arg == 0} {
 	spawn $srun -N1-1 -c 1 -l -t1 $bin_hostname $hostname_arg
 } 
 expect {
-	-re "Unable to create job step: Task count specification invalid" {
-		send_user "\nWARNING: This is not a real error for some system configurations\n"
+	-re "(Task count specification invalid|configuration is not available)" {
 		if { [test_front_end] } {
 			set can_not_run 1
+			send_user "\nWARNING: error expected, testing is incompatible with front-end systems\n"
 		}
 		exp_continue
 	}
diff --git a/testsuite/expect/test1.82 b/testsuite/expect/test1.82
index 6c653dc6e24..3eb0b902651 100755
--- a/testsuite/expect/test1.82
+++ b/testsuite/expect/test1.82
@@ -46,7 +46,14 @@ set host_1      ""
 set host_2      ""
 set host_3      ""
 set timeout $max_job_delay
-spawn $srun -N3 -l -t1 $bin_hostname  
+
+if { [test_bluegene] } {
+	set node_cnt 1536
+} else {
+	set node_cnt 3
+}
+
+spawn $srun -N$node_cnt -l -t1 $bin_hostname  
 expect {
 	-re "More ($alpha) requested than permitted" {
 		send_user "\nWARNING: can't test srun task distribution\n"
diff --git a/testsuite/expect/test1.84 b/testsuite/expect/test1.84
index 81d4cf51e5c..c5e46ecd35e 100755
--- a/testsuite/expect/test1.84
+++ b/testsuite/expect/test1.84
@@ -49,7 +49,11 @@ set cpu_count      0
 set fat_nodes      0
 set host           ""
 set timeout $max_job_delay
-spawn $srun -N1 --cpus-per-task=1 -l -t1 $bin_hostname  
+set node_cnt 1
+set task_cnt 1
+
+spawn $srun -N$node_cnt --cpus-per-task=$task_cnt -l -t1 $bin_hostname  
 expect {
 	-re "($number): ($alpha_numeric)" {
 		set task $expect_out(1,string)
diff --git a/testsuite/expect/test10.12 b/testsuite/expect/test10.12
index 0ed54b84908..90b6c26f7af 100755
--- a/testsuite/expect/test10.12
+++ b/testsuite/expect/test10.12
@@ -34,7 +34,7 @@ source ./globals
 set test_id     "10.12"
 set exit_code   0
 set check       0
-set non_bg     0
+set non_bg      0
 
 print_header $test_id
 
diff --git a/testsuite/expect/test10.13 b/testsuite/expect/test10.13
index 19883755e53..30550142bd6 100755
--- a/testsuite/expect/test10.13
+++ b/testsuite/expect/test10.13
@@ -8,7 +8,7 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2002 The Regents of the University of California.
+# Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
 # UCRL-CODE-217948.
@@ -35,7 +35,7 @@ source ./globals
 set test_id       "10.13"
 set exit_code     0
 set matches       0
-set non_bg       0
+set non_bg        0
 set prompt        ".*$"
 set created       0
 set file          "bluegene-test.conf"
@@ -137,17 +137,32 @@ if { $exit_code != 1 } {
 			incr matches
 			exp_continue
 		}
-		-re "Nodes" {
+		-re "BasePartitionNodeCnt" {
 			set stuff [concat $stuff "8"]
 			incr matches
 			exp_continue
 		}
+		-re "NodeCardNodeCnt" {
+			set stuff [concat $stuff "9"]
+			incr matches
+			exp_continue
+		}
+		-re "LayoutMode" {
+			set stuff [concat $stuff "10"]
+			incr matches
+			exp_continue
+		}
+		-re "BPs" {
+			set stuff [concat $stuff "11"]
+			incr matches
+			exp_continue
+		}
 		eof {
 			wait
 		}
 	}
 
-	if {$matches != 8} {
+	if {$matches != 11} {
 		send_user "$matches\n$stuff"
 		send_user "\nFAILURE: smap bluegene.conf file was created but corrupt\n"
 		set exit_code 1
diff --git a/testsuite/expect/test10.3 b/testsuite/expect/test10.3
index 2472e4e4965..a7d4cd7bf34 100755
--- a/testsuite/expect/test10.3
+++ b/testsuite/expect/test10.3
@@ -8,7 +8,7 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2002 The Regents of the University of California.
+# Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
 # UCRL-CODE-217948.
@@ -75,6 +75,11 @@ expect {
 		send "q"
 		exp_continue	        
 	}
+	-re "BP_LIST" {
+		incr matches
+		send "q"
+		exp_continue
+	}
 	
 	timeout {
 		send_user "\nFAILURE: smap not responding\n"
diff --git a/testsuite/expect/test10.4 b/testsuite/expect/test10.4
index ae754f00d4c..33407dd3ccb 100755
--- a/testsuite/expect/test10.4
+++ b/testsuite/expect/test10.4
@@ -8,7 +8,7 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2002 The Regents of the University of California.
+# Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
 # UCRL-CODE-217948.
@@ -65,6 +65,10 @@ expect {
 		incr matches
 		exp_continue	        
 	}
+	-re "BP_LIST" {
+		incr matches
+		exp_continue
+	}
 	
 	timeout {
 		send_user "\nFAILURE: smap not responding\n"
diff --git a/testsuite/expect/test10.5 b/testsuite/expect/test10.5
index 87beb2e7396..8fe41895a8c 100755
--- a/testsuite/expect/test10.5
+++ b/testsuite/expect/test10.5
@@ -8,7 +8,7 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2002 The Regents of the University of California.
+# Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
 # UCRL-CODE-217948.
@@ -92,6 +92,11 @@ expect {
 		send "q"
 		exp_continue	        
 	}
+	-re "BP_LIST" {
+		incr matches
+		send "q"
+		exp_continue
+	}
 	
 	timeout {
 		send_user "\nFAILURE: smap not responding\n"
diff --git a/testsuite/expect/test10.6 b/testsuite/expect/test10.6
index 1a240c5a152..a37b4d2d869 100755
--- a/testsuite/expect/test10.6
+++ b/testsuite/expect/test10.6
@@ -8,7 +8,7 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2002 The Regents of the University of California.
+# Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
 # UCRL-CODE-217948.
@@ -82,6 +82,10 @@ expect {
 		incr matches
 		exp_continue	        
 	}
+	-re "BP_LIST" {
+		incr matches
+		exp_continue
+	}
 	
 	timeout {
 		send_user "\nFAILURE: smap not responding\n"
diff --git a/testsuite/expect/test10.7 b/testsuite/expect/test10.7
index 804a6e80060..afd7b467a0e 100755
--- a/testsuite/expect/test10.7
+++ b/testsuite/expect/test10.7
@@ -86,7 +86,7 @@ expect {
 		incr matches
 		exp_continue
 	}
-	-re "NODELIST" {
+	-re "LIST" {
 		set stuff [concat $stuff "10"]
 		incr matches
 		send "q"
diff --git a/testsuite/expect/test10.8 b/testsuite/expect/test10.8
index 07892ab102e..c7940854540 100755
--- a/testsuite/expect/test10.8
+++ b/testsuite/expect/test10.8
@@ -8,7 +8,7 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2002 The Regents of the University of California.
+# Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
 # UCRL-CODE-217948.
@@ -46,45 +46,50 @@ print_header $test_id
 spawn $smap -Dj -c
 expect {
 	-re "JOBID" {
-	set stuff [concat $stuff "2"]
+		set stuff [concat $stuff "2"]
 		incr matches
 		exp_continue
 	}
 	-re "PARTITION" {
-	set stuff [concat $stuff "3"]
+		set stuff [concat $stuff "3"]
 		incr matches
 		exp_continue
 	}
 	-re "USER" {
-	set stuff [concat $stuff "5"]
+		set stuff [concat $stuff "5"]
 		incr matches
 		exp_continue
 	}
 	-re "NAME" {
-	set stuff [concat $stuff "6"]
+		set stuff [concat $stuff "6"]
 		incr matches
 		exp_continue
 	}
 	-re " ST" {
-	set stuff [concat $stuff "7"]
+		set stuff [concat $stuff "7"]
 		incr matches
 		exp_continue
 	}
 	-re "TIME" {
-	set stuff [concat $stuff "8"]
+		set stuff [concat $stuff "8"]
 		incr matches
 		exp_continue
 	}
 	-re "NODES" {
-	set stuff [concat $stuff "9"]
+		set stuff [concat $stuff "9"]
 		incr matches
 		exp_continue
 	}
 	-re "NODELIST" {
-	set stuff [concat $stuff "10"]
+		set stuff [concat $stuff "10"]
 		incr matches
 		exp_continue	        
 	}
+	-re "BP_LIST" {
+		set stuff [concat $stuff "10"]
+		incr matches
+		exp_continue
+	}
 	
 	timeout {
 		send_user "\nFAILURE: smap not responding\n"
diff --git a/testsuite/expect/test10.9 b/testsuite/expect/test10.9
index 09cee9b1310..0eb499debe8 100755
--- a/testsuite/expect/test10.9
+++ b/testsuite/expect/test10.9
@@ -51,7 +51,7 @@ expect {
 		set too_small 1
 		exp_continue
 	}
-	-re "NODELIST" {
+	-re "LIST" {
 		incr matches
 		if { $matches == 1 } {
 			send "s"
diff --git a/testsuite/expect/test3.7 b/testsuite/expect/test3.7
index b00fc582b81..0ab0293d1f7 100755
--- a/testsuite/expect/test3.7
+++ b/testsuite/expect/test3.7
@@ -8,7 +8,7 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2005 The Regents of the University of California.
+# Copyright (C) 2005-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # UCRL-CODE-217948.
@@ -150,6 +150,10 @@ expect {
 		set host_name $expect_out(1,string)
 		exp_continue
 	}
+	-re "BP_List=($alpha_numeric_under)" {
+		set host_name $expect_out(1,string)
+		exp_continue
+	}
 	timeout {
 		send_user "\nFAILURE: scontrol not responding\n"
 		kill_srun
diff --git a/testsuite/expect/test4.11 b/testsuite/expect/test4.11
index 131ca3fc832..dfcd975e3e3 100755
--- a/testsuite/expect/test4.11
+++ b/testsuite/expect/test4.11
@@ -7,7 +7,7 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2002 The Regents of the University of California.
+# Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # UCRL-CODE-217948.
@@ -47,6 +47,10 @@ expect {
 		incr matches
 		exp_continue
 	}
+	-re "REASON *BP_LIST" {
+		incr matches
+		exp_continue
+	}
 	timeout {
 		send_user "\nFAILURE: sinfo not responding\n"
 		set exit_code 1
diff --git a/testsuite/expect/test4.3 b/testsuite/expect/test4.3
index d819cfbfb2b..2fe9087cf78 100755
--- a/testsuite/expect/test4.3
+++ b/testsuite/expect/test4.3
@@ -8,7 +8,7 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2002 The Regents of the University of California.
+# Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # UCRL-CODE-217948.
@@ -70,6 +70,10 @@ expect {
 		incr matches
 		exp_continue
 	}
+	-re "BP_LIST" {
+		incr matches
+		exp_continue
+	}
 	-re "STATE" {
 		incr matches -10
 		exp_continue
@@ -159,6 +163,10 @@ expect {
 		incr matches
 		exp_continue
 	}
+	-re "BP_LIST" {
+		incr matches
+		exp_continue
+	}
 	-re "ROOT" {
 		incr matches -10
 		exp_continue
@@ -233,6 +241,10 @@ expect {
 		incr matches
 		exp_continue
 	}
+	-re "BP_LIST" {
+		incr matches
+		exp_continue
+	}
 	timeout {
 		send_user "\nFAILURE: sinfo not responding\n"
 		set exit_code 1
diff --git a/testsuite/expect/test4.4 b/testsuite/expect/test4.4
index 72e1bbbcf0c..7e8458441db 100755
--- a/testsuite/expect/test4.4
+++ b/testsuite/expect/test4.4
@@ -8,7 +8,7 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2002 The Regents of the University of California.
+# Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # UCRL-CODE-217948.
@@ -48,6 +48,10 @@ expect {
 		incr matches
 		exp_continue
 	}
+	-re "BP_LIST" {
+		incr matches
+		exp_continue
+	}
 	-re "NODES " {
 		incr matches
 		exp_continue
@@ -91,6 +95,10 @@ expect {
 		incr matches
 		exp_continue
 	}
+	-re "BP_LIST" {
+		incr matches
+		exp_continue
+	}
 	-re "NODES " {
 		incr matches
 		exp_continue
diff --git a/testsuite/expect/test7.2 b/testsuite/expect/test7.2
index 81ae7dc3229..47574497d1a 100755
--- a/testsuite/expect/test7.2
+++ b/testsuite/expect/test7.2
@@ -67,7 +67,13 @@ exec $bin_chmod 700 $file_prog_get
 # Spawn a job to test BNR functionality
 #
 set timeout $max_job_delay
-spawn $srun -l -N1-2 -n10 -O -t1 $file_prog_get
+if { [test_bluegene] } {
+	set node_cnt 1-1024
+} else {
+	set node_cnt 1-2
+}
+
+spawn $srun -l -N$node_cnt -n10 -O -t1 $file_prog_get
 expect {
 	-re "FAILURE" {
 		send_user "\nFAILURE: some error occured\n"
diff --git a/testsuite/expect/test8.3 b/testsuite/expect/test8.3
index f2a986bef86..907dcf5248d 100755
--- a/testsuite/expect/test8.3
+++ b/testsuite/expect/test8.3
@@ -41,8 +41,8 @@ set job_id      0
 
 print_header $test_id
 
-if {[test_front_end] == 0} {
-	send_user "\nWARNING: This test is only compatable with front-end systems\n"
+if {[test_bluegene] == 0} {
+	send_user "\nWARNING: This test is only compatible with bluegene systems\n"
 	 exit $exit_code
 }
 
@@ -59,7 +59,7 @@ exec $bin_chmod 700 $file_in
 # Submit a slurm job for blue gene with geometry of 2x1x1
 #
 set timeout $max_job_delay
-spawn $srun --geometry=2x1x1 --no-rotate --nodes=2-2 --batch --output=/dev/null --error=/dev/null $file_in
+spawn $srun --geometry=2x1x1 --no-rotate --nodes=1k-1k --batch --output=/dev/null --error=/dev/null $file_in
 expect {
 	-re "jobid ($number) submitted" {
 		set job_id $expect_out(1,string)
@@ -113,7 +113,7 @@ if {$job_id != 0} {
 # Submit a slurm job for blue gene with geometry of 1x2x1
 #
 set jobid 0
-spawn $srun --geometry=1x2x1 --no-rotate --nodes=2-2 --batch --output=/dev/null --error=/dev/null $file_in
+spawn $srun --geometry=1x2x1 --no-rotate --nodes=1k-1k --batch --output=/dev/null --error=/dev/null $file_in
 expect {
 	-re "jobid ($number) submitted" {
 		set job_id $expect_out(1,string)
@@ -164,7 +164,7 @@ if {$job_id != 0} {
 # Submit a slurm job for blue gene with geometry of 1x1x2
 #
 set jobid 0
-spawn $srun --geometry=1x1x2 --no-rotate --nodes=2-2 --batch --output=/dev/null --error=/dev/null $file_in
+spawn $srun --geometry=1x1x2 --no-rotate --nodes=1k-1k --batch --output=/dev/null --error=/dev/null $file_in
 expect {
 	-re "jobid ($number) submitted" {
 		set job_id $expect_out(1,string)
diff --git a/testsuite/expect/test9.1 b/testsuite/expect/test9.1
index 5f4b0bfe8b0..39cf8704732 100755
--- a/testsuite/expect/test9.1
+++ b/testsuite/expect/test9.1
@@ -38,9 +38,14 @@ set cycle_count  100
 set exit_code    0
 set file_in      "test$test_id.input"
 set file_out     "test$test_id.output"
-set node_cnt     "1-4"
+set task_cnt $max_stress_tasks
+if { [test_bluegene] } {
+	set node_cnt 1-2048
+} else {
+	set node_cnt 1-4
+}
+
 set other_opts   "-O"
-set task_cnt     $max_stress_tasks
 
 print_header $test_id
 
diff --git a/testsuite/expect/test9.2 b/testsuite/expect/test9.2
index 0a029c3a7d4..ff6ff0bf1a4 100755
--- a/testsuite/expect/test9.2
+++ b/testsuite/expect/test9.2
@@ -37,7 +37,11 @@ set test_id      "9.2"
 set cycle_count  100
 set exit_code    0
 set file_out     "test$test_id.output"
-set node_cnt     "1-4"
+if { [test_bluegene] } {
+	set node_cnt 1-2048
+} else {
+	set node_cnt 1-4
+}
 set other_opts   "-O"
 set task_cnt     $max_stress_tasks
 
diff --git a/testsuite/expect/test9.3 b/testsuite/expect/test9.3
index 330cb2b8128..6b769fc13a8 100755
--- a/testsuite/expect/test9.3
+++ b/testsuite/expect/test9.3
@@ -38,7 +38,11 @@ set cycle_count  100
 set exit_code    0
 set file_in      "test$test_id.input"
 set file_out     "test$test_id.output"
-set node_cnt     "1-4"
+if { [test_bluegene] } {
+	set node_cnt 1-2048
+} else {
+	set node_cnt 1-4
+}
 set other_opts   "-O"
 set task_cnt      $max_stress_tasks
 
diff --git a/testsuite/expect/test9.4 b/testsuite/expect/test9.4
index 1a974510afd..0e64addeb3f 100755
--- a/testsuite/expect/test9.4
+++ b/testsuite/expect/test9.4
@@ -43,7 +43,12 @@ set exit_code     0
 set file_in       "test$test_id.input"
 set file_in_task  "test$test_id.%t.input"
 set file_out_task "test$test_id.%t.output"
-set node_cnt      "1-4"
+if { [test_bluegene] } {
+	set node_cnt 1-2048
+} else {
+	set node_cnt 1-4
+}
+
 set other_opts    "-O"
 set task_cnt      $max_stress_tasks
 
diff --git a/testsuite/expect/test9.5 b/testsuite/expect/test9.5
index 2aa4e3c0ebc..a89da8cb2f1 100755
--- a/testsuite/expect/test9.5
+++ b/testsuite/expect/test9.5
@@ -38,7 +38,11 @@ set exit_code     0
 set file_in       "test$test_id.input"
 set file_in_task  "test$test_id.%t.input"
 set file_out      "test$test_id.output"
-set node_cnt      "1-4"
+if { [test_bluegene] } {
+	set node_cnt 1-2048
+} else {
+	set node_cnt 1-4
+}
 set other_opts    "-O"
 set task_cnt      $max_stress_tasks
 
diff --git a/testsuite/expect/test9.6 b/testsuite/expect/test9.6
index cedfb92bbff..3145d8ad7a3 100755
--- a/testsuite/expect/test9.6
+++ b/testsuite/expect/test9.6
@@ -38,7 +38,12 @@ set cycle_count  100
 set exit_code    0
 set file_in      "test$test_id.input"
 set file_out     "test$test_id.output"
-set node_cnt     "1-4"
+if { [test_bluegene] } {
+	set node_cnt 1-2048
+} else {
+	set node_cnt 1-4
+}
+
 set other_opts   "-O"
 set task_cnt      $max_stress_tasks
 
diff --git a/testsuite/expect/test9.7 b/testsuite/expect/test9.7
index 652f344104a..bda321e22f7 100755
--- a/testsuite/expect/test9.7
+++ b/testsuite/expect/test9.7
@@ -52,7 +52,7 @@ if {[test_front_end] != 0} {
 # Initiate $task_cnt parallel tasks
 #
 for {set inx 0} {$inx < $task_cnt} {incr inx} {
-	spawn $bin_bash $file_script $sinfo $srun $squeue $sleep_time $interations
+	spawn $bin_bash $file_script $sinfo $srun $squeue $sleep_time $interations [test_bluegene]
 	set task_id($inx) $spawn_id
 }
 
diff --git a/testsuite/expect/test9.7.bash b/testsuite/expect/test9.7.bash
index 4cad79c7278..54c586b7201 100755
--- a/testsuite/expect/test9.7.bash
+++ b/testsuite/expect/test9.7.bash
@@ -53,8 +53,14 @@ else
 	iterations=3
 fi
 
+if [ "$6" = "1" ]; then
+    inx=512
+else
+    inx=1
+fi
+
 exit_code=0
-inx=1
+
 log="test9.7.$$.output"
 touch $log
 while [ $inx -le $iterations ]
diff --git a/testsuite/slurm_unit/slurmctld/security_2_2.bash b/testsuite/slurm_unit/slurmctld/security_2_2.bash
new file mode 100755
index 00000000000..24231b47464
--- /dev/null
+++ b/testsuite/slurm_unit/slurmctld/security_2_2.bash
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Define location of slurm executables (if not in default search path)
+#slurm_bin="/home/jette/slurm.way/bin/"
+
+# Create private config file
+# Set AuthType=auth/dummy
+file_orig=`${slurm_bin}scontrol show config | awk '{ if ( $1 ~ /SLURM_CONFIG_FILE/ ) { print $3 } }'`
+grep -iv AuthType <$file_orig >tmp.$$
+echo "AuthType=auth/dummy" >>tmp.$$
+
+# Run srun using this config file
+export SLURM_CONF=tmp.$$ 
+${slurm_bin}srun /bin/id
+
+# Clean up
+rm tmp.$$
-- 
GitLab