diff --git a/BUILD.NOTES b/BUILD.NOTES
index 8220a91732957ce7c022378ddfb475e43fae64de..c4bd27075644023b9bb2bcead8a2f55084890340 100644
--- a/BUILD.NOTES
+++ b/BUILD.NOTES
@@ -61,12 +61,14 @@ BlueGene build notes:
 
 To build and run on AIX:
 0. svn co https://eris.llnl.gov/svn/slurm/trunk slurm
+   svn co https://eris.llnl.gov/svn/slurm/private/proctrack-aix/trunk proctrack
    svn co https://eris.llnl.gov/svn/buildfarm/trunk buildfarm
    put the buildfarm directory in your search path
 1. export OBJECT_MODE=32
 2. Build with:
-   ./configure --enable-debug --prefix=<install-dir> \
-	--sysconfdir=<config-dir> --with-procrack=/opt/freeware/include \
+   ./configure --enable-debug --prefix=/opt/freeware \
+	--sysconfdir=/opt/freeware/etc/slurm \
+	--with-proctrack=<your directory>/proctrack \
 	--with-ssl=/opt/freeware --with-munge=/opt/freeware
    make
    make uninstall  # remove old shared libraries, aix caches them
@@ -83,7 +85,7 @@ To build and run on AIX:
         %_defaultdocdir         %{_prefix}/doc
 
 	%_enable_debug    "--enable-debug"
-	%with_proctrack   "--with-proctrack=/admin/llnl/include"
+	%with_proctrack   "--with-proctrack=<your directory>/proctrack"
 	%with_ssl         "--with-ssl=/opt/freeware"
 	%with_munge       "--with-munge=/opt/freeware"
    build -s https://eris.llnl.gov/svn/slurm/tags/slurm-1-2-0-0-pre3
@@ -96,6 +98,10 @@ To build and run on AIX:
    There will be a log file create named /tmp/mplog.<jobid>.<taskid>
 7. If you update proctrack, be sure to run "slibclean" to clear cached
    version.
+8. Install the RPMs slurm-*.ppc.rpm, slurm-aix-federation-*.ppc.rpm,
+   slurm-auth-munge-*.ppc.rpm, slurm-devel-*.ppc.rpm, and
+   slurm-sched-wiki-*.ppc.rpm in /usr/admin/inst.image/slurm/aix5.3 on an
+   OCF AIX machine (pdev is a good choice).
 
 AIX/Federation switch window problems
 To clean switch windows:     ntblclean =w 8 -a sni0
diff --git a/doc/html/configurator.html b/doc/html/configurator.html
index b6a93a99b6c68edace3beb54449aad5a10e07c6b..d255313ac52104b36a1a0d06087755c196e90b03 100644
--- a/doc/html/configurator.html
+++ b/doc/html/configurator.html
@@ -55,8 +55,8 @@ function get_radio_value(form)
 
 function displayfile()
 {
-  var printme = "# Slurm.conf file generated by configurator.html<br>" +
-   "# See the slurm.conf man page for more information<br>" +
+  var printme = "# slurm.conf file generated by configurator.html.<br>" +
+   "# See the slurm.conf man page for more information.<br>" +
    "#<br>" +
    "ControlMachine=" + document.config.control_machine.value + "<br>" +
    get_field("ControlAddr",document.config.control_addr) + "<br>" +
@@ -245,9 +245,9 @@ as do the SLURM compute node deamonds (slurmd). If not set, slurm ports
 are set by checking for an entry in <I>/etc/services</I> and if that 
 fails by using an interal default set at SLURM build time. 
 <P>
-<input type="text" name="slurmctld_port" value="7010"> <B>SlurmctldPort</B>
+<input type="text" name="slurmctld_port" value="6817"> <B>SlurmctldPort</B>
 <P>
-<input type="text" name="slurmd_port" value="7011"> <B>SlurmdPort</B>
+<input type="text" name="slurmd_port" value="6818"> <B>SlurmdPort</B>
 <P>
 
 <H2>Authentication</H2>
@@ -461,6 +461,6 @@ before terminating all remaining tasks. A value of zero indicates unlimited wait
 </FORM>
 <HR>
 <p class="footer">UCRL-WEB-217616<br>
-Last modified 5 July 2006</p>
+Last modified 15 September 2006</p>
 </BODY>
 
diff --git a/doc/man/man5/slurm.conf.5 b/doc/man/man5/slurm.conf.5
index f4741e154cab1b38d26e87cf296fd1d3fcca3fed..adaa8a6638eb417a8cabfc760f4c6afb398898ed 100644
--- a/doc/man/man5/slurm.conf.5
+++ b/doc/man/man5/slurm.conf.5
@@ -398,7 +398,8 @@ The default value is "/var/run/slurmctld.pid".
 \fBSlurmctldPort\fR
 The port number that the SLURM controller, \fBslurmctld\fR, listens 
 to for work. The default value is SLURMCTLD_PORT as established at system 
-build time.  NOTE: Either \fBslurmctld\fR and \fBslurmd\fR daemons must not 
+build time. If none is explicitly specified, it will be set to 6817.  
+NOTE: Either \fBslurmctld\fR and \fBslurmd\fR daemons must not 
 execute on the same nodes or the values of \fBSlurmctldPort\fR and 
 \fBSlurmdPort\fR must be different.
 .TP
@@ -429,7 +430,8 @@ The default value is "/var/run/slurmd.pid".
 \fBSlurmdPort\fR
 The port number that the SLURM compute node daemon, \fBslurmd\fR, listens 
 to for work. The default value is SLURMD_PORT as established at system 
-build time. NOTE: Either slurmctld and slurmd daemons must not execute
+build time. If none is explicitly specified, its value will be 6818. 
+NOTE: Either slurmctld and slurmd daemons must not execute
 on the same nodes or the values of \fBSlurmctldPort\fR and \fBSlurmdPort\fR
 must be different.
 .TP
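
Since the two daemons normally run on different hosts, the 6817/6818
defaults work as-is; on a single-machine test install the rule above forces
distinct ports, which can be made explicit in slurm.conf (an illustrative
fragment using the new default values):

    SlurmctldPort=6817
    SlurmdPort=6818
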
diff --git a/etc/slurm.conf.example b/etc/slurm.conf.example
index 14965a2dc363d44c735671284bdf9001fcd6bf90..9d48acbc59346500a841d5a7e1dff2634b5ff52d 100644
--- a/etc/slurm.conf.example
+++ b/etc/slurm.conf.example
@@ -1,644 +1,77 @@
-#  $Id$
-############################################################################### 
-#                   Sample configuration file for SLURM
-###############################################################################
 #
-# This file holds the system-wide SLURM configuration. It is read
-# by SLURM clients, daemons, and the SLURM API to determine where
-# and how to contact the SLURM controller, what other nodes reside
-# in the current cluster, and various other configuration information.
-#
-# SLURM configuration parameters take the form Keyword=Value, where
-# at this time, no spacing is allowed to surround the equals (=) sign.
-# Many of the config values are not mandatory, and so may be left
-# out of the config file. We will attempt to list the default 
-# values for those parameters in this file.
-#
-###############################################################################
-
-#  
-#     SLURM daemon configuration
-#     ========================================================================
-
-#
-# o Define the location of the SLURM controller and backup controller:
-#    "ControlMachine"   : hostname of the primary controller
-#    "ControlAddr"      : hostname used to contact the primary controller
-#    "BackupController" : hostname of the backup controller 
-#    "BackupAddr"       : hostname used to contact backup controller
-#
-# Example:
-#
-# ControlMachine=dev0
-# ControlAddr=edev0		# default: same as ControlMachine
-# BackupController=dev1		# default: no backup controller
-# BackupAddr=edev1		# default: same as BackupController
-
-#
-# o Define the SLURM controller "save state" directory
-#
-#   The SLURM controller, slurmctld, will periodically save state
-#   into this directory so that said state may be recovered after
-#   a fatal system error. For best results when using a backup 
-#   controller, the filesystem on which this directory resides 
-#   should be shared between the "ControlMachine" and "BackupController"
-#
-# Example:
-# 
-# StateSaveLocation=/mnt/slurm	# default: "/tmp"
-
-
-#
-# o Define the slurmd "save state" directory
-#
-#   The SLURM daemon executing on each host, slurmd, will periodically 
-#   save state into this directory so that said state may be recovered
-#   after a fatal system error. This pathname is shared by all hosts, 
-#   but the file must be unique on each host so this must reference a 
-#   local file system.
-#
-# Example:
-# 
-# SlurmdSpoolDir=/var/tmp/slurmd	# default: "/tmp/slurmd"
-
-
-#
-# o Define the "slurm" user
-#
-#   "SlurmUser" specifies the user that the SLURM controller should run
-#   as. The slurm controller has no need to run with elevated privileges,
-#   so a user other than "root" is suggested here. 
-#
-# Example:
-#
-# SlurmUser=slurm
-
-
-#
-# o If you have a slow NIS environment,
-#
-#   big parallel jobs take a long time to start up (and may eventually
-#   time-out) because the NIS server(s) may not be able to quickly
-#   respond to simultaneous requests from multiple slurmd's.  You can
-#   instruct slurmd to cache /etc/groups entries to prevent this from
-#   happening by setting "CacheGroups=1".  Reconfiguring ("scontrol reconfig") 
-#   with CacheGroups=0  will cause slurmd to purge the cache.
-#
-#   WARNING: The group ID cache does not try to keep itself in sync with
-#            the system.  You MUST run "scontrol reconfig" to update the
-#            cache after making any changes to system password or group
-#            databases.
-#
-# Example:
-#
-# CacheGroups=1		# default is `0'
-
-
-#
-# o Define the slurmctld and slurmd server port numbers
-#
-#  by default, the slurmctld ports are set by checking for an entry in
-#  /etc/services, and if that fails, by using an internal default set
-#  at build time. That process will be overridden by these config 
-#  parameters.
-#
-#    "SlurmctldPort"    : slurmctld server port 
-#    "SlurmdPort"       : slurmd server port
-#
-# Example:
-#
-# SlurmctldPort=7010 	# 
-# SlurmdPort=7011       #
-
-
-#
-# o Define slurmd and slurmctld logging options
-#
-#    "SlurmctldDebug"   : verbosity of slurmctld log messages 
-#                         (Values from 0 to 7 are legal, with `0' being
-#                          "quiet" operation and `7' being insanely verbose)
-#
-#    "SlurmdDebug"      : verbosity of slurmd log messages (0-7, see above)
-#
-#    "SlurmctldLogFile" : fully qualified pathname to slurmctld logfile
-#                         (If a logfile is set for slurmctld, logging via
-#                          syslog will be turned off)
-#
-#    "SlurmdLogFile"    : fully qualified pathname to slurmd logfile,
-#                         may contain "%h" for hostname substitution
-#                         (same caveat as SlurmctldLogFile above)
-#
-# Example:
-#
-# SlurmctldDebug=4	# default is `3'  
-# SlurmdDebug=4		# default is `3'
-#
-# SlurmctldLogFile=/var/log/slurmctld.log  # default is to log via syslog()
-# SlurmdLogFile=/var/log/slurmd.log.%h     # substitute hostname for "%h"
-
-
-# o Define an alternate location for slurmd and slurmctld pid files, 
-#   SlurmctldPidFile and SlurmdPidFile should have different values
-#  
-#    "SlurmctldPidFile" : fully qualified pathname containing slurmctld pid
-#
-#    "SlurmdPidFile"    : fully qualified pathname containing slurmd pid
-#    
-# Example:
-#
-# SlurmctldPidFile=/var/slurm/slurmctld.pid  # default: "/var/run/slurmctld.pid"
-# SlurmdPidFile=/var/slurm/slurmd.pid     # default: "/var/run/slurmd.pid"
-
-#
-# o Define the authentication method for communicating between SLURM
-#   components
-#
-# "auth/none"   : no authentication, the default
-# "auth/authd"  : Brent Chun's authd
-# "auth/munge"  : LLNL's munge
-#
-# WARNING: The use of "auth/none" permits any user to execute jobs as any 
-# other user. This may be fine for testing purposes, but do not use it in production.
-#
-# AuthType=auth/none
-
-
-# o Define TreeWidth for communication to the slurmds.  Slurmds use
-# a virtual tree network, this variable specifies the width of the tree
-#
-# Default is 50
-#
-# TreeWidth=50
-
-
-#
-# o Define a scheduler.
-#
-# "SchedulerType"	 : the type of scheduler. Orders pending jobs.
-#	"sched/builtin"	 : the default, SLURM's built-in FIFO scheduler.
-#	"sched/backfill" : FIFO scheduling with backfill.
-#	"sched/hold"     : hold all new jobs if "/etc/slurm.hold" exists, 
-#	                   otherwise perform FIFO scheduling.
-#	"sched/wiki"	 : the Wiki interface to Maui.
-#
-# "SchedulerAuth"	 : an authentication token, if any, that must
-#			   be used in a scheduler communication
-#			   protocol.  The interpretation of this value
-#			   depends on the plugin type.
-#
-# "SchedulerPort"	 : for polling schedulers, the port number on
-#			   which slurmctld should listen for connection
-#			   requests.
-#
-# "SchedulerRootFilter"	 : for schedulers that support it (currently only
-#			   sched/backfill). If set to '1' then scheduler
-#			   will filter and avoid RootOnly partitions (let
-#			   root user or process schedule these partitions).
-#			   Otherwise scheduler will treat RootOnly
-#			   partitions as any other standard partition.
-#
-# SchedulerType=sched/wiki
-# SchedulerAuth=42
-# SchedulerPort=7321
-# SchedulerRootFilter=0	# default is '1'
-
-
-#
-# "SelectType"			: node selection logic for scheduling.
-#	"select/bluegene"	: the default on BlueGene systems, aware of
-#				  system topology, manages bglblocks, etc.
-#	"select/cons_res"	: allocate individual consumable resources
-#				  (i.e. processors, memory, etc.)
-#	"select/linear"		: the default on non-BlueGene systems,
-#				  no topology awareness, oriented toward
-#				  allocating nodes to jobs rather than
-#				  resources within a node (e.g. CPUs)
-#
-# SelectType=select/linear
-
-
-#
-# "JobCompType"			:  Define the job completion logging
-#				   mechanism to be used
-#	"jobcomp/none"		: no job logging, the default
-#	"jobcomp/filetxt"	: log job record to a text file
-#	"jobcomp/script"	: execute an arbitrary script
-#
-# JobCompType=jobcomp/filetxt
-
-
-#
-# o Define location where job completion logs are to be written
-#   Interpretation of the parameter is dependent upon the logging
-#   mechanism used (specified by the JobCompType parameter). For
-#   "JobCompType=jobcomp/filetxt" the value of JobCompLoc should
-#   be the fully qualified pathname of a file into which text
-#   records are appended.
-#
-# JobCompLoc=/var/log/slurm.job.log
-
-
-#
-# o Define the switch or interconnect in use.
-#
-# "SwitchType"         : the type of switch or interconnect.
-#     switch/none      : the default, supports all switches not requiring
-#                        special set-up for job launch including Myrinet, 
-#                        Ethernet, and InfiniBand.
-#     switch/federation: IBM Federation switch
-#     switch/elan      : Quadrics Elan 3 or Elan 4 interconnect.
-#
-# SwitchType=switch/none
-
-
-#
-# o Define the process tracking mechanism in use.
-#
-# "ProctrackType"           : the type of process tracking mechanism
-#     "proctrack/aix"       : use AIX kernel extension for process tracking,
-#                             the default value on AIX computers
-#     "proctrack/linuxproc" : use parent process ID to establish process
-#                             tree, required for MPICH-GM use
-#     "proctrack/rms"       : use Quadrics kernal infrastructure to track 
-#                             processes, strongly recommended for systems
-#                             with a Quadrics switch
-#     "proctrack/sgi_job    : which uses SGI’s Process Aggregates (PAGG)
-#                             kernel module, see http://oss.sgi.comm/projects/pagg/
-#                             for more information
-#     "proctrack/pgid"      : use Unix process group ID for process tracking,
-#                             the default value on all other computers
-
-# ProctrackType=proctrack/pgid
-
-
-#
-# o Define the job accounting mechanism to use.
-#
-#   "jobacct/aix"      : Job accounting information
-#                        from the AIX proc table
-#   "jobacct/linux"    : Job accouting information 
-#                        from the linux proc table
-#   "jobacct/none"     : No job accouting information.
-#
-
+# Example slurm.conf file. Please run configurator.html
+# (in doc/html) to build a configuration file customized
+# for your environment.
+#
+#
+# slurm.conf file generated by configurator.html.
+#
+# See the slurm.conf man page for more information.
+#
+ControlMachine=linux0
+#ControlAddr=
+#BackupController=
+#BackupAddr=
+#
+SlurmUser=slurm
+SlurmctldPort=6817
+SlurmdPort=6818
+AuthType=auth/munge
+#JobCredentialPrivateKey=
+#JobCredentialPublicCertificate=
+StateSaveLocation=/tmp
+SlurmdSpoolDir=/tmp/slurmd
+SwitchType=switch/none
+MpiDefault=none
+SlurmctldPidFile=/var/run/slurmctld.pid
+SlurmdPidFile=/var/run/slurmd.pid
+ProctrackType=proctrack/pgid
+#PluginDir=
+CacheGroups=0
+#FirstJobId=
+ReturnToService=0
+#MaxJobCount=
+#PlugStackConfig=
+#PropagatePrioProcess=
+#PropagateResourceLimits=
+#PropagateResourceLimitsExcept=
+#Prolog=
+#Epilog=
+#SrunProlog=
+#SrunEpilog=
+#TaskProlog=
+#TaskEpilog=
+#TaskPlugin=
+#TmpFs=
+#UsePAM=
+#
+# TIMERS
+SlurmctldTimeout=300
+SlurmdTimeout=300
+InactiveLimit=0
+MinJobAge=300
+KillWait=30
+Waittime=0
+#
+# SCHEDULING
+SchedulerType=sched/backfill
+#SchedulerAuth=
+#SchedulerPort=
+#SchedulerRootFilter=
+SelectType=select/linear
+FastSchedule=1
+#
+# LOGGING
+SlurmctldDebug=3
+#SlurmctldLogFile=
+SlurmdDebug=3
+#SlurmdLogFile=
+JobCompType=jobcomp/none
+#JobCompLoc=
 JobAcctType=jobacct/none
-
-#
-# o Define the Frequency of the JobAcct poll thread
-#
-# JobAcctFrequency=30
-
-#
-# o Define the log file for job accounting this will be written on the
-#   same node the slurmctld is being ran on.  
-#
-#JobAcctLogFile=/var/log/slurm_jobacct.log
-
-#
-# o Define the places to look for SLURM plugins.  This is a
-#   colon-separated list of directories, just like the PATH
-#   environment variable.
-#
-# PluginDir=/etc/slurm/plugins # default: PREFIX/lib/slurm
-
-
-#
-# o Define some timeout values for the slurm controller and backup
-#
-#    "SlurmctldTimeout" : amount of time, in seconds, backup controller
-#                         waits for primary controller to respond 
-#                         before assuming control.
-#
-#    "SlurmdTimeout"    : amount of time, in seconds, the controller
-#                         waits for slurmd to respond before setting the
-#                         node's state to DOWN. If set to 0, this feature
-#                         is disabled.
-#
-#    "MessageTimeout"   : amount of time, in seconds, allocated for a 
-#			  round-trip communication before it times out.
-#
-#    "InactiveLimit"    : The interval, in seconds, a job or job step is 
-#                         permitted to be inactive (srun command not responding)
-#                         before being terminated.
-#
-#    "MinJobAge"        : The time, in seconds, after a job completes before
-#                         its record is purged from the active slurmctld data.
-#   
-#    "KillWait"         : The time, in seconds, between SIGTERM and SIGKILL
-#                         signals sent to a job upon reaching its timelimit.
-#
-#    "WaitTime"         : Specifies how many seconds srun should wait after the 
-#                         first task terminates before terminating all remaining  
-#                         tasks. If set to 0, this feature is disabled.
-#
-# Example:
-#
-# SlurmctldTimeout=120	# Defaults to 120 seconds
-# SlurmdTimeout=300	# Defaults to 300 seconds
-# MessageTimeout=10	# Defaults to 5 seconds
-# InactiveLimit=600	# Defaults to 0 (unlimited)
-# MinJobAge=30		# Defaults to 300 seconds
-# KillWait=10		# Defaults to 30 seconds
-# WaitTime=30		# Defaults to 0 (unlimited)
-
-
-#
-# o Define other miscellaneous SLURM controller configuration values:
-#
-#    "FastSchedule"     : if set to `1' consider the configuration of nodes
-#                         to be exactly that set in the config file. Otherwise,
-#                         consider configuration of nodes to that which is
-#                         reported by the node's slurmd. A FastSchedule value of
-#                         zero will result in significantly slower scheduling.
-#
-#    "FirstJobId"       : Number of the first assigned job id.
-#
-#    "ReturnToService"  : if set to `1,' nodes in the DOWN state will be
-#                         set to IDLE after they come back up. Otherwise,
-#                         nodes will stay in the down state until manually
-#                         brought into the IDLE state.
-# 
-#    "MaxJobCount"      : defines the maximum number of jobs slurmctld can 
-#                         have in its active database at one time. Set the 
-#                         values of MaxJobCount and MinJobAge so as to avoid 
-#                         having slurmctld exhaust its memory or other resources.
-#
-#    "MpiDefault"	: define the default type of MPI to be used. If
-#			  srun does not specify another value, slurm will 
-#			  establish the environment for this mpi to execute.
-#			  Currently supported values are lam (for LAM MPI and 
-#                         Open MPI), mpich-gm, mvapich, and none (default, 
-#                         which works for most other versions of MPI).
-#
-# Example:
-#
-# FastSchedule=0		# default is `1'
-# FirstJobid=1000       	# default is `1'
-# ReturnToService=1     	# default is `0'
-# MaxJobCount=10000		# Defaults to 2000
-# MpiDefault			# default is "none"
-
-
-#
-# o Define Process Priority Propagation Configuration
-#
-#    "PropagatePrioProcess"
-#                       : if set to `1', the priority (aka nice value) of the
-#                         process that launched the job on the submit node,
-#                         (typically the users shell), will be propagated to
-#                         the compute nodes and set for the users job.  If set
-#                         to `0', or left unset, the users job will inherit the
-#                         scheduling priority from the slurm daemon.
-#
-# Example:
-#
-# PropagatePrioProcess=1       # default is `0'
-
-
-#
-# o Define the Resource Limit Propagation Configuration
-#
-#   These two parameters can be used to specify which resource limits to
-#   propagate from the users environment on the submit node to the users job
-#   environment on the compute nodes.  This can be useful when system limits
-#   vary among nodes.  By default, (when neither parameter is  specified), all
-#   resource limits are propagated.   The values of non-propagated resource
-#   limits are determined by the system limits configured on the compute
-#   nodes.   Only one of these two parameters may be specified.
-#
-#    "PropagateResourceLimits"       : A list of one or more comma-separated
-#                                      resource limits whose (soft) values
-#                                      will be set at job startup on behalf of
-#                                      the user.  Any resource limit that is
-#                                      not listed here, will not be propagated,
-#                                      (unless the user overrides this setting
-#                                      with the 'srun --propagate' switch).
-#
-#
-#    "PropagateResourceLimitsExcept" : A list of one or more comma-separated
-#                                      resource limits which will not be
-#                                      propagated.  Any resource limit that is
-#                                      not listed here, will be propagated.
-#   
-#                                The following resource limits are supported:
-#
-#                                RLIMIT_NPROC   RLIMIT_MEMLOCK   RLIMIT_CORE
-#                                RLIMIT_FSIZE   RLIMIT_CPU       RLIMIT_DATA
-#                                RLIMIT_STACK   RLIMIT_RSS       RLIMIT_NOFILE
-#                                RLIMIT_AS
-#
-# Examples:
-#
-# PropagateResourceLimits=RLIMIT_CORE,RLIMIT_DATA # The users RLIMIT_CORE and
-#                                                 # RLIMIT_DATA resource limit
-#                                                 # soft values will be applied
-#                                                 # to the job on startup.  All
-#                                                 # other resource limit soft
-#                                                 # values are determined by the
-#                                                 # system limits defined on
-#                                                 # the compute nodes.
-#
-# PropagateResourceLimitsExcept=RLIMIT_MEMLOCK    # All limits, except for
-#                                                 # MEMLOCK, are propagated.
-#
-
-
-#
-# o Define whether PAM (Pluggable Authentication Modules for Linux) will be
-#   used.
-#
-# PAM is a set of shared libraries that enables system administrators to select
-# the mechanism individual applications use to authenticate users. PAM also
-# provides services for account managment, credential management, session 
-# management and authentication-token (password changing) management. SLURM
-# uses PAM to obtain resource limits. This allows the system adminisrator to
-# dynamically configure resource limits without causing an interruption to
-# the service provided by SLURM.
-#
-# Also, for PAM to work properly with SLURM, a configuration file for SLURM
-# must be created and installed. See the slurm.conf man page for details about
-# this file.
-#
-# Example:
-#
-# UsePAM=1 or UsePAM=Yes   # default is not to use PAM
-
-
-#
-# o Define an epilog and a prolog
-#
-#    "Prolog" : fully qualified path to script that will be executed as 
-#               root on every node of a user's job before the job's tasks
-#               will be initiated there.
-#
-#    "Epilog" : fully qualified path to a script that will be executed as
-#               root on every node of a user's job after that job has 
-#               terminated.
-#
-# Example:
-#
-# Prolog=/usr/local/slurm/prolog	# default is no prolog
-# Epilog=/usr/local/slurm/epilog	# default is no epilog
-
-
-# 
-# o Define programs to be executed by srun at job step initiation and 
-#   termination. These parameters may be overridden by srun's --prolog 
-#   and --epilog options.
-#
-# Example:
-#
-# SrunProlog=/usr/local/slurm/srun_prolog   # default is no srun prolog
-# SrunEpilog=/usr/local/slurm/srun_epilog   # default is no srun epilog
-
-
-#
-# o Define task launch specific parameters
-#
-#    "TaskProlog" : Define a program to be executed as the user before each 
-#                   task begins execution.
-#    "TaskEpilog" : Define a program to be executed as the user after each 
-#                   task terminates.
-#    "TaskPlugin" : Define a task launch plugin. This may be used to 
-#                   provide resource management within a node (e.g. pinning
-#                   tasks to specific processors). Permissible values are:
-#      "task/none"     : no task launch actions, the default.
-#      "task/affinity" : CPU affinity support (see srun man pages for 
-#                        --cpu_bind and --mem_bind options)
-#
-# Example:
-#
-# TaskProlog=/usr/local/slurm/etc/task_prolog # default is none
-# TaskEpilog=/usr/local/slurm/etc/task_epilog # default is none
-# TaskPlugin=task/affinity                    # default is task/none
-
-
-#
-# o Define the temporary file system 
-#
-#    "TmpFS"  : Defines the location of local temporary storage filesystem 
-#               on remote nodes. This filesystem will be used in reporting
-#               each node's TmpDisk space.
-#
-# Example:
-#
-# TmpFs=/var/tmp	# default "/tmp"
-
-
-#
-# o Define the location of the private and public keys used by SLURM
-#   to generate job credentials.
-#
-#    "JobCredentialPrivateKey"       : Full pathname to the private key
-#
-#    "JobCredentialPublicCertificate : Full pathname to the public cert.
-#
-# Example:
-#
-# JobCredentialPrivateKey=/etc/slurm/slurm.key
-# JobCredentialPublicCertificate=/etc/slurm/slurm.cert 
-
-
-#
-#     Node and Partition Configuration
-#     ========================================================================
-
-#
-#  o Node configuration
-#
-#    The configuration information of nodes (or machines) to be managed 
-#    by SLURM is described here. The only required value in this section
-#    of the config file is the "NodeName" field, which specifies the 
-#    hostnames of the node or nodes to manage. It is recommended, however,
-#    that baseline values for the node configuration be established
-#    using the following parameters (see slurm.config(5) for more info): 
-#
-#     "NodeName"   : The only required node configuration parameter, NodeName
-#                    specifies a node or set of nodes to be managed by SLURM.
-#                    The special NodeName of "DEFAULT" may be used to establish
-#                    default node configuration parameters for subsequent node
-#                    records. Typically this would be the string that 
-#                    `/bin/hostname -s` would return on the node. However 
-#                    NodeName may be an arbitrary string if NodeHostname is 
-#                    used (see below).
-#
-#     "Feature"    : comma separated list of "features" for the given node(s) 
-#
-#     "NodeAddr"   : preferred address for contacting the node. This may be 
-#                    either a name or IP address.
-#
-#     "NodeHostname"
-#                  : the string that `/bin/hostname -s` would return on the
-#                    node.  In other words, NodeName may be the name other than
-#                    the real hostname.
-#
-#     "RealMemory" : Amount of real memory (in Megabytes)
-#
-#     "Procs"      : Number of CPUs 
-#
-#     "State"      : Initial state (IDLE, DOWN, etc.)
-#
-#     "TmpDisk"    : Temporary disk space available on node
-#
-#     "Weight"     : Priority of node for scheduling purposes
-#
-#   If any of the above values are set for a node or group of nodes, and
-#   that node checks in to the slurm controller with less than the 
-#   configured resources, the node's state will be set to DOWN, in order
-#   to avoid scheduling any jobs on a possibly misconfigured machine.
-#
-# Example Node configuration:
-#
-# NodeName=DEFAULT Procs=2 TmpDisk=64000 State=UNKNOWN
-# NodeName=host[0-25] NodeAddr=ehost[0-25] Weight=16
-# NodeName=host26     NodeAddr=ehost26     Weight=32 Feature=graphics_card
-
-#
-# o Partition Configuration
-#
-#   Paritions are groups of nodes which (possibly) have different limits
-#   and access controls. Nodes may be in multiple partitions. Jobs will
-#   not be allowed to span partitions. The following partition configuration
-#   parameters are recognized:
-#
-#    "PartitionName" : Name used to reference this partition. The special
-#                      PartitionName of "DEFAULT" may be used to establish
-#                      default partition configurations parameters for 
-#                      subsequent partition records.
-#
-#    "Nodes"         : list of nodes that compose this partition
-#
-#    "AllowGroups"   : Comma separated list of group ids which are allowed
-#                      to use the partition. Default is "ALL" which allows
-#                      all users to access the partition.
-#
-#    "Default"       : if "YES" the corresponding partition will be the 
-#                      default when users submit jobs without specification
-#                      of a desired partition.
-#
-#    "RootOnly"      : only user id zero (root) may use this partition
-#
-#    "MaxNodes"      : Maximum count of nodes that will be allocated to any
-#                      single job. The default is unlimited or `-1'
-#
-#    "MaxTime"       : Maximum timelimit of jobs in this partition in minutes.
-#                      The default is unlimited or `-1'
-#
-#    "MinNodes"      : Minimum count of nodes that will be allocated to any
-#                      single job. The default is `1'
-#
-#    "Shared"        : Allow sharing of nodes by jobs. Possible values are
-#                      "YES" "NO" or "FORCE"
-#
-#    "State"         : State of partition. Possible values are "UP" or "DOWN"
-#
-#
-# Example Partition Configurations:
-#
-# PartitionName=DEFAULT MaxTime=30 MaxNodes=26
-# PartitionName=debug Nodes=host[0-8,18-25] State=UP Default=YES
-# PartitionName=batch Nodes=host[9-17,26]   State=UP
-#
-#
-
+#JobAcctLogFile=
+#JobAcctFrequency=
 #
+# COMPUTE NODES
+NodeName=linux[1-32] Procs=1 State=UNKNOWN
+PartitionName=debug Nodes=linux[1-32] Default=YES MaxTime=INFINITE State=UP
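
After a configuration like the one above is installed, the values the
daemons actually picked up can be spot-checked with scontrol once slurmctld
is running (a quick sanity check, separate from the generated file):

    scontrol show config | grep -i Port
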
diff --git a/slurm.spec b/slurm.spec
index 1949356f6eaed5ce00011cd019f12a075fdefc9d..42408709ec224c3aa8ebecd54e4a5d9064b4dc60 100644
--- a/slurm.spec
+++ b/slurm.spec
@@ -208,8 +208,8 @@ LIST=./bluegene.files
 touch $LIST
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/select_bluegene.so &&
   echo "%{_libdir}/slurm/select_bluegene.so"      >> $LIST
-test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/libsched_if.so &&
-  echo "%{_libdir}/slurm/libsched_if.so"          >> $LIST
+test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/libsched_if64.so &&
+  echo "%{_libdir}/slurm/libsched_if64.so"        >> $LIST
 echo "%{_mandir}/man5/bluegene.*"                 >> $LIST
 echo "%{_sbindir}/slurm_epilog"                   >> $LIST
 echo "%{_sbindir}/slurm_prolog"                   >> $LIST
diff --git a/src/plugins/select/bluegene/plugin/Makefile.am b/src/plugins/select/bluegene/plugin/Makefile.am
index 8ba2dcee6efbac6c862d05c6ef19365b356b2a95..5ba952deda516ee44adfa2e33b3a13fdb36880de 100644
--- a/src/plugins/select/bluegene/plugin/Makefile.am
+++ b/src/plugins/select/bluegene/plugin/Makefile.am
@@ -7,7 +7,7 @@ PLUGIN_FLAGS = -module -avoid-version --export-dynamic -lm
 
 INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common $(BG_INCLUDES)
 
-pkglib_LTLIBRARIES = select_bluegene.la libsched_if.la
+pkglib_LTLIBRARIES = select_bluegene.la libsched_if64.la
 
 # Blue Gene node selection plugin.
 select_bluegene_la_SOURCES = select_bluegene.c \
@@ -26,7 +26,8 @@ select_bluegene_la_LIBADD  = ../block_allocator/libbluegene_block_allocator.la
 
 
 # MPIRUN dynamic lib.
-libsched_if_la_SOURCES = libsched_if.c 
+libsched_if64_la_SOURCES = libsched_if64.c 
+libsched_if64_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
 
 
 sbin_PROGRAMS = slurm_prolog slurm_epilog sfree
diff --git a/src/plugins/select/bluegene/plugin/Makefile.in b/src/plugins/select/bluegene/plugin/Makefile.in
index 160d1aad565e69808bc0f53dec460d73de77bfde..f445335e17a99db5e0658a30c4f7c75fa418cb56 100644
--- a/src/plugins/select/bluegene/plugin/Makefile.in
+++ b/src/plugins/select/bluegene/plugin/Makefile.in
@@ -17,7 +17,7 @@
 # Makefile for select/bluegene plugin
 
 
-SOURCES = $(libsched_if_la_SOURCES) $(select_bluegene_la_SOURCES) $(sfree_SOURCES) $(slurm_epilog_SOURCES) $(slurm_prolog_SOURCES)
+SOURCES = $(libsched_if64_la_SOURCES) $(select_bluegene_la_SOURCES) $(sfree_SOURCES) $(slurm_epilog_SOURCES) $(slurm_prolog_SOURCES)
 
 srcdir = @srcdir@
 top_srcdir = @top_srcdir@
@@ -80,9 +80,9 @@ am__strip_dir = `echo $$p | sed -e 's|^.*/||'`;
 am__installdirs = "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(sbindir)"
 pkglibLTLIBRARIES_INSTALL = $(INSTALL)
 LTLIBRARIES = $(pkglib_LTLIBRARIES)
-libsched_if_la_LIBADD =
-am_libsched_if_la_OBJECTS = libsched_if.lo
-libsched_if_la_OBJECTS = $(am_libsched_if_la_OBJECTS)
+libsched_if64_la_LIBADD =
+am_libsched_if64_la_OBJECTS = libsched_if64.lo
+libsched_if64_la_OBJECTS = $(am_libsched_if64_la_OBJECTS)
 select_bluegene_la_DEPENDENCIES =  \
 	../block_allocator/libbluegene_block_allocator.la
 am_select_bluegene_la_OBJECTS = select_bluegene.lo bg_job_place.lo \
@@ -115,12 +115,12 @@ LTCOMPILE = $(LIBTOOL) --mode=compile --tag=CC $(CC) $(DEFS) \
 CCLD = $(CC)
 LINK = $(LIBTOOL) --mode=link --tag=CC $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
 	$(AM_LDFLAGS) $(LDFLAGS) -o $@
-SOURCES = $(libsched_if_la_SOURCES) $(select_bluegene_la_SOURCES) \
-	$(sfree_SOURCES) $(slurm_epilog_SOURCES) \
-	$(slurm_prolog_SOURCES)
-DIST_SOURCES = $(libsched_if_la_SOURCES) $(select_bluegene_la_SOURCES) \
+SOURCES = $(libsched_if64_la_SOURCES) $(select_bluegene_la_SOURCES) \
 	$(sfree_SOURCES) $(slurm_epilog_SOURCES) \
 	$(slurm_prolog_SOURCES)
+DIST_SOURCES = $(libsched_if64_la_SOURCES) \
+	$(select_bluegene_la_SOURCES) $(sfree_SOURCES) \
+	$(slurm_epilog_SOURCES) $(slurm_prolog_SOURCES)
 ETAGS = etags
 CTAGS = ctags
 DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
@@ -305,7 +305,7 @@ target_vendor = @target_vendor@
 AUTOMAKE_OPTIONS = foreign
 PLUGIN_FLAGS = -module -avoid-version --export-dynamic -lm
 INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common $(BG_INCLUDES)
-pkglib_LTLIBRARIES = select_bluegene.la libsched_if.la
+pkglib_LTLIBRARIES = select_bluegene.la libsched_if64.la
 
 # Blue Gene node selection plugin.
 select_bluegene_la_SOURCES = select_bluegene.c \
@@ -322,7 +322,8 @@ select_bluegene_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
 select_bluegene_la_LIBADD = ../block_allocator/libbluegene_block_allocator.la 
 
 # MPIRUN dynamic lib.
-libsched_if_la_SOURCES = libsched_if.c 
+libsched_if64_la_SOURCES = libsched_if64.c 
+libsched_if64_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
 sfree_LDADD = $(top_builddir)/src/common/libcommon.la \
 		$(top_builddir)/src/api/libslurm.la
 
@@ -401,8 +402,8 @@ clean-pkglibLTLIBRARIES:
 	  echo "rm -f \"$${dir}/so_locations\""; \
 	  rm -f "$${dir}/so_locations"; \
 	done
-libsched_if.la: $(libsched_if_la_OBJECTS) $(libsched_if_la_DEPENDENCIES) 
-	$(LINK) -rpath $(pkglibdir) $(libsched_if_la_LDFLAGS) $(libsched_if_la_OBJECTS) $(libsched_if_la_LIBADD) $(LIBS)
+libsched_if64.la: $(libsched_if64_la_OBJECTS) $(libsched_if64_la_DEPENDENCIES) 
+	$(LINK) -rpath $(pkglibdir) $(libsched_if64_la_LDFLAGS) $(libsched_if64_la_OBJECTS) $(libsched_if64_la_LIBADD) $(LIBS)
 select_bluegene.la: $(select_bluegene_la_OBJECTS) $(select_bluegene_la_DEPENDENCIES) 
 	$(LINK) -rpath $(pkglibdir) $(select_bluegene_la_LDFLAGS) $(select_bluegene_la_OBJECTS) $(select_bluegene_la_LIBADD) $(LIBS)
 install-sbinPROGRAMS: $(sbin_PROGRAMS)
@@ -456,7 +457,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/block_sys.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bluegene.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bridge_linker.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libsched_if.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libsched_if64.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/opts.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/select_bluegene.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sfree.Po@am__quote@
diff --git a/src/plugins/select/bluegene/plugin/bg_block_info.c b/src/plugins/select/bluegene/plugin/bg_block_info.c
index 42fb5207f49129341f4edd49ce449239c255b8d9..a85eda34c8d7c372c6899a552bfd7ca37af17ee2 100644
--- a/src/plugins/select/bluegene/plugin/bg_block_info.c
+++ b/src/plugins/select/bluegene/plugin/bg_block_info.c
@@ -227,10 +227,24 @@ extern int update_block_list()
 		name = bg_record->bg_block_id;
 		if ((rc = bridge_get_block_info(name, &block_ptr)) 
 		    != STATUS_OK) {
-			if(rc == INCONSISTENT_DATA
-			   && bluegene_layout_mode == LAYOUT_DYNAMIC)
-				continue;
-			
+			if(bluegene_layout_mode == LAYOUT_DYNAMIC) {
+				switch(rc) {
+				case INCONSISTENT_DATA:
+					debug2("got inconsistent data when "
+					       "querying block %s", name);
+					continue;
+					break;
+				case PARTITION_NOT_FOUND:
+					debug("block %s not found, removing "
+					      "from slurm", name);
+					list_remove(itr);
+					destroy_bg_record(bg_record);
+					continue;
+					break;
+				default:
+					break;
+				}
+			}
 			error("bridge_get_block_info(%s): %s", 
 			      name, 
 			      bg_err_str(rc));
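
The PARTITION_NOT_FOUND branch above removes a record mid-iteration, which
is safe with SLURM's common list API: list_remove() unlinks whatever item
list_next() last returned for that iterator, leaving the caller to free it.
A minimal sketch of the pattern (block_is_gone() is a hypothetical stand-in
for the bridge_get_block_info() check):

	ListIterator itr = list_iterator_create(bg_list);
	bg_record_t *rec;

	while ((rec = list_next(itr))) {
		if (block_is_gone(rec)) {       /* hypothetical predicate */
			list_remove(itr);       /* unlink rec from bg_list */
			destroy_bg_record(rec); /* then free it ourselves */
		}
	}
	list_iterator_destroy(itr);
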
diff --git a/src/plugins/select/bluegene/plugin/bg_job_place.c b/src/plugins/select/bluegene/plugin/bg_job_place.c
index 07b3378b95824b6fc03e2293281d47e8e1370852..3982bbb57a4afa21c4784b1323a3303ed9aa411d 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_place.c
+++ b/src/plugins/select/bluegene/plugin/bg_job_place.c
@@ -408,62 +408,64 @@ try_again:
 	}
 	list_iterator_destroy(itr);
 
-	if(*found_bg_record)
-		goto found_it;
-
-	if(!found && bluegene_layout_mode == LAYOUT_DYNAMIC) {
-		/* 
-		   see if we have already tryed to create this 
-		   size but couldn't make it right now no reason 
-		   to try again 
-		*/
-		slurm_mutex_lock(&request_list_mutex);
-		itr = list_iterator_create(bg_request_list);
-		while ((try_request = list_next(itr)) != NULL) {
-			if(try_request->procs > req_procs) {
-				debug("already tried to create but "
-				      "can't right now.");
-				list_iterator_destroy(itr);
-				slurm_mutex_unlock(&request_list_mutex);
+	/* set the bitmap and do other allocation activities */
+	if (*found_bg_record) {
+		if(!test_only) {
+			if(check_block_bp_states(
+				   (*found_bg_record)->bg_block_id) 
+			   == SLURM_ERROR) {
+				(*found_bg_record)->job_running = -3;
+				(*found_bg_record)->state = RM_PARTITION_ERROR;
 				slurm_mutex_unlock(&block_state_mutex);
-				if(test_only)
-					return SLURM_SUCCESS;
-				else
-					return SLURM_ERROR;
-			}				
+				goto try_again;
+			}
 		}
-		list_iterator_destroy(itr);
-		slurm_mutex_unlock(&request_list_mutex);
+		format_node_name(*found_bg_record, tmp_char);
+	
+		debug("_find_best_block_match %s <%s>", 
+			(*found_bg_record)->bg_block_id, 
+			tmp_char);
+		bit_and(slurm_block_bitmap, (*found_bg_record)->bitmap);
+		slurm_mutex_unlock(&block_state_mutex);
+		return SLURM_SUCCESS;
 	}
 
-	if(bluegene_layout_mode == LAYOUT_OVERLAP 
-	   &&!test_only && created<2 && !*found_bg_record) {
+	/* everything below assumes *found_bg_record is NULL */
+	if(bluegene_layout_mode == LAYOUT_OVERLAP && !test_only && created<2) {
 		created++;
 		slurm_mutex_unlock(&block_state_mutex);
 		goto try_again;
 	}
 		
-	if(!found && test_only && bluegene_layout_mode == LAYOUT_DYNAMIC) {
-		slurm_mutex_unlock(&block_state_mutex);
+	slurm_mutex_unlock(&block_state_mutex);
+	if(bluegene_layout_mode != LAYOUT_DYNAMIC)
+		goto not_dynamic;
+	
+	if(!found) {
 		/* 
 		   see if we have already tryed to create this 
-		   size but couldn't make it right now no reason 
+		   size OR GREATER but couldn't; if so, there is no reason 
 		   to try again 
 		*/
 		slurm_mutex_lock(&request_list_mutex);
 		itr = list_iterator_create(bg_request_list);
-		while ((try_request = list_next(itr)) != NULL) {
-			if(try_request->procs == req_procs) {
-				debug2("already tried to create but "
-				     "can't right now. 2");
+		while ((try_request = list_next(itr))) {
+			if(try_request->procs >= req_procs) {
+				debug("already tried to create but "
+				      "can't right now.");
 				list_iterator_destroy(itr);
 				slurm_mutex_unlock(&request_list_mutex);
-				return SLURM_SUCCESS;
+				if(test_only)
+					return SLURM_SUCCESS;
+				else
+					return SLURM_ERROR;
 			}				
 		}
 		list_iterator_destroy(itr);
 		slurm_mutex_unlock(&request_list_mutex);
-		
+	}
+
+	if(!found && test_only) {
 		for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) 
 			request.start[i] = start[i];
 			
@@ -522,11 +524,8 @@ try_again:
 			xfree(request.save_name);
 			return SLURM_SUCCESS;
 		}
-	} else if(!*found_bg_record 
-		  && !created 
-		  && bluegene_layout_mode == LAYOUT_DYNAMIC) {
+	} else if(!created) {
 		debug2("going to create %d", target_size);
-		slurm_mutex_unlock(&block_state_mutex);
 		lists_of_lists = list_create(NULL);
 		if(job_ptr->details->req_nodes) {
 			list_append(lists_of_lists, bg_job_block_list);
@@ -545,7 +544,7 @@ try_again:
 				list_append(lists_of_lists, bg_job_block_list);
 		}
 		itr = list_iterator_create(lists_of_lists);
-		while ((temp_list = (List)list_next(itr)) != NULL) {
+		while ((temp_list = (List)list_next(itr))) {
 			created++;
 
 			for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) 
@@ -580,32 +579,8 @@ try_again:
 		list_iterator_destroy(itr);
 		if(lists_of_lists)
 			list_destroy(lists_of_lists);
-		slurm_mutex_lock(&block_state_mutex);		
-	}
-found_it:
-	/* set the bitmap and do other allocation activities */
-	if (*found_bg_record) {
-		if(!test_only) {
-			if(check_block_bp_states(
-				   (*found_bg_record)->bg_block_id) 
-			   == SLURM_ERROR) {
-				(*found_bg_record)->job_running = -3;
-				(*found_bg_record)->state = RM_PARTITION_ERROR;
-				slurm_mutex_unlock(&block_state_mutex);
-				goto try_again;
-			}
-		}
-		format_node_name(*found_bg_record, tmp_char);
-	
-		debug("_find_best_block_match %s <%s>", 
-			(*found_bg_record)->bg_block_id, 
-			tmp_char);
-		bit_and(slurm_block_bitmap, (*found_bg_record)->bitmap);
-		slurm_mutex_unlock(&block_state_mutex);
-		return SLURM_SUCCESS;
 	}
-		
-	slurm_mutex_unlock(&block_state_mutex);
+not_dynamic:
 	debug("_find_best_block_match none found");
 	return SLURM_ERROR;
 }
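
With this restructuring the tail of _find_best_block_match() reads as a
flat sequence of cases rather than nested tests; in outline (paraphrased,
not verbatim source):

	block found                -> verify BP states, set bitmap, SUCCESS
	LAYOUT_OVERLAP, live run   -> retry the search (at most twice)
	mode != LAYOUT_DYNAMIC     -> not_dynamic: no match, ERROR
	nothing found              -> bail out early if an equal-or-larger
	                              create request already failed
	test_only                  -> simulate creating a block
	otherwise (!created)       -> attempt block creation, then retry
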
diff --git a/src/plugins/select/bluegene/plugin/libsched_if.c b/src/plugins/select/bluegene/plugin/libsched_if64.c
similarity index 96%
rename from src/plugins/select/bluegene/plugin/libsched_if.c
rename to src/plugins/select/bluegene/plugin/libsched_if64.c
index 54bb35708740aa3dfabf7d21b2790dbb3d6d9ee9..b64740eb288194f33b4baf77ba4c2a9c26ea71da 100644
--- a/src/plugins/select/bluegene/plugin/libsched_if.c
+++ b/src/plugins/select/bluegene/plugin/libsched_if64.c
@@ -36,9 +36,11 @@
  *  with SLURM; if not, write to the Free Software Foundation, Inc.,
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
+#include <stdio.h>
 
 int get_parameters(void *params) 
 {
+	printf("YOU ARE OUTSIDE OF SLURM!!!! NOT RUNNING MPIRUN!\n");
 	return -1;
 }
 
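For background: BlueGene's mpirun dlopen()s this library and calls
get_parameters() before launching, so the -1 return (and now the printf)
is what a user sees when invoking mpirun outside a SLURM allocation. A
rough illustration of the consuming side, assuming the usual dlopen/dlsym
convention (this is not IBM's actual mpirun code):

	#include <dlfcn.h>
	#include <stdio.h>

	int main(void)
	{
		/* Load the scheduler hook, if one is installed. */
		void *handle = dlopen("libsched_if64.so", RTLD_NOW);
		if (handle) {
			int (*get_params)(void *) = (int (*)(void *))
				dlsym(handle, "get_parameters");
			/* A -1 return tells the launcher not to proceed. */
			if (get_params && get_params(NULL) == -1) {
				fprintf(stderr, "launch refused\n");
				dlclose(handle);
				return 1;
			}
			dlclose(handle);
		}
		return 0;
	}
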
diff --git a/src/sview/job_info.c b/src/sview/job_info.c
index c1694e7810e841671ce86079d5c377ac202e54a6..edf78d86c0f9a1eaa2ef27068edcbdc02da2557a 100644
--- a/src/sview/job_info.c
+++ b/src/sview/job_info.c
@@ -927,7 +927,16 @@ extern void admin_edit_job(GtkCellRendererText *cell,
 	int stepid = NO_VAL;
 	int column = GPOINTER_TO_INT(g_object_get_data(G_OBJECT(cell), 
 						       "column"));
-	
+#ifdef HAVE_BG
+	uint16_t rotate;
+	uint16_t conn_type;
+	char* token, *delimiter = ",x", *next_ptr;
+	int j;
+	uint16_t geo[SYSTEM_DIMENSIONS];
+	char* geometry_tmp = xstrdup(new_text);
+	char* original_ptr = geometry_tmp;
+#endif
+
 	if(!new_text || !strcmp(new_text, ""))
 		goto no_input;
 
@@ -1073,14 +1082,9 @@ extern void admin_edit_job(GtkCellRendererText *cell,
 		break;
 #ifdef HAVE_BG
 	case SORTID_GEOMETRY:
-		{
-			char* token, *delimiter = ",x", *next_ptr;
-			int j;
-			uint16_t geo[SYSTEM_DIMENSIONS];
-			char* geometry_tmp = xstrdup(new_text);
-			char* original_ptr = geometry_tmp;
-		}
 		token = strtok_r(geometry_tmp, delimiter, &next_ptr);
+		for (j=0; j<SYSTEM_DIMENSIONS; j++)
+			geo[j] = (uint16_t) NO_VAL;
 		for (j=0; j<SYSTEM_DIMENSIONS; j++) {
 			if (token == NULL) {
 				//error("insufficient dimensions in "
@@ -1104,12 +1108,6 @@ extern void admin_edit_job(GtkCellRendererText *cell,
 			goto print_error;
 		}
 		
-		if (rc != 0) {
-			for (j=0; j<SYSTEM_DIMENSIONS; j++)
-				geo[j] = (uint16_t) NO_VAL;
-			exit_code = 1;
-		} else
-			update_cnt++;
 		select_g_set_jobinfo(job_msg.select_jobinfo,
 				     SELECT_DATA_GEOMETRY,
 				     (void *) &geo);
@@ -1117,9 +1115,6 @@ extern void admin_edit_job(GtkCellRendererText *cell,
 		type = "geometry";
 		break;
 	case SORTID_ROTATE:
-		{
-			uint16_t rotate;
-		}
 		if (!strcasecmp(new_text, "yes")) {
 			rotate = 1;
 			temp = "*";
@@ -1134,9 +1129,6 @@ extern void admin_edit_job(GtkCellRendererText *cell,
 		type = "rotate";	
 		break;
 	case SORTID_CONNECTION:
-		{
-			uint16_t conn_type;
-		}
 		if (!strcasecmp(new_text, "torus")) {
 			conn_type = SELECT_TORUS;
 		} else if (!strcasecmp(new_text, "mesh")) {
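
The sview change above fixes a scoping bug: geo[], rotate, conn_type, and
the strtok_r() locals were declared inside bare { } blocks within their
switch cases and then referenced after those blocks closed, so the
HAVE_BG build would not compile. The failing shape, in miniature:

	case SORTID_ROTATE:
		{
			uint16_t rotate;	/* scope ends at this brace */
		}
		rotate = 1;			/* error: 'rotate' undeclared */

Hoisting the declarations to function scope under #ifdef HAVE_BG (and
pre-filling geo[] with NO_VAL before parsing) resolves it.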