diff --git a/NEWS b/NEWS
index ec0527851124b1655d345fe5778fe53e669a3a82..6eee814bcd3d01e83feb25c05d6e090fdb514ceb 100644
--- a/NEWS
+++ b/NEWS
@@ -36,7 +36,7 @@ documents those changes that are of interest to users and admins.
 * Changes in SLURM 1.3.8
 ========================
  -- Added PrivateData flags for Users, Usage, and Accounts to Accounting. 
-    If using slurmdbd, set in the slurmdbd.conf file. Ohterwise set in the 
+    If using slurmdbd, set in the slurmdbd.conf file. Otherwise set in the 
     slurm.conf file.  See "man slurm.conf" or "man slurmdbd.conf" for details.
  -- Reduce frequency of resending job kill RPCs. Helpful in the event of 
     network problems or down nodes.
@@ -44,6 +44,12 @@ documents those changes that are of interest to users and admins.
     plus sched/backfill.
  -- For salloc, if no local command is specified, execute the user's default
     shell.
+ -- BLUEGENE - When starting a job, verify that no other job is running on
+    blocks required to be freed.  If such a job is found, the new job is
+    requeued.  No job will be lost.
+ -- BLUEGENE - Set MPI environment variables from salloc.
+ -- BLUEGENE - Fix threading issue for overlap mode.
+ -- Reject batch scripts containing DOS linebreaks.
 
 * Changes in SLURM 1.3.7
 ========================
@@ -494,6 +500,13 @@ documents those changes that are of interest to users and admins.
     Moved existing digital signature logic into new plugin: crypto/openssl.
     Added new support for crypto/munge (available with GPL license).
 
+* Changes in SLURM 1.2.35
+=========================
+ -- Permit SPANK plugins to dynamically register options at runtime based upon
+    configuration or other runtime checks.
+ -- Add "include" keyword to SPANK plugstack.conf file to optionally include
+    other configuration files or directories of configuration files.
+
 * Changes in SLURM 1.2.34
 =========================
  -- Permit the cancellation of a job that is in the process of being 
diff --git a/doc/html/faq.shtml b/doc/html/faq.shtml
index 9061ba998283897730bc59ada3579815b032eb7e..b9711d0b1be7f91dda6c50365c2a68c4b62e336f 100644
--- a/doc/html/faq.shtml
+++ b/doc/html/faq.shtml
@@ -85,6 +85,8 @@ SLURM RPM?</li>
 <li><a href="#slurmdbd">Why should I use the slurmdbd instead of the
 regular database plugins?</li>
 <li><a href="#debug">How can I build SLURM with debugging symbols?</li>
+<li><a href="#state_preserve">How can I easily preserve drained node information 
+between major SLURM updates?</li>
 </ol>
 
 <h2>For Users</h2>
@@ -984,8 +986,21 @@ You want the "-g" option to produce debugging information and
 "-O0" to set the optimization level to zero (off). For example:<br>
 CFLAGS="-g -O0" ./configure ...
 
+<p><a name="state_preserve"><b>30. How can I easily preserve drained node 
+information between major SLURM updates?</b><br>
+Major SLURM updates generally have changes in the state save files and 
+communication protocols, so a cold-start (without state) is usually 
+required. If you have nodes in a DRAIN state and want to preserve that
+information, you can easily build a script to restore it using the 
+<i>sinfo</i> command. The following command line will report the 
+<i>Reason</i> field for every node in a DRAIN state and write the output 
+in a form that can be executed later to restore state.
+<pre>
+sinfo -t drain -h -o "scontrol update nodename='%N' state=drain reason='%E'"
+</pre>
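+For example, the output might be captured to a file (the file name used 
+below is arbitrary) and the file executed after the upgrade to restore 
+the node states:
+<pre>
+sinfo -t drain -h -o "scontrol update nodename='%N' state=drain reason='%E'" >drain_info
+# upgrade SLURM and cold-start the daemons, then:
+sh drain_info
+</pre>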
+
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 8 August 2008</p>
+<p style="text-align:center;">Last modified 2 September 2008</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/testimonials.shtml b/doc/html/testimonials.shtml
index 0ee1daef531ef4ef8608e187f46de75e3c359ff7..ebc3c819f05ed004cbdfb22f4f1fa166df7dd4f8 100644
--- a/doc/html/testimonials.shtml
+++ b/doc/html/testimonials.shtml
@@ -112,6 +112,6 @@ Bill Celmaster, XC Program Manager, Hewlett-Packard Company
 </i>
 <HR SIZE=4>
 
-<p style="text-align:center;">Last modified 289 July 2008</p>
+<p style="text-align:center;">Last modified 28 July 2008</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/man/man1/scontrol.1 b/doc/man/man1/scontrol.1
index b0d91c25dc807532fae5013e806f3205e9d56f23..34343bd2e7d20ec0d71558f253163d7302222a5f 100644
--- a/doc/man/man1/scontrol.1
+++ b/doc/man/man1/scontrol.1
@@ -1,4 +1,4 @@
-.TH SCONTROL "1" "December 2007" "scontrol 1.3" "Slurm components"
+.TH SCONTROL "1" "August 2008" "scontrol 1.3" "Slurm components"
 
 .SH "NAME"
 scontrol \- Used view and modify Slurm configuration and state.
@@ -43,8 +43,9 @@ Print information one line per record.
 Print no warning or informational messages, only fatal error messages.
 .TP
 \fB\-v\fR, \fB\-\-verbose\fR
-Print detailed event logging. This includes time\-stamps on data structures, 
-record counts, etc.
+Print detailed event logging. Multiple \fB\-v\fR's will further increase 
+the verbosity of logging. By default only errors will be displayed.
+
 .TP
 \fB\-V\fR , \fB\-\-version\fR
 Print version information and exit.
diff --git a/doc/man/man1/srun.1 b/doc/man/man1/srun.1
index 129669c3a4c31c4b5d0acabaa27dfe32eb66f215..d0fe6ad4f0d0fe68102fdbb3aaf3b7e109e87865 100644
--- a/doc/man/man1/srun.1
+++ b/doc/man/man1/srun.1
@@ -1,4 +1,4 @@
-.TH SRUN "1" "July 2008" "srun 1.3" "slurm components"
+.TH SRUN "1" "August 2008" "srun 1.3" "slurm components"
 
 .SH "NAME"
 srun \- run parallel jobs
@@ -893,8 +893,8 @@ Display version information and exit.
 
 .TP
 \fB\-v\fR, \fB\-\-verbose\fR
-verbose operation. Multiple \fB\-v\fR's will further increase the verbosity of
-\fBsrun\fR. By default only errors will be displayed.
+Print detailed event logging. Multiple \fB\-v\fR's will further increase 
+the verbosity of logging. By default only errors will be displayed.
 
 .TP
 \fB\-W\fR, \fB\-\-wait\fR=\fIseconds\fR
diff --git a/doc/man/man8/spank.8 b/doc/man/man8/spank.8
index 2ee7e5886d274048c78f9c153b69160b00340476..1599ec8cb7052d50b827f8c9c6070ae83ff569d0 100644
--- a/doc/man/man8/spank.8
+++ b/doc/man/man8/spank.8
@@ -169,10 +169,8 @@ option is specified, its value is forwarded and registered with
 the plugin on the remote side. In this way, \fBSPANK\fR plugins
 may dynamically provide new options and functionality to SLURM.
 .LP
-In order to register new options with SLURM, a plugin must 
-define symbol \fBspank_options\fR which is a pointer to the
-first element of an array of \fBstruct spank_option\fR declared
-in \fB<slurm/spank.h>\fR as
+Each option registered with SLURM by a plugin takes the form of
+a \fBstruct spank_option\fR, which is declared in \fB<slurm/spank.h>\fR as
 .nf
 
    struct spank_option {
@@ -185,7 +183,7 @@ in \fB<slurm/spank.h>\fR as
    };
 
 .fi
-.LP
+
 Where
 .TP
 .I name
@@ -221,10 +219,28 @@ struct, \fIoptarg\fR is the supplied argument if applicable, and \fIremote\fR
 is 0 if the function is being called from the "local" host 
 (e.g. \fBsrun\fR or \fBsbatch\fR) or 1 from the "remote" host (\fBslurmd\fR).
 .LP
-The last element of the array must filled with zeros. A
+There are two methods by which the plugin can register these options
+with SLURM. The simplest method is for the plugin to define an array
+of \fBstruct spank_option\fR with the symbol name \fBspank_options\fR. 
+The final element in the options table must be filled with zeros. A
 \fBSPANK_OPTIONS_TABLE_END\fR macro is defined in \fB<slurm/spank.h>\fR
 for this purpose.
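+For example, a static options table for a single option might look like
+the following (the option name and callback here are purely illustrative):
+.nf
+
+   static int _opt_cb (int val, const char *optarg, int remote);
+
+   struct spank_option spank_options[] =
+   {
+       { "plugin-opt", "n", "Illustrative option provided by this plugin",
+         1, 0, _opt_cb },
+       SPANK_OPTIONS_TABLE_END
+   };
+
+.fi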
 .LP
+Plugin options may also be dynamically registered with SLURM using 
+the \fBspank_option_register\fR function. This function is only valid
+when called from the plugin's \fBslurm_spank_init\fR handler, and 
+registers one option at a time. The prototype is
+.nf
+
+   spank_err_t spank_option_register (spank_t sp, 
+		   struct spank_option *opt);
+
+.fi
+This function returns \fBESPANK_SUCCESS\fR on successful registration
+of the option, or \fBESPANK_BAD_ARG\fR on error, including an invalid
+\fBspank_t\fR handle or a call made outside of the \fBslurm_spank_init\fR
+function.
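+.LP
+For example, the option from the table above could instead be registered
+dynamically from the plugin's \fBslurm_spank_init\fR function (again, a
+purely illustrative sketch):
+.nf
+
+   int slurm_spank_init (spank_t sp, int ac, char **av)
+   {
+       static struct spank_option opt = { "plugin-opt", "n",
+           "Illustrative option provided by this plugin", 1, 0, _opt_cb };
+
+       if (spank_option_register (sp, &opt) != ESPANK_SUCCESS)
+           slurm_error ("plugin-opt: option registration failed");
+       return (0);
+   }
+
+.fi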
+.LP
 When an option is provided by the user on the local side, \fBSLURM\fR will 
 immediately invoke the option's callback with \fIremote\fR=0. This
 is meant for the plugin to do local sanity checking of the option before
@@ -276,6 +292,20 @@ be placed into the config file. The plugins will simply be called
 in order, one after the other, and appropriate action taken on
 failure given that state of the plugin's \fIoptional\fR flag.
 .LP
+Additional config files or directories of config files may be included
+in \fBplugstack.conf\fR with the \fBinclude\fR keyword. The \fBinclude\fR
+keyword must appear on its own line, and takes a glob as its parameter,
+so multiple files may be included from one \fBinclude\fR line. For
+example, the following syntax will load all config files in the
+/etc/slurm/plugstack.conf.d directory, in local collation order:
+.nf
+
+  include /etc/slurm/plugstack.conf.d/*
+
+.fi
+which might be considered a more flexible method for building up
+a spank plugin stack.
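+Each file read via \fBinclude\fR is parsed with the same syntax as
+\fBplugstack.conf\fR itself, so an included file simply contains ordinary
+stack lines, for instance (the plugin name here is illustrative):
+.nf
+
+  optional /usr/lib/slurm/example_spank.so arg=value
+
+.fi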
+.LP
 The \fBSPANK\fR config file is re\-read on each job launch, so editing
 the config file will not affect running jobs. However care should
 be taken so that a partially edited config file is not read by a
diff --git a/slurm/spank.h b/slurm/spank.h
index 5ef0a5eb10a59a783812b57e59c10369f859d282..3d59ba1747083d6c261ab9aadfc0ee05b3faee6c 100644
--- a/slurm/spank.h
+++ b/slurm/spank.h
@@ -183,6 +183,8 @@ struct spank_option {
 
 /*
  *  Plugin may declare spank_options option table:
+ *   [Note: options may also be declared with spank_option_register(),
+ *    defined below.]
  */
 extern struct spank_option spank_options [];
 
@@ -222,6 +224,17 @@ int spank_symbol_supported (const char *symbol);
  */
 int spank_remote (spank_t spank);
 
+/*
+ *  Register a plugin-provided option dynamically. This function
+ *   is only valid when called from slurm_spank_init(), and must
+ *   be called in both remote (slurmd) and local (srun) contexts.
+ *   May be called multiple times to register many options.
+ *
+ *  Returns ESPANK_SUCCESS on successful registration of the option
+ *   or ESPANK_BAD_ARG if not called from slurm_spank_init().
+ */
+spank_err_t spank_option_register (spank_t spank, struct spank_option *opt);
+
 
 /*  Get the value for the current job or task item specified, 
  *   storing the result in the subsequent pointer argument(s).
diff --git a/src/api/partition_info.c b/src/api/partition_info.c
index 416d43440e46ddcb692f6eaf21d27233591e61ad..9bc41dd767d8aac9c68d294b7b829901d4b9c0dd 100644
--- a/src/api/partition_info.c
+++ b/src/api/partition_info.c
@@ -114,11 +114,12 @@ char *slurm_sprint_partition_info ( partition_info_t * part_ptr,
 #ifdef HAVE_BG
 	convert_num_unit((float)part_ptr->total_nodes, tmp1, sizeof(tmp1),
 			 UNIT_NONE);
+	convert_num_unit((float)part_ptr->total_cpus, tmp2, sizeof(tmp2),
+			 UNIT_NONE);
 #else
 	snprintf(tmp1, sizeof(tmp1), "%u", part_ptr->total_nodes);
+	snprintf(tmp2, sizeof(tmp2), "%u", part_ptr->total_cpus);
 #endif
-	convert_num_unit((float)part_ptr->total_cpus, tmp2, sizeof(tmp2),
-			 UNIT_NONE);
 	snprintf(tmp_line, sizeof(tmp_line),
 		 "PartitionName=%s TotalNodes=%s TotalCPUs=%s ", 
 		 part_ptr->name, tmp1, tmp2);
diff --git a/src/common/env.c b/src/common/env.c
index ae46cd83592a7a3d7ada0a6945b358a1b12b5040..828c07e8e8a0c2a897fc67ea4ae67b30ce9d01d0 100644
--- a/src/common/env.c
+++ b/src/common/env.c
@@ -766,6 +766,7 @@ extern char *uint32_compressed_to_str(uint32_t array_len,
  *	SLURM_JOB_NODELIST
  *	SLURM_JOB_CPUS_PER_NODE
  *	LOADLBATCH (AIX only)
+ *	MPIRUN_PARTITION, MPIRUN_NOFREE, and MPIRUN_NOALLOCATE (BGL only)
  *
  * Sets OBSOLETE variables:
  *	SLURM_JOBID
@@ -777,7 +778,7 @@ extern char *uint32_compressed_to_str(uint32_t array_len,
 void
 env_array_for_job(char ***dest, const resource_allocation_response_msg_t *alloc)
 {
-	char *tmp;
+	char *bgl_part_id = NULL, *tmp;
 
 	env_array_overwrite_fmt(dest, "SLURM_JOB_ID", "%u", alloc->job_id);
 	env_array_overwrite_fmt(dest, "SLURM_JOB_NUM_NODES", "%u",
@@ -794,6 +795,16 @@ env_array_for_job(char ***dest, const resource_allocation_response_msg_t *alloc)
 	env_array_overwrite(dest, "LOADLBATCH", "yes");
 #endif
 
+	/* BlueGene only */
+	select_g_get_jobinfo(alloc->select_jobinfo, SELECT_DATA_BLOCK_ID,
+			     &bgl_part_id);
+	if (bgl_part_id) {
+		env_array_overwrite_fmt(dest, "MPIRUN_PARTITION", "%s",
+					bgl_part_id);
+		env_array_overwrite_fmt(dest, "MPIRUN_NOFREE", "%d", 1);
+		env_array_overwrite_fmt(dest, "MPIRUN_NOALLOCATE", "%d", 1);
+	}
+
 	/* obsolete */
 	env_array_overwrite_fmt(dest, "SLURM_JOBID", "%u", alloc->job_id);
 	env_array_overwrite_fmt(dest, "SLURM_NNODES", "%u", alloc->node_cnt);
diff --git a/src/common/plugstack.c b/src/common/plugstack.c
index 62cb45ba390e43cef7c9ae32059af7a76b33e29b..9776df4898e5a59b31cc6eaa4818e437e6688671 100644
--- a/src/common/plugstack.c
+++ b/src/common/plugstack.c
@@ -41,6 +41,8 @@
 #include <string.h>
 #include <ctype.h>
 #include <stdlib.h>
+#include <libgen.h>
+#include <glob.h>
 
 #include "src/common/plugin.h"
 #include "src/common/xmalloc.h"
@@ -60,6 +62,7 @@
 
 #define REQUIRED "required"
 #define OPTIONAL "optional"
+#define INCLUDE  "include"
 
 struct spank_plugin_operations {
 	spank_f *init;
@@ -145,6 +148,7 @@ typedef enum step_fn {
 struct spank_handle {
 #   define SPANK_MAGIC 0x00a5a500
 	int                  magic;  /* Magic identifier to ensure validity. */
+	struct spank_plugin *plugin; /* Current plugin using handle          */
 	spank_handle_type_t  type;   /* remote(slurmd) || local(srun)        */
 	step_fn_t            phase;  /* Which spank fn are we called from?   */
 	void               * job;    /* Reference to current srun|slurmd job */
@@ -168,7 +172,6 @@ static const char * default_spank_path = NULL;
  */
 static int _spank_plugin_options_cache(struct spank_plugin *p);
 
-
 static void _argv_append(char ***argv, int ac, const char *newarg)
 {
 	*argv = xrealloc(*argv, (++ac + 1) * sizeof(char *));
@@ -177,9 +180,32 @@ static void _argv_append(char ***argv, int ac, const char *newarg)
 	return;
 }
 
+typedef enum {
+   CF_ERROR = 0,
+   CF_OPTIONAL,
+   CF_REQUIRED,
+   CF_INCLUDE,
+} cf_line_t;
+
+static cf_line_t _plugin_stack_line_type (const char *str)
+{
+	if (strcmp(str, REQUIRED) == 0)
+		return (CF_REQUIRED);
+	else if (strcmp(str, OPTIONAL) == 0)
+		return (CF_OPTIONAL);
+	else if (strcmp(str, INCLUDE) == 0)
+		return (CF_INCLUDE);
+	else {
+		error("spank: Invalid option \"%s\". Must be %s, %s or %s",
+		     str, REQUIRED, OPTIONAL, INCLUDE);
+		return (CF_ERROR);
+	}
+}
+
+
 static int
 _plugin_stack_parse_line(char *line, char **plugin, int *acp, char ***argv,
-			 bool * required)
+			 cf_line_t * type)
 {
 	int ac;
 	const char *separators = " \t\n";
@@ -205,19 +231,10 @@ _plugin_stack_parse_line(char *line, char **plugin, int *acp, char ***argv,
 		*s = '\0';
 
 	if (!(option = strtok_r(line, separators, &sp)))
-		return 0;
-
-	if (strncmp(option, REQUIRED, strlen(option)) == 0) {
-		*required = true;
-	} 
-	else if (strncmp(option, OPTIONAL, strlen(option)) == 0) {
-		*required = false;
-	} 
-	else {
-		error("spank: Invalid option \"%s\". Must be either %s or %s",
-		     option, REQUIRED, OPTIONAL);
+		return (0);
+
+	if (((*type) = _plugin_stack_line_type(option)) == CF_ERROR) 
 		return (-1);
-	}
 
 	if (!(path = strtok_r(NULL, separators, &sp)))
 		return (-1);
@@ -325,24 +342,27 @@ _spank_plugin_find (const char *path, const char *file)
 	return (NULL);
 }
 
+static int _spank_conf_include (const char *, int, const char *, List *);
+
 static int
-_spank_stack_process_line(const char *file, int line, char *buf,
-			  struct spank_plugin **plugin)
+_spank_stack_process_line(const char *file, int line, char *buf, List *stackp)
 {
 	char **argv;
 	int ac;
 	char *path;
-	bool required = FALSE;
+	cf_line_t type = CF_REQUIRED;
+	bool required;
 
 	struct spank_plugin *p;
 
-	*plugin = NULL;
-
-	if (_plugin_stack_parse_line(buf, &path, &ac, &argv, &required) < 0) {
+	if (_plugin_stack_parse_line(buf, &path, &ac, &argv, &type) < 0) {
 		error("spank: %s:%d: Invalid line. Ignoring.", file, line);
 		return (0);
 	}
 
+	if (type == CF_INCLUDE)
+		return (_spank_conf_include (file, line, path, stackp));
+
 	if (path == NULL)	/* No plugin listed on this line */
 		return (0);
 
@@ -355,21 +375,31 @@ _spank_stack_process_line(const char *file, int line, char *buf,
 		}
 	}
 
+	required = (type == CF_REQUIRED);
 	if (!(p = _spank_plugin_create(path, ac, argv, required))) {
 		if (required)
-			error ("spank: %s:%d: Failed to load plugin %s. Aborting.",
-					file, line, path);
+			error ("spank: %s:%d:" 
+			       " Failed to load plugin %s. Aborting.",
+			       file, line, path);
 		else
-			verbose ("spank: %s:%d: Failed to load optional plugin %s. Ignored.",
-					file, line, path);
+			verbose ("spank: %s:%d:" 
+				 " Failed to load optional plugin %s. Ignored.",
+				 file, line, path);
 		return (required ? -1 : 0);
 	}
+	if (*stackp == NULL)
+		*stackp = list_create((ListDelF) _spank_plugin_destroy);
+
+	verbose ("spank: %s:%d: Loaded plugin %s", 
+			file, line, xbasename (p->fq_path));
 
-	*plugin = p;
+	list_append (*stackp, p);
+	_spank_plugin_options_cache(p);
 
 	return (0);
 }
 
+
 static int _spank_stack_create(const char *path, List * listp)
 {
 	int line;
@@ -390,24 +420,8 @@ static int _spank_stack_create(const char *path, List * listp)
 
 	line = 1;
 	while (fgets(buf, sizeof(buf), fp)) {
-		struct spank_plugin *p;
-
-		if (_spank_stack_process_line(path, line, buf, &p) < 0)
+		if (_spank_stack_process_line(path, line, buf, listp) < 0)
 			goto fail_immediately;
-
-		if (p == NULL)
-			continue;
-
-		if (*listp == NULL)
-			*listp =
-			    list_create((ListDelF) _spank_plugin_destroy);
-
-		verbose("spank: loaded plugin %s\n",
-			xbasename(p->fq_path));
-		list_append(*listp, p);
-
-		_spank_plugin_options_cache(p);
-
 		line++;
 	}
 
@@ -423,12 +437,67 @@ static int _spank_stack_create(const char *path, List * listp)
 	return (-1);
 }
 
+static int
+_spank_conf_include (const char *file, int lineno, const char *pattern,
+		List *stackp)
+{
+	int rc = 0;
+	glob_t gl;
+	size_t i;
+	char *copy = NULL;
+
+	if (pattern == NULL) {
+		error ("%s: %d: Invalid include directive", file, lineno);
+		return (SLURM_ERROR);
+	}
+
+	if (pattern[0] != '/') { 
+		char *dirc = xstrdup (file);
+		char *dname = dirname (dirc);
+
+		if (dname != NULL)  {
+			xstrfmtcat (copy, "%s/%s", dname, pattern);
+			pattern = copy;
+		}
+		xfree (dirc);
+	}
+
+	verbose ("%s: %d: include \"%s\"", file, lineno, pattern);
+	
+	rc = glob (pattern, 0, NULL, &gl);
+	switch (rc) {
+	  case 0:
+	  	for (i = 0; i < gl.gl_pathc; i++) {
+			rc = _spank_stack_create (gl.gl_pathv[i], stackp);
+			if (rc < 0) 
+				break;
+		}
+	  	break;
+	  case GLOB_NOMATCH:
+		break;
+	  case GLOB_NOSPACE:
+		errno = ENOMEM;
+	  case GLOB_ABORTED:
+		verbose ("%s:%d: cannot read dir %s: %m",
+			file, lineno, pattern);
+		break;
+	  default:
+		error ("Unknown glob(3) return code = %d", rc);
+		break;
+	}
+
+	xfree (copy);
+	globfree (&gl);
+	return (rc);
+}
+
 static int
 _spank_handle_init(struct spank_handle *spank, void * arg,
 		   int taskid, step_fn_t fn)
 {
 	memset(spank, 0, sizeof(*spank));
 	spank->magic = SPANK_MAGIC;
+	spank->plugin = NULL;
 
 	spank->phase = fn;
 
@@ -494,6 +563,8 @@ static int _do_call_stack(step_fn_t type, void * job, int taskid)
 	while ((sp = list_next(i))) {
 		const char *name = xbasename(sp->fq_path);
 
+		spank->plugin = sp;
+
 		switch (type) {
 		case SPANK_INIT:
 			if (sp->ops.init) {
@@ -656,6 +727,33 @@ static int _spank_next_option_val(void)
 	return (optval);
 }
 
+static struct spank_option * _spank_option_copy(struct spank_option *opt)
+{
+	struct spank_option *copy = xmalloc (sizeof (*copy));
+
+	memset (copy, 0, sizeof (*copy));
+
+	copy->name = xstrdup (opt->name);
+	copy->has_arg = opt->has_arg;
+	copy->val = opt->val;
+	copy->cb = opt->cb;
+
+	if (opt->arginfo)
+		copy->arginfo = xstrdup (opt->arginfo);
+	if (opt->usage)
+		copy->usage = xstrdup (opt->usage);
+
+	return (copy);
+}
+
+static void _spank_option_destroy(struct spank_option *opt)
+{
+	xfree (opt->name);
+	xfree (opt->arginfo);
+	xfree (opt->usage);
+	xfree (opt);
+}
+
 static struct spank_plugin_opt *_spank_plugin_opt_create(struct
 							 spank_plugin *p,
 							 struct
@@ -663,7 +761,7 @@ static struct spank_plugin_opt *_spank_plugin_opt_create(struct
 							 int disabled)
 {
 	struct spank_plugin_opt *spopt = xmalloc(sizeof(*spopt));
-	spopt->opt = opt;
+	spopt->opt = _spank_option_copy (opt);
 	spopt->plugin = p;
 	spopt->optval = _spank_next_option_val();
 	spopt->found = 0;
@@ -676,6 +774,7 @@ static struct spank_plugin_opt *_spank_plugin_opt_create(struct
 
 void _spank_plugin_opt_destroy(struct spank_plugin_opt *spopt)
 {
+	_spank_option_destroy (spopt->opt);
 	xfree(spopt->optarg);
 	xfree(spopt);
 }
@@ -690,9 +789,56 @@ static int _opt_by_name(struct spank_plugin_opt *opt, char *optname)
 	return (strcmp(opt->opt->name, optname) == 0);
 }
 
-static int _spank_plugin_options_cache(struct spank_plugin *p)
+static int
+_spank_option_register(struct spank_plugin *p, struct spank_option *opt)
 {
 	int disabled = 0;
+	struct spank_plugin_opt *spopt;
+
+	if (option_cache == NULL) {
+		option_cache =
+		    list_create((ListDelF) _spank_plugin_opt_destroy);
+	}
+
+	spopt = list_find_first(option_cache, 
+			(ListFindF) _opt_by_name, opt->name);
+
+	if (spopt) {
+		struct spank_plugin *q = spopt->plugin;
+		info("spank: option \"%s\" provided by both %s and %s", 
+				opt->name, xbasename(p->fq_path), 
+				xbasename(q->fq_path));
+		/*
+		 *  Disable this option, but still cache it, in case
+		 *    options are loaded in a different order on the 
+		 *    remote side.
+		 */
+		disabled = 1;
+	}
+
+	if ((strlen(opt->name) > SPANK_OPTION_MAXLEN)) {
+		error("spank: option \"%s\" provided by %s too long. Ignoring.",
+			       	opt->name, p->name);
+		return (ESPANK_NOSPACE);
+	}
+
+	verbose("SPANK: appending plugin option \"%s\"\n", opt->name);
+	list_append(option_cache, _spank_plugin_opt_create(p, opt, disabled));
+
+	return (ESPANK_SUCCESS);
+}
+
+spank_err_t spank_option_register(spank_t sp, struct spank_option *opt)
+{
+	if (sp->phase != SPANK_INIT)
+		return (ESPANK_BAD_ARG);
+
+	if (!sp->plugin) {
+		error ("spank_option_register: no current plugin");
+		return (ESPANK_BAD_ARG);
+	}
+
+	if (!opt || !opt->name || !opt->usage)
+		return (ESPANK_BAD_ARG);
+
+	return (_spank_option_register(sp->plugin, opt));
+}
+
+static int _spank_plugin_options_cache(struct spank_plugin *p)
+{
 	struct spank_option *opt = p->opts;
 
 	if ((opt == NULL) || opt->name == NULL)
@@ -703,38 +849,8 @@ static int _spank_plugin_options_cache(struct spank_plugin *p)
 		    list_create((ListDelF) _spank_plugin_opt_destroy);
 	}
 
-	for (; opt && opt->name != NULL; opt++) {
-		struct spank_plugin_opt *spopt;
-
-		spopt =
-		    list_find_first(option_cache, (ListFindF) _opt_by_name,
-				    opt->name);
-		if (spopt) {
-			struct spank_plugin *q = spopt->plugin;
-			info("spank: option \"%s\" "
-			     "provided by both %s and %s", 
-			     opt->name, xbasename(p->fq_path), 
-			     xbasename(q->fq_path));
-			/*
-			 *  Disable this option, but still cache it, in case
-			 *    options are loaded in a different order on the 
-			 *    remote side.
-			 */
-			disabled = 1;
-		}
-
-		if ((strlen(opt->name) > SPANK_OPTION_MAXLEN)) {
-			error
-			    ("spank: option \"%s\" provided by %s too long."
-			     " Ignoring.", opt->name, p->name);
-			continue;
-		}
-
-		verbose("SPANK: appending plugin option \"%s\"\n",
-			opt->name);
-		list_append(option_cache,
-			    _spank_plugin_opt_create(p, opt, disabled));
-	}
+	for (; opt && opt->name != NULL; opt++)
+		_spank_option_register(p, opt);
 
 	return (0);
 }
diff --git a/src/common/slurm_protocol_socket_implementation.c b/src/common/slurm_protocol_socket_implementation.c
index adb19f2ceeff12c4e16124a4e121d9772c649fb6..d16960ab434e3bca216709026355a023fe6df4ac 100644
--- a/src/common/slurm_protocol_socket_implementation.c
+++ b/src/common/slurm_protocol_socket_implementation.c
@@ -494,6 +494,8 @@ slurm_fd _slurm_open_stream(slurm_addr *addr, bool retry)
 {
 	int retry_cnt;
 	slurm_fd fd;
+	uint16_t port;
+	char ip[32];
 
 	if ( (addr->sin_family == 0) || (addr->sin_port  == 0) ) {
 		error("Error connecting, bad data: family = %u, port = %u",
@@ -510,12 +512,15 @@ slurm_fd _slurm_open_stream(slurm_addr *addr, bool retry)
 		}
 
 		if (retry_cnt) {
-			if (retry_cnt == 1)
-				debug3("Error connecting, picking new stream port");
+			if (retry_cnt == 1) {
+				debug3("Error connecting, "
+				       "picking new stream port");
+			}
 			_sock_bind_wild(fd);
 		}
 
-		rc = _slurm_connect(fd, (struct sockaddr const *)addr, sizeof(*addr));
+		rc = _slurm_connect(fd, (struct sockaddr const *)addr, 
+				    sizeof(*addr));
 		if (rc >= 0)		    /* success */
 			break;
 		if ((errno != ECONNREFUSED) || 
@@ -531,7 +536,9 @@ slurm_fd _slurm_open_stream(slurm_addr *addr, bool retry)
 	return fd;
 
     error:
-	debug2("Error connecting slurm stream socket: %m");
+	slurm_get_ip_str(addr, &port, ip, sizeof(ip));
+	debug2("Error connecting slurm stream socket at %s:%d: %m",
+	       ip, ntohs(port));
 	if ((_slurm_close_stream(fd) < 0) && (errno == EINTR))
 		_slurm_close_stream(fd);	/* try again */
 	return SLURM_SOCKET_ERROR;
diff --git a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
index a24d3e45fa625c8b87bc22db0de435367ea98eaf..d77c5ea2f379b35e243358277d41749e4cad0284 100644
--- a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
+++ b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
@@ -869,15 +869,9 @@ static int _remove_common(mysql_conn_t *mysql_conn,
 		return rc;
 	}
 
-	if(!has_jobs)
-		query = xstrdup_printf(
-			"delete from %s where creation_time>%d && (%s);"
-			"delete from %s where creation_time>%d && (%s);"
-			"delete from %s where creation_time>%d && (%s);",
-			assoc_day_table, day_old, loc_assoc_char,
-			assoc_hour_table, day_old, loc_assoc_char,
-			assoc_month_table, day_old, loc_assoc_char);
-
+	/* We should not have to delete from the usage tables, only flag
+	 * the records as deleted, since we only delete things that are
+	 * typos.
+	 */
 	xstrfmtcat(query,
 		   "update %s set mod_time=%d, deleted=1 where (%s);"
 		   "update %s set mod_time=%d, deleted=1 where (%s);"
@@ -2212,16 +2206,6 @@ extern int acct_storage_p_add_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 		return SLURM_ERROR;
 
 	assoc_list = list_create(destroy_acct_association_rec);
-	assoc = xmalloc(sizeof(acct_association_rec_t));
-	list_append(assoc_list, assoc);
-
-	assoc->user = xstrdup("root");
-	assoc->acct = xstrdup("root");
-	assoc->fairshare = NO_VAL;
-	assoc->max_cpu_secs_per_job = NO_VAL;
-	assoc->max_jobs = NO_VAL;
-	assoc->max_nodes_per_job = NO_VAL;
-	assoc->max_wall_duration_per_job = NO_VAL;
 
 	user_name = uid_to_string((uid_t) uid);
 	itr = list_iterator_create(cluster_list);
@@ -2370,10 +2354,21 @@ extern int acct_storage_p_add_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 			added++;
 
 		/* Add user root by default to run from the root
-		 * association 
+		 * association.  This gets popped off so we need to
+		 * re-add it every time here. 
 		 */
-		xfree(assoc->cluster);
+		assoc = xmalloc(sizeof(acct_association_rec_t));
+		list_append(assoc_list, assoc);
+		
 		assoc->cluster = xstrdup(object->name);
+		assoc->user = xstrdup("root");
+		assoc->acct = xstrdup("root");
+		assoc->fairshare = NO_VAL;
+		assoc->max_cpu_secs_per_job = NO_VAL;
+		assoc->max_jobs = NO_VAL;
+		assoc->max_nodes_per_job = NO_VAL;
+		assoc->max_wall_duration_per_job = NO_VAL;
+
 		if(acct_storage_p_add_associations(mysql_conn, uid, assoc_list)
 		   == SLURM_ERROR) {
 			error("Problem adding root user association");
@@ -4366,7 +4361,6 @@ extern List acct_storage_p_remove_clusters(mysql_conn_t *mysql_conn,
 	int set = 0;
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
-	int day_old = now - DELETE_SEC_BACK;
 
 	if(!cluster_cond) {
 		error("we need something to change");
@@ -4430,13 +4424,8 @@ extern List acct_storage_p_remove_clusters(mysql_conn_t *mysql_conn,
 	}
 	xfree(query);
 
-	/* if this is a cluster update the machine usage tables as well */
-	query = xstrdup_printf("delete from %s where creation_time>%d && (%s);"
-			       "delete from %s where creation_time>%d && (%s);"
-			       "delete from %s where creation_time>%d && (%s);",
-			       cluster_day_table, day_old, assoc_char,
-			       cluster_hour_table, day_old, assoc_char,
-			       cluster_month_table, day_old, assoc_char);
+	/* We should not need to delete any cluster usage, just set it
+	 * to deleted */
 	xstrfmtcat(query,
 		   "update %s set mod_time=%d, deleted=1 where (%s);"
 		   "update %s set mod_time=%d, deleted=1 where (%s);"
diff --git a/src/plugins/select/bluegene/block_allocator/block_allocator.c b/src/plugins/select/bluegene/block_allocator/block_allocator.c
index dcce5561fede3110b1b7eefa937ec843d5433cbd..6001c2ac07fb06dba79e9e85d0a399ec4b77ffb8 100644
--- a/src/plugins/select/bluegene/block_allocator/block_allocator.c
+++ b/src/plugins/select/bluegene/block_allocator/block_allocator.c
@@ -101,19 +101,26 @@ s_p_options_t bg_conf_file_options[] = {
 #ifdef HAVE_BG_FILES
 /** */
 static void _bp_map_list_del(void *object);
+
+/** */
 static int _port_enum(int port);
+
 #endif
 /* */
 static int _check_for_options(ba_request_t* ba_request); 
+
 /* */
 static int _append_geo(int *geo, List geos, int rotate);
+
 /* */
 static int _fill_in_coords(List results, List start_list, 
 			   int *geometry, int conn_type);
+
 /* */
 static int _copy_the_path(List nodes, ba_switch_t *curr_switch,
 			  ba_switch_t *mark_switch, 
 			  int source, int dim);
+
 /* */
 static int _find_yz_path(ba_node_t *ba_node, int *first, 
 			 int *geometry, int conn_type);
@@ -145,26 +152,33 @@ static void _delete_path_list(void *object);
 /* find the first block match in the system */
 static int _find_match(ba_request_t* ba_request, List results);
 
+/** */
 static bool _node_used(ba_node_t* ba_node, int *geometry);
 
 /* */
 static void _switch_config(ba_node_t* source, ba_node_t* target, int dim, 
 			   int port_src, int port_tar);
+
 /* */
 static int _set_external_wires(int dim, int count, ba_node_t* source, 
 				ba_node_t* target);
+
 /* */
 static char *_set_internal_wires(List nodes, int size, int conn_type);
+
 /* */
 static int _find_x_path(List results, ba_node_t *ba_node, 
 			int *start, int *first, 
 			int *geometry, int found, int conn_type);
+
 /* */
 static int _find_x_path2(List results, ba_node_t *ba_node, 
 			 int *start, int *first, 
 			 int *geometry, int found, int conn_type);
+
 /* */
 static int _remove_node(List results, int *node_tar);
+
 /* */
 static int _find_next_free_using_port_2(ba_switch_t *curr_switch, 
 					int source_port, 
@@ -372,12 +386,39 @@ extern void destroy_ba_node(void *ptr)
 	}
 }
 
-/**
+/*
  * create a block request.  Note that if the geometry is given,
- * then size is ignored.  
+ * then size is ignored.  If elongate is true, the algorithm will first try
+ * to fit a block of cubic shape and then it will try other
+ * elongated geometries (i.e., 2x2x2 -> 4x2x1 -> 8x1x1). 
  * 
  * IN/OUT - ba_request: structure to allocate and fill in.  
- * return SUCCESS of operation.
+ * 
+ * ALL below IN's need to be set within the ba_request before the call
+ * if you want them to be used.
+ * ALL below OUT's are set and returned within the ba_request.
+ * IN - avail_node_bitmap: bitmap of usable midplanes.
+ * IN - blrtsimage: BlrtsImage for this block if not default
+ * IN - conn_type: connection type of request (TORUS or MESH or SMALL)
+ * IN - elongate: if true, will try to fit different geometries of
+ *      same size requests
+ * IN/OUT - geometry: requested/returned geometry of block
+ * IN - linuximage: LinuxImage for this block if not default
+ * IN - mloaderimage: MLoaderImage for this block if not default
+ * IN - nodecards: Number of nodecards in each block in the request; only
+ *      used for small block allocations.
+ * OUT - passthroughs: if there were passthroughs used in the
+ *       generation of the block.
+ * IN - procs: Number of real processors requested
+ * IN - quarters: Number of midplane quarters in each block in the request;
+ *      only used for small block allocations.
+ * IN - RamDiskimage: RamDiskImage for this block if not default
+ * IN - rotate: if true, allows rotation of block during fit
+ * OUT - save_name: hostlist of midplanes used in block
+ * IN/OUT - size: requested/returned count of midplanes in block
+ * IN - start: geo location of where to start the allocation
+ * IN - start_req: if set, use the start variable as the starting point
+ * return success of allocation/validation of params
  */
 extern int new_ba_request(ba_request_t* ba_request)
 {
@@ -967,6 +1008,11 @@ node_info_error:
 	init_grid(node_info_ptr);
 }
 
+/* If emulating a system, set up a known wiring configuration for a
+ * system of the given size.
+ * On a real bluegene system, query the system and get all of its
+ * wiring information.
+ */
 extern void init_wires()
 {
 	int x, y, z, i;
@@ -1037,10 +1083,12 @@ extern void ba_fini()
 }
 
 
-/** 
- * set the node in the internal configuration as unusable
+/* 
+ * Mark the node in the internal configuration as in use or not in use,
+ * and record the current state of the node.
  * 
- * IN ba_node: ba_node_t to put down
+ * IN ba_node: ba_node_t to update state
+ * IN state: new state of ba_node_t
  */
 extern void ba_update_node_state(ba_node_t *ba_node, uint16_t state)
 {
@@ -1070,11 +1118,12 @@ extern void ba_update_node_state(ba_node_t *ba_node, uint16_t state)
 		ba_node->used = false;
 	ba_node->state = state;
 }
-/** 
- * copy info from a ba_node
+
+/* 
+ * copy info from a ba_node, a direct memcpy of the ba_node_t
  * 
  * IN ba_node: node to be copied
- * OUT ba_node_t *: copied info must be freed with destroy_ba_node
+ * Returned ba_node_t *: copied info must be freed with destroy_ba_node
  */
 extern ba_node_t *ba_copy_node(ba_node_t *ba_node)
 {
@@ -1083,7 +1132,74 @@ extern ba_node_t *ba_copy_node(ba_node_t *ba_node)
 	memcpy(new_ba_node, ba_node, sizeof(ba_node_t));
 	return new_ba_node;
 }
-/** 
+
+/* 
+ * copy the path of the nodes given
+ * 
+ * IN nodes List of ba_node_t *'s: nodes to be copied
+ * OUT dest_nodes List of ba_node_t *'s: filled-in list of nodes and
+ *      their wiring.
+ * Return SLURM_SUCCESS on success, SLURM_ERROR on error
+ */
+extern int copy_node_path(List nodes, List *dest_nodes)
+{
+	int rc = SLURM_ERROR;
+	
+#ifdef HAVE_BG
+	ListIterator itr = NULL;
+	ListIterator itr2 = NULL;
+	ba_node_t *ba_node = NULL, *new_ba_node = NULL;
+	int dim;
+	ba_switch_t *curr_switch = NULL, *new_switch = NULL; 
+	
+	if(!nodes)
+		return SLURM_ERROR;
+	if(!*dest_nodes)
+		*dest_nodes = list_create(destroy_ba_node);
+
+	itr = list_iterator_create(nodes);
+	while((ba_node = list_next(itr))) {
+		itr2 = list_iterator_create(*dest_nodes);
+		while((new_ba_node = list_next(itr2))) {
+			if (ba_node->coord[X] == new_ba_node->coord[X] &&
+			    ba_node->coord[Y] == new_ba_node->coord[Y] &&
+			    ba_node->coord[Z] == new_ba_node->coord[Z]) 
+				break;	/* we found it */
+		}
+		list_iterator_destroy(itr2);
+	
+		if(!new_ba_node) {
+			debug2("adding %c%c%c as a new node",
+			       alpha_num[ba_node->coord[X]], 
+			       alpha_num[ba_node->coord[Y]],
+			       alpha_num[ba_node->coord[Z]]);
+			new_ba_node = ba_copy_node(ba_node);
+			_new_ba_node(new_ba_node, ba_node->coord, false);
+			list_push(*dest_nodes, new_ba_node);
+			
+		}
+		new_ba_node->used = true;
+		for(dim=0;dim<BA_SYSTEM_DIMENSIONS;dim++) {		
+			curr_switch = &ba_node->axis_switch[dim];
+			new_switch = &new_ba_node->axis_switch[dim];
+			if(curr_switch->int_wire[0].used) {
+				if(!_copy_the_path(*dest_nodes, 
+						   curr_switch, new_switch,
+						   0, dim)) {
+					rc = SLURM_ERROR;
+					break;
+				}
+			}
+		}
+		
+	}
+	list_iterator_destroy(itr);
+	rc = SLURM_SUCCESS;
+#endif	
+	return rc;
+}
+
+/* 
  * Try to allocate a block.
  * 
  * IN - ba_request: allocation request
@@ -1114,10 +1230,9 @@ extern int allocate_block(ba_request_t* ba_request, List results)
 }
 
 
-/** 
- * Doh!  Admin made a boo boo.  
- *
- * returns SLURM_SUCCESS if undo was successful.
+/* 
+ * Admin wants to remove a previous allocation.
+ * Allows the admin to delete a previous allocation, retrieved by letter code.
  */
 extern int remove_block(List nodes, int new_count)
 {
@@ -1148,11 +1263,11 @@ extern int remove_block(List nodes, int new_count)
 	return 1;
 }
 
-/** 
- * Doh!  Admin made a boo boo.  Note: Undo only has one history
- * element, so two consecutive undo's will fail.
- *
- * returns SLURM_SUCCESS if undo was successful.
+/* 
+ * Admin wants to change something about a previous allocation. 
+ * Allows the admin to change a previous allocation by giving the 
+ * letter code for the allocation and the variable to alter.
+ * (Not currently used in the system, update this if it is)
  */
 extern int alter_block(List nodes, int conn_type)
 {
@@ -1185,10 +1300,10 @@ extern int alter_block(List nodes, int conn_type)
 /* 	} */
 }
 
-/** 
+/* 
  * After a block is deleted or altered following allocations must
  * be redone to make sure correct path will be used in the real system
- *
+ * (Not currently used in the system, update this if it is)
  */
 extern int redo_block(List nodes, int *geo, int conn_type, int new_count)
 {
@@ -1211,64 +1326,19 @@ extern int redo_block(List nodes, int *geo, int conn_type, int new_count)
 	}
 }
 
-extern int copy_node_path(List nodes, List dest_nodes)
-{
-	int rc = SLURM_ERROR;
-	
-#ifdef HAVE_BG
-	ListIterator itr = NULL;
-	ListIterator itr2 = NULL;
-	ba_node_t *ba_node = NULL, *new_ba_node = NULL;
-	int dim;
-	ba_switch_t *curr_switch = NULL, *new_switch = NULL; 
-	
-	if(!nodes)
-		return SLURM_ERROR;
-	if(!dest_nodes)
-		dest_nodes = list_create(destroy_ba_node);
-
-	itr = list_iterator_create(nodes);
-	while((ba_node = list_next(itr))) {
-		itr2 = list_iterator_create(dest_nodes);
-		while((new_ba_node = list_next(itr2))) {
-			if (ba_node->coord[X] == new_ba_node->coord[X] &&
-			    ba_node->coord[Y] == new_ba_node->coord[Y] &&
-			    ba_node->coord[Z] == new_ba_node->coord[Z]) 
-				break;	/* we found it */
-		}
-		list_iterator_destroy(itr2);
-	
-		if(!new_ba_node) {
-			debug2("adding %c%c%c as a new node",
-			       alpha_num[ba_node->coord[X]], 
-			       alpha_num[ba_node->coord[Y]],
-			       alpha_num[ba_node->coord[Z]]);
-			new_ba_node = ba_copy_node(ba_node);
-			_new_ba_node(new_ba_node, ba_node->coord, false);
-			list_push(dest_nodes, new_ba_node);
-			
-		}
-		new_ba_node->used = true;
-		for(dim=0;dim<BA_SYSTEM_DIMENSIONS;dim++) {		
-			curr_switch = &ba_node->axis_switch[dim];
-			new_switch = &new_ba_node->axis_switch[dim];
-			if(curr_switch->int_wire[0].used) {
-				if(!_copy_the_path(dest_nodes, 
-						   curr_switch, new_switch,
-						   0, dim)) {
-					rc = SLURM_ERROR;
-					break;
-				}
-			}
-		}
-		
-	}
-	list_iterator_destroy(itr);
-	rc = SLURM_SUCCESS;
-#endif	
-	return rc;
-}
-
+/*
+ * Used to set a block into a virtual system.  The system can be
+ * cleared first and this function sets all the wires and midplanes
+ * used in the nodelist given.  The nodelist is a list of ba_node_t's
+ * that are already set up.  This is very handy for detecting when the
+ * passthroughs used by one block are also used by another block being
+ * added, even though the two blocks share no
+ * midplanes. Doing a simple bitmap & will not reveal this.
+ *
+ * Returns SLURM_SUCCESS if nodelist fits into system without
+ * conflict, and SLURM_ERROR if nodelist conflicts with something
+ * already in the system.
+ */
 extern int check_and_set_node_list(List nodes)
 {
 	int rc = SLURM_ERROR;
@@ -1352,6 +1422,19 @@ end_it:
 	return rc;
 }
 
+/*
+ * Used to find and set up the midplanes and wires in the virtual
+ * system and return them in List results. 
+ * 
+ * IN/OUT results - a list with a NULL destroyer filled in with
+ *        midplanes and wires set to create the block with the api. If
+ *        only interested in the hostlist, NULL is also accepted.
+ * IN start - where to start the allocation.
+ * IN geometry - the requested geometry of the block.
+ * IN conn_type - mesh, torus, or small.
+ * RET char * - hostlist of midplanes results represent must be
+ *     xfreed.  NULL on failure
+ */
 extern char *set_bg_block(List results, int *start, 
 			  int *geometry, int conn_type)
 {
@@ -1457,6 +1540,7 @@ extern char *set_bg_block(List results, int *start,
 end_it:
 	if(!send_results && results) {
 		list_destroy(results);
+		results = NULL;
 	}
 	if(name!=NULL) {
 		debug2("name = %s", name);
@@ -1468,6 +1552,10 @@ end_it:
 	return name;	
 }
 
+/*
+ * Resets the virtual system to a virgin state.  If track_down_nodes is set
+ * then those midplanes are not set to idle, but kept in a down state.
+ */
 extern int reset_ba_system(bool track_down_nodes)
 {
 	int x;
@@ -1496,8 +1584,15 @@ extern int reset_ba_system(bool track_down_nodes)
 	return 1;
 }
 
-/* need to call rest_all_removed_bps before starting another
- * allocation attempt 
+/*
+ * Used to set all midplanes in a special used state except the ones
+ * we are able to use in a new allocation.
+ *
+ * IN: hostlist of midplanes we do not want
+ * RET: SLURM_SUCCESS on success, or SLURM_ERROR on error
+ *
+ * Note: Need to call reset_all_removed_bps before starting another
+ * allocation attempt.
  */
 extern int removable_set_bps(char *bps)
 {
@@ -1568,6 +1663,10 @@ extern int removable_set_bps(char *bps)
  	return SLURM_SUCCESS;
 }
 
+/*
+ * Resets the virtual system to its state prior to calling
+ * removable_set_bps or set_all_bps_except.
+ */
 extern int reset_all_removed_bps()
 {
 	int x;
@@ -1588,7 +1687,11 @@ extern int reset_all_removed_bps()
 	return SLURM_SUCCESS;
 }
 
-/* need to call rest_all_removed_bps before starting another
+/*
+ * IN: hostlist of midplanes we do not want
+ * RET: SLURM_SUCCESS on success, or SLURM_ERROR on error
+ *
+ * Need to call reset_all_removed_bps before starting another
  * allocation attempt if possible use removable_set_bps since it is
  * faster. It does basically the opposite of this function. If you
  * have to come up with this list though it is faster to use this
@@ -1676,7 +1779,9 @@ extern int set_all_bps_except(char *bps)
  	return SLURM_SUCCESS;
 }
 
-/* init_grid - set values of every grid point */
+/*
+ * set values of every grid point (used in smap)
+ */
 extern void init_grid(node_info_msg_t * node_info_ptr)
 {
 	node_info_t *node_ptr = NULL;
@@ -1761,6 +1866,155 @@ extern void init_grid(node_info_msg_t * node_info_ptr)
 	return;
 }
 
+/*
+ * Convert a BG API error code to a string
+ * IN inx - error code from any of the BG Bridge APIs
+ * RET - string describing the error condition
+ */
+extern char *bg_err_str(status_t inx)
+{
+#ifdef HAVE_BG_FILES
+	switch (inx) {
+	case STATUS_OK:
+		return "Status OK";
+	case PARTITION_NOT_FOUND:
+		return "Partition not found";
+	case JOB_NOT_FOUND:
+		return "Job not found";
+	case BP_NOT_FOUND:
+		return "Base partition not found";
+	case SWITCH_NOT_FOUND:
+		return "Switch not found";
+	case JOB_ALREADY_DEFINED:
+		return "Job already defined";
+	case CONNECTION_ERROR:
+		return "Connection error";
+	case INTERNAL_ERROR:
+		return "Internal error";
+	case INVALID_INPUT:
+		return "Invalid input";
+	case INCOMPATIBLE_STATE:
+		return "Incompatible state";
+	case INCONSISTENT_DATA:
+		return "Inconsistent data";
+	}
+#endif
+
+	return "?";
+}
+
+/*
+ * Set up the map for resolving
+ */
+extern int set_bp_map(void)
+{
+#ifdef HAVE_BG_FILES
+	static rm_BGL_t *bg = NULL;
+	int rc;
+	rm_BP_t *my_bp = NULL;
+	ba_bp_map_t *bp_map = NULL;
+	int bp_num, i;
+	char *bp_id = NULL;
+	rm_location_t bp_loc;
+	int number = 0;
+
+	if(_bp_map_initialized)
+		return 1;
+
+	bp_map_list = list_create(_bp_map_list_del);
+
+	if (!have_db2) {
+		fatal("Can't access DB2 library, run from service node");
+		return -1;
+	}
+
+	if (!getenv("DB2INSTANCE") || !getenv("VWSPATH")) {
+		fatal("Missing DB2INSTANCE or VWSPATH env var. "
+			"Execute 'db2profile'");
+		return -1;
+	}
+	
+	if ((rc = bridge_get_bg(&bg)) != STATUS_OK) {
+		error("bridge_get_BGL(): %d", rc);
+		return -1;
+	}
+	
+	if ((rc = bridge_get_data(bg, RM_BPNum, &bp_num)) != STATUS_OK) {
+		error("bridge_get_data(RM_BPNum): %d", rc);
+		bp_num = 0;
+	}
+
+	for (i=0; i<bp_num; i++) {
+
+		if (i) {
+			if ((rc = bridge_get_data(bg, RM_NextBP, &my_bp))
+			    != STATUS_OK) {
+				error("bridge_get_data(RM_NextBP): %d", rc);
+				break;
+			}
+		} else {
+			if ((rc = bridge_get_data(bg, RM_FirstBP, &my_bp))
+			    != STATUS_OK) {
+				error("bridge_get_data(RM_FirstBP): %d", rc);
+				break;
+			}
+		}
+		
+		bp_map = (ba_bp_map_t *) xmalloc(sizeof(ba_bp_map_t));
+		
+		if ((rc = bridge_get_data(my_bp, RM_BPID, &bp_id))
+		    != STATUS_OK) {
+			xfree(bp_map);
+			error("bridge_get_data(RM_BPID): %d", rc);
+			continue;
+		}
+
+		if(!bp_id) {
+			error("No BP ID was returned from database");
+			continue;
+		}
+			
+		if ((rc = bridge_get_data(my_bp, RM_BPLoc, &bp_loc))
+		    != STATUS_OK) {
+			xfree(bp_map);
+			error("bridge_get_data(RM_BPLoc): %d", rc);
+			continue;
+		}
+		
+		bp_map->bp_id = xstrdup(bp_id);
+		bp_map->coord[X] = bp_loc.X;
+		bp_map->coord[Y] = bp_loc.Y;
+		bp_map->coord[Z] = bp_loc.Z;
+		
+		number = xstrntol(bp_id+1, NULL,
+				  BA_SYSTEM_DIMENSIONS, HOSTLIST_BASE);
+/* no longer needed for calculation */
+/* 		if(DIM_SIZE[X] > bp_loc.X */
+/* 		   && DIM_SIZE[Y] > bp_loc.Y */
+/* 		   && DIM_SIZE[Z] > bp_loc.Z) */
+/* 			ba_system_ptr->grid */
+/* 				[bp_loc.X] */
+/* 				[bp_loc.Y] */
+/* 				[bp_loc.Z].phys_x = */
+/* 				number / (HOSTLIST_BASE * HOSTLIST_BASE); */
+		
+		list_push(bp_map_list, bp_map);
+		
+		free(bp_id);		
+	}
+
+	if ((rc = bridge_free_bg(bg)) != STATUS_OK)
+		error("bridge_free_BGL(): %d", rc);
+	
+#endif
+	_bp_map_initialized = true;
+	return 1;
+	
+}
+
+/*
+ * find a base block's bg location 
+ */
 extern int *find_bp_loc(char* bp_id)
 {
 #ifdef HAVE_BG_FILES
@@ -1787,6 +2041,9 @@ extern int *find_bp_loc(char* bp_id)
 #endif
 }
 
+/*
+ * find a rack/midplane location 
+ */
 extern char *find_bp_rack_mid(char* xyz)
 {
 #ifdef HAVE_BG_FILES
@@ -1826,6 +2083,9 @@ extern char *find_bp_rack_mid(char* xyz)
 #endif
 }
 
+/*
+ * set the used wires in the virtual system for a block from the real system 
+ */
 extern int load_block_wiring(char *bg_block_id)
 {
 #ifdef HAVE_BG_FILES
@@ -2043,6 +2303,10 @@ extern int load_block_wiring(char *bg_block_id)
 	
 }
 
+/*
+ * get the used wires for a block out of the database and return the
+ * node list
+ */
 extern List get_and_set_block_wiring(char *bg_block_id)
 {
 #ifdef HAVE_BG_FILES
@@ -2300,6 +2564,7 @@ static void _bp_map_list_del(void *object)
 	}
 }
 
+/* translation from the enum to the actual port number */
 static int _port_enum(int port)
 {
 	switch(port) {
@@ -2328,6 +2593,10 @@ static int _port_enum(int port)
 
 #endif
 
+/*
+ * This function is here to check options for rotating and elongating
+ * and set up the request based on the count of each option
+ */
 static int _check_for_options(ba_request_t* ba_request) 
 {
 	int temp;
@@ -2394,6 +2663,9 @@ static int _check_for_options(ba_request_t* ba_request)
 	return 0;
 }
 
+/* 
+ * grab all the geometries that we can get and append them to the list geos
+ */
 static int _append_geo(int *geometry, List geos, int rotate) 
 {
 	ListIterator itr;
@@ -2436,6 +2708,9 @@ static int _append_geo(int *geometry, List geos, int rotate)
 	return 1;
 }
 
+/*
+ *
+ */
 static int _fill_in_coords(List results, List start_list,
 			    int *geometry, int conn_type)
 {
@@ -2903,150 +3178,6 @@ static int _reset_the_path(ba_switch_t *curr_switch, int source,
 //	return 1;
 }
 
-/*
- * Convert a BG API error code to a string
- * IN inx - error code from any of the BG Bridge APIs
- * RET - string describing the error condition
- */
-extern char *bg_err_str(status_t inx)
-{
-#ifdef HAVE_BG_FILES
-	switch (inx) {
-	case STATUS_OK:
-		return "Status OK";
-	case PARTITION_NOT_FOUND:
-		return "Partition not found";
-	case JOB_NOT_FOUND:
-		return "Job not found";
-	case BP_NOT_FOUND:
-		return "Base partition not found";
-	case SWITCH_NOT_FOUND:
-		return "Switch not found";
-	case JOB_ALREADY_DEFINED:
-		return "Job already defined";
-	case CONNECTION_ERROR:
-		return "Connection error";
-	case INTERNAL_ERROR:
-		return "Internal error";
-	case INVALID_INPUT:
-		return "Invalid input";
-	case INCOMPATIBLE_STATE:
-		return "Incompatible state";
-	case INCONSISTENT_DATA:
-		return "Inconsistent data";
-	}
-#endif
-
-	return "?";
-}
-
-/** */
-extern int set_bp_map(void)
-{
-#ifdef HAVE_BG_FILES
-	static rm_BGL_t *bg = NULL;
-	int rc;
-	rm_BP_t *my_bp = NULL;
-	ba_bp_map_t *bp_map = NULL;
-	int bp_num, i;
-	char *bp_id = NULL;
-	rm_location_t bp_loc;
-	int number = 0;
-
-	if(_bp_map_initialized)
-		return 1;
-
-	bp_map_list = list_create(_bp_map_list_del);
-
-	if (!have_db2) {
-		fatal("Can't access DB2 library, run from service node");
-		return -1;
-	}
-
-	if (!getenv("DB2INSTANCE") || !getenv("VWSPATH")) {
-		fatal("Missing DB2INSTANCE or VWSPATH env var."
-			"Execute 'db2profile'");
-		return -1;
-	}
-	
-	if ((rc = bridge_get_bg(&bg)) != STATUS_OK) {
-		error("bridge_get_BGL(): %d", rc);
-		return -1;
-	}
-	
-	if ((rc = bridge_get_data(bg, RM_BPNum, &bp_num)) != STATUS_OK) {
-		error("bridge_get_data(RM_BPNum): %d", rc);
-		bp_num = 0;
-	}
-
-	for (i=0; i<bp_num; i++) {
-
-		if (i) {
-			if ((rc = bridge_get_data(bg, RM_NextBP, &my_bp))
-			    != STATUS_OK) {
-				error("bridge_get_data(RM_NextBP): %d", rc);
-				break;
-			}
-		} else {
-			if ((rc = bridge_get_data(bg, RM_FirstBP, &my_bp))
-			    != STATUS_OK) {
-				error("bridge_get_data(RM_FirstBP): %d", rc);
-				break;
-			}
-		}
-		
-		bp_map = (ba_bp_map_t *) xmalloc(sizeof(ba_bp_map_t));
-		
-		if ((rc = bridge_get_data(my_bp, RM_BPID, &bp_id))
-		    != STATUS_OK) {
-			xfree(bp_map);
-			error("bridge_get_data(RM_BPID): %d", rc);
-			continue;
-		}
-
-		if(!bp_id) {
-			error("No BP ID was returned from database");
-			continue;
-		}
-			
-		if ((rc = bridge_get_data(my_bp, RM_BPLoc, &bp_loc))
-		    != STATUS_OK) {
-			xfree(bp_map);
-			error("bridge_get_data(RM_BPLoc): %d", rc);
-			continue;
-		}
-		
-		bp_map->bp_id = xstrdup(bp_id);
-		bp_map->coord[X] = bp_loc.X;
-		bp_map->coord[Y] = bp_loc.Y;
-		bp_map->coord[Z] = bp_loc.Z;
-		
-		number = xstrntol(bp_id+1, NULL,
-				  BA_SYSTEM_DIMENSIONS, HOSTLIST_BASE);
-/* no longer needed for calculation */
-/* 		if(DIM_SIZE[X] > bp_loc.X */
-/* 		   && DIM_SIZE[Y] > bp_loc.Y */
-/* 		   && DIM_SIZE[Z] > bp_loc.Z) */
-/* 			ba_system_ptr->grid */
-/* 				[bp_loc.X] */
-/* 				[bp_loc.Y] */
-/* 				[bp_loc.Z].phys_x = */
-/* 				number / (HOSTLIST_BASE * HOSTLIST_BASE); */
-		
-		list_push(bp_map_list, bp_map);
-		
-		free(bp_id);		
-	}
-
-	if ((rc = bridge_free_bg(bg)) != STATUS_OK)
-		error("bridge_free_BGL(): %s", rc);	
-	
-#endif
-	_bp_map_initialized = true;
-	return 1;
-	
-}
-
 static void _new_ba_node(ba_node_t *ba_node, int *coord, bool track_down_nodes)
 {
 	int i,j;
diff --git a/src/plugins/select/bluegene/block_allocator/block_allocator.h b/src/plugins/select/bluegene/block_allocator/block_allocator.h
index 133654462f61a550aae9e54393f56da93aeef35e..36ec351bb7aac76935aa2721bebca69aef47a1e8 100644
--- a/src/plugins/select/bluegene/block_allocator/block_allocator.h
+++ b/src/plugins/select/bluegene/block_allocator/block_allocator.h
@@ -58,103 +58,115 @@ enum {X, Y, Z};
 
 /* */
 
-/** 
+/* 
  * structure that holds switch path information for finding the wiring 
  * path without setting the configuration.
  *
- * - geometry - node location
  * - dim      - Which Axis it is on
+ * - geometry - node location
  * - in       - ingress port.
  * - out      - egress port.
  * 
  */
 typedef struct {
-	int geometry[BA_SYSTEM_DIMENSIONS];
 	int dim;
+	int geometry[BA_SYSTEM_DIMENSIONS];
 	int in; 
 	int out;
 } ba_path_switch_t; 
 
-/** 
+/* 
  * structure that holds the configuration settings for each request
- * 
- * - letter            - filled in after the request is fulfilled
- * - geometry          - request size
- * - size              - node count for request
- * - conn_type         - MESH or TORUS or SMALL
- * - rotate_count      - when rotating we keep a count so we aren't in an infinate loop.
- * - elongate_count    - when elongating we keep a count so we aren't in an infinate loop.
- * - rotate            - weather to allow rotating or not.
- * - elongate          - weather to allow elongating or not.
- * - force_contig      - weather to allow force contiguous or not.
- * 
  */
 typedef struct {
-	char *save_name;
+	bitstr_t *avail_node_bitmap;   /* pointer to available nodes */	
 	char *blrtsimage;              /* BlrtsImage for this block */
+	int conn_type;                 /* mesh, torus, or small */
+	bool elongate;                 /* whether to allow elongation or not */
+	int elongate_count;            /* place in elongate_geos list
+					  we are at */
+	List elongate_geos;            /* list of possible shapes of
+					  blocks. contains int* ptrs */
+	int geometry[BA_SYSTEM_DIMENSIONS]; /* size of block in geometry */
 	char *linuximage;              /* LinuxImage for this block */
 	char *mloaderimage;            /* mloaderImage for this block */
+	int nodecards;                 /* number of nodecards in
+					* block, only used for small
+					* block creation */
+	bool passthrough;              /* filled in if there are
+					  passthroughs in the block created */
+	int procs;                     /* Number of Real processors in
+					  block */
+	int quarters;                  /* number of midplane quarters in
+					* block, only used for small
+					* block creation */
 	char *ramdiskimage;            /* RamDiskImage for this block */
-	int geometry[BA_SYSTEM_DIMENSIONS];
-	int start[BA_SYSTEM_DIMENSIONS];
-	int start_req;
-	int size; 
-	int procs; 
-	int conn_type;
-	int rotate_count;
-	int elongate_count;
-	int nodecards;
-	int quarters;
-	bool passthrough;
-	bool rotate;
-	bool elongate; 
-	List elongate_geos;
-	bitstr_t *avail_node_bitmap;	/* pointer to available nodes */	
+	bool rotate;                   /* whether to allow rotation or not */
+	int rotate_count;              /* number of times rotated */
+	char *save_name;               /* hostlist of midplanes in block */
+	int size;                      /* count of midplanes in block */
+	int start[BA_SYSTEM_DIMENSIONS]; /* where to start creation of
+					    block */
+	int start_req;                 /* set if there was a start
+					  request */
 } ba_request_t; 
 
+/* structure filled in from reading bluegene.conf file for block
+ * creation */
 typedef struct {
-	char *block;
+	char *block;                   /* Hostlist of midplanes in the
+					  block */
+	int conn_type;                 /* mesh, torus, or small */
 	char *blrtsimage;              /* BlrtsImage for this block */
 	char *linuximage;              /* LinuxImage for this block */
 	char *mloaderimage;            /* mloaderImage for this block */
+	uint16_t nodecards;            /* number of nodecards in
+					* block, only used for small
+					* block creation */
+	uint16_t quarters;             /* number of midplane quarters in
+					* block, only used for small
+					* block creation */
 	char *ramdiskimage;            /* RamDiskImage for this block */
-	int conn_type;
-	uint16_t quarters;
-	uint16_t nodecards;
 } blockreq_t;
 
+/* structure filled in from reading bluegene.conf file for specifying
+ * images */
 typedef struct {
-	char *name;
-	bool def;
-	List groups;
+	bool def;                      /* Whether image is the default
+					  image or not */
+	List groups;                   /* list of groups able to use
+					* the image contains
+					* image_group_t's */
+	char *name;                    /* Name of image */
 } image_t;
 
 typedef struct {
 	char *name;
 	gid_t gid;
 } image_group_t;
-/** 
+
+/* 
  * structure that holds the configuration settings for each connection
  * 
- * - port_tar - which port the connection is going to
- *              interanlly - always going to something within the switch.
- *              exteranlly - always going to the next hop outside the switch.
  * - node_tar - coords of where the next hop is externally
  *              interanlly - nothing.
  *              exteranlly - location of next hop.
+ * - port_tar - which port the connection is going to
+ *              internally - always going to something within the switch.
+ *              externally - always going to the next hop outside the switch.
  * - used     - weather or not the connection is used.
  * 
  */
 typedef struct 
 {
-	/* target port */ 
-	int port_tar;
-
 	/* target label */
 	int node_tar[BA_SYSTEM_DIMENSIONS];
+	/* target port */ 
+	int port_tar;
 	bool used;	
 } ba_connection_t;
-/** 
+
+/* 
  * structure that holds the configuration settings for each switch
  * which pretty much means the wiring information 
  * - int_wire - keeps details of where the wires are attached
@@ -173,23 +185,28 @@ typedef struct
  * ba_node_t: node within the allocation system.
  */
 typedef struct {
-	/* set if using this node in a block */
-	uint16_t used;
-
-	/* coordinates */
+	/* a switch for each dimension */
+	ba_switch_t axis_switch[BA_SYSTEM_DIMENSIONS]; 
+	/* coordinates of midplane */
 	int coord[BA_SYSTEM_DIMENSIONS];
-	ba_switch_t axis_switch[BA_SYSTEM_DIMENSIONS];
-	char letter;
+	/* color of letter used in smap */
 	int color;
+	/* midplane index used for easy lookup of the midplane */
 	int index;
-	int state;
+	/* letter used in smap */
+	char letter;                    
 //	int phys_x;	// no longer needed 
+	int state;
+	/* set if using this midplane in a block */
+	uint16_t used;
 } ba_node_t;
 
 typedef struct {
+	/* total number of procs on the system */
 	int num_of_proc;
 
-	/* made to hold info about a system, which right now is only a grid of ba_nodes*/
+	/* made to hold info about a system, which right now is only a
+	 * grid of ba_nodes*/
 #ifdef HAVE_BG
 	ba_node_t ***grid;
 #else
@@ -200,25 +217,32 @@ typedef struct {
 /* Used to Keep track of where the Base Blocks are at all times
    Rack and Midplane is the bp_id and XYZ is the coords.
 */
-
 typedef struct {
 	char *bp_id;
 	int coord[BA_SYSTEM_DIMENSIONS];	
 } ba_bp_map_t;
 
 /* Global */
-extern List bp_map_list;
-extern char letters[62];
-extern char colors[6];
-extern int DIM_SIZE[BA_SYSTEM_DIMENSIONS];
-extern s_p_options_t bg_conf_file_options[];
-
+extern List bp_map_list; /* list used for conversion from XYZ to Rack
+			  * midplane */
+extern char letters[62]; /* complete list of letters used in smap */
+extern char colors[6]; /* index into colors used for smap */
+extern int DIM_SIZE[BA_SYSTEM_DIMENSIONS]; /* how many midplanes in
+					    * each dimension */
+extern s_p_options_t bg_conf_file_options[]; /* used to parse the
+					      * bluegene.conf file. */
+
+/* Translate a state enum to a readable string */
 extern char *bg_block_state_string(rm_partition_state_t state);
+
+/* Parse a block request from the bluegene.conf file */
 extern int parse_blockreq(void **dest, slurm_parser_enum_t type,
 			  const char *key, const char *value, 
 			  const char *line, char **leftover);
 
 extern void destroy_blockreq(void *ptr);
+
+/* Parse image information from the bluegene.conf file */
 extern int parse_image(void **dest, slurm_parser_enum_t type,
 		       const char *key, const char *value, 
 		       const char *line, char **leftover);
@@ -231,18 +255,34 @@ extern void destroy_ba_node(void *ptr);
  * create a block request.  Note that if the geometry is given,
  * then size is ignored.  If elongate is true, the algorithm will try
  * to fit that a block of cubic shape and then it will try other
- * elongated geometries.  (ie, 2x2x2 -> 4x2x1 -> 8x1x1). Note that
- * size must be a power of 2, given 3 dimensions.
+ * elongated geometries.  (ie, 2x2x2 -> 4x2x1 -> 8x1x1). 
  * 
- * OUT - ba_request: structure to allocate and fill in.  
- * IN - geometry: requested geometry of block
- * IN - size: requested size of block
- * IN - rotate: if true, allows rotation of block during fit
+ * IN/OUT - ba_request: structure to allocate and fill in.  
+ * 
+ * ALL below IN's need to be set within the ba_request before the call
+ * if you want them to be used.
+ * ALL below OUT's are set and returned within the ba_request.
+ * IN - avail_node_bitmap: bitmap of usable midplanes.
+ * IN - blrtsimage: BlrtsImage for this block if not default
+ * IN - conn_type: connection type of request (TORUS or MESH or SMALL)
  * IN - elongate: if true, will try to fit different geometries of
  *      same size requests
- * IN - contig: enforce contiguous regions constraint
- * IN - conn_type: connection type of request (TORUS or MESH or SMALL)
- * 
+ * IN/OUT - geometry: requested/returned geometry of block
+ * IN - linuximage: LinuxImage for this block if not default
+ * IN - mloaderimage: MLoaderImage for this block if not default
+ * IN - nodecards: Number of nodecards in each block in request, only
+ *      used for small block allocations.
+ * OUT - passthrough: set if there were passthroughs used in the
+ *       generation of the block.
+ * IN - procs: Number of real processors requested
+ * IN - quarters: Number of midplane quarters in each block in request,
+ *      only used for small block allocations.
+ * IN - ramdiskimage: RamDiskImage for this block if not default
+ * IN - rotate: if true, allows rotation of block during fit
+ * OUT - save_name: hostlist of midplanes used in block
+ * IN/OUT - size: requested/returned count of midplanes in block
+ * IN - start: geo location of where to start the allocation
+ * IN - start_req: if set, use the start variable as the starting location
  * return success of allocation/validation of params
  */
 extern int new_ba_request(ba_request_t* ba_request);
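+
+/*
+ * Example (illustrative sketch only): the field values, the
+ * SELECT_TORUS constant name, and the local variables are assumptions
+ * made for illustration, not requirements of this interface.
+ *
+ *	ba_request_t request;
+ *	memset(&request, 0, sizeof(ba_request_t));
+ *	request.geometry[0] = 2;
+ *	request.geometry[1] = 2;
+ *	request.geometry[2] = 2;
+ *	request.conn_type = SELECT_TORUS;
+ *	request.rotate = true;
+ *	request.elongate = true;
+ *	rc = new_ba_request(&request);	// validate and fill in the request
+ */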
@@ -267,44 +307,53 @@ extern void print_ba_request(ba_request_t* ba_request);
  * Initialize internal structures by either reading previous block
  * configurations from a file or by running the graph solver.
  * 
- * IN: dunno yet, probably some stuff denoting downed nodes, etc.
+ * IN: node_info_ptr - node_info_msg_t * from slurm_load_node(); may be NULL.
  * 
- * return: success or error of the intialization.
+ * return: void.
  */
-extern void ba_init();
-/*
+extern void ba_init(node_info_msg_t *node_info_ptr);
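+
+/*
+ * Typical call sequence (sketch; the SHOW_ALL flag and the error
+ * handling shown here are assumptions, not requirements of this
+ * interface):
+ *
+ *	node_info_msg_t *node_info = NULL;
+ *	if (slurm_load_node((time_t) NULL, &node_info, SHOW_ALL)
+ *	    != SLURM_SUCCESS)
+ *		node_info = NULL;	// ba_init() also accepts NULL
+ *	ba_init(node_info);
+ */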
+
+/* If emulating a system, set up a known configuration for wires in a
+ * system of the size given.
+ * If a real bluegene system, query the system and get all wiring
+ * information of the system.
  */
 extern void init_wires();
-/** 
+
+/* 
  * destroy all the internal (global) data structs.
  */
 extern void ba_fini();
 
-/** 
- * set the node in the internal configuration as unusable
+/* 
+ * mark the node in the internal configuration as in use or not in use,
+ * along with the current state of the node.
  * 
- * IN ba_node: ba_node_t to put down
+ * IN ba_node: ba_node_t to update state
  * IN state: new state of ba_node_t
  */
 extern void ba_update_node_state(ba_node_t *ba_node, uint16_t state);
 
-/** 
- * copy info from a ba_node
+/* 
+ * copy info from a ba_node, a direct memcpy of the ba_node_t
  * 
  * IN ba_node: node to be copied
- * OUT ba_node_t *: copied info must be freed with destroy_ba_node
+ * Returned ba_node_t *: copied info must be freed with destroy_ba_node
  */
 extern ba_node_t *ba_copy_node(ba_node_t *ba_node);
 
-/** 
+/* 
  * copy the path of the nodes given
  * 
- * IN List of ba_node_t *'s: nodes to be copied
- * OUT List of ba_node_t *'s: filled in list of nodes wiring
+ * IN nodes List of ba_node_t *'s: nodes to be copied
+ * OUT dest_nodes List of ba_node_t *'s: filled-in list of nodes with
+ * wiring.
+ * Return SLURM_SUCCESS on success, SLURM_ERROR on error
  */
-extern int copy_node_path(List nodes, List dest_nodes);
+extern int copy_node_path(List nodes, List *dest_nodes);
 
-/** 
+/* 
  * Try to allocate a block.
  * 
  * IN - ba_request: allocation request
@@ -316,38 +365,99 @@ extern int copy_node_path(List nodes, List dest_nodes);
  */
 extern int allocate_block(ba_request_t* ba_request, List results);
 
-/** 
+/* 
  * Admin wants to remove a previous allocation.
  * will allow Admin to delete a previous allocation retrival by letter code.
  */
 extern int remove_block(List nodes, int new_count);
 
-/** 
+/* 
  * Admin wants to change something about a previous allocation. 
  * will allow Admin to change previous allocation by giving the 
  * letter code for the allocation and the variable to alter
- *
+ * (Not currently used in the system, update this if it is)
  */
 extern int alter_block(List nodes, int conn_type);
 
-/** 
+/* 
  * After a block is deleted or altered following allocations must
  * be redone to make sure correct path will be used in the real system
- *
+ * (Not currently used in the system, update this if it is)
  */
 extern int redo_block(List nodes, int *geo, int conn_type, int new_count);
 
+/*
+ * Used to set a block into a virtual system.  The system can be
+ * cleared first and this function sets all the wires and midplanes
+ * used in the nodelist given.  The nodelist is a list of ba_node_t's
+ * that are already set up.  This is very handy to test whether there are
+ * any passthroughs used by one block when adding another block that
+ * also uses those wires while neither uses any overlapping
+ * midplanes. Doing a simple bitmap & will not reveal this.
+ *
+ * Returns SLURM_SUCCESS if nodelist fits into system without
+ * conflict, and SLURM_ERROR if nodelist conflicts with something
+ * already in the system.
+ */
 extern int check_and_set_node_list(List nodes);
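+
+/*
+ * For example, the overlap test described above looks roughly like the
+ * following sketch (this mirrors how blocks_overlap() in the bluegene
+ * plugin uses it; rec_a and rec_b are bg_record_t pointers):
+ *
+ *	reset_ba_system(false);
+ *	check_and_set_node_list(rec_a->bg_block_list);
+ *	rc = check_and_set_node_list(rec_b->bg_block_list);
+ *
+ * A SLURM_ERROR from the second call means rec_b needs a wire or
+ * midplane that rec_a already has in use, i.e. the blocks overlap.
+ */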
 
+/*
+ * Used to find, and set up midplanes and the wires in the virtual
+ * system and return them in List results 
+ * 
+ * IN/OUT results - a list with a NULL destroyer filled in with
+ *        midplanes and wires set to create the block with the api.  If
+ *        only interested in the hostlist, NULL is also accepted.
+ * IN start - where to start the allocation.
+ * IN geometry - the requested geometry of the block.
+ * IN conn_type - mesh, torus, or small.
+ * RET char * - hostlist of the midplanes the results represent; must be
+ *     xfreed.  NULL on failure
+ */
 extern char *set_bg_block(List results, int *start, 
 			  int *geometry, int conn_type);
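+
+/*
+ * Minimal usage sketch (local variable names are illustrative; the
+ * request fields are assumed to have been filled in as for
+ * new_ba_request() above):
+ *
+ *	List results = list_create(NULL);
+ *	char *name = set_bg_block(results, request.start,
+ *				  request.geometry, request.conn_type);
+ *	if (!name)
+ *		error("unable to place the requested block");
+ *	...
+ *	xfree(name);
+ *	list_destroy(results);
+ */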
 
+/*
+ * Resets the virtual system to a virgin state.  If track_down_nodes is set
+ * then those midplanes are not set to idle, but kept in a down state.
+ */
 extern int reset_ba_system(bool track_down_nodes);
+
+/*
+ * Used to set all midplanes in a special used state except the ones
+ * we are able to use in a new allocation.
+ *
+ * IN: hostlist of midplanes we do not want
+ * RET: SLURM_SUCCESS on success, or SLURM_ERROR on error
+ *
+ * Note: Need to call reset_all_removed_bps before starting another
+ * allocation attempt.
+ */
 extern int removable_set_bps(char *bps);
+
+/*
+ * Resets the virtual system to the state it was in before
+ * removable_set_bps or set_all_bps_except was called.
+ */
 extern int reset_all_removed_bps();
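+
+/*
+ * Typical retry sequence when an allocation must avoid the midplanes
+ * named in "bps" (sketch only; error handling omitted):
+ *
+ *	removable_set_bps(bps);
+ *	rc = allocate_block(&request, results);
+ *	reset_all_removed_bps();
+ */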
+
+/*
+ * IN: hostlist of midplanes we do not want
+ * RET: SLURM_SUCCESS on success, or SLURM_ERROR on error
+ *
+ * Need to call reset_all_removed_bps before starting another
+ * allocation attempt.  If possible use removable_set_bps since it is
+ * faster; it does basically the opposite of this function.  If you
+ * already have this hostlist, though, it is faster to use this
+ * function than to call bitmap2node_name, which is slow.
+ */
 extern int set_all_bps_except(char *bps);
 
+/*
+ * set values of every grid point (used in smap)
+ */
 extern void init_grid(node_info_msg_t *node_info_ptr);
+
 /*
  * Convert a BG API error code to a string
  * IN inx - error code from any of the BG Bridge APIs
@@ -355,27 +465,27 @@ extern void init_grid(node_info_msg_t *node_info_ptr);
  */
 extern char *bg_err_str(status_t inx);
 
-/**
+/*
  * Set up the map for resolving
  */
 extern int set_bp_map(void);
 
-/**
- * find a base blocks bg location 
+/*
+ * find a base block's bg location based on the Rack Midplane name (R000, not R00-M0)
  */
 extern int *find_bp_loc(char* bp_id);
 
-/**
- * find a rack/midplace location 
+/*
+ * find a rack/midplane location based on XYZ coords
  */
 extern char *find_bp_rack_mid(char* xyz);
 
-/**
- * set the used wires for a block out of the database 
+/*
+ * set the used wires in the virtual system for a block from the real system 
  */
 extern int load_block_wiring(char *bg_block_id);
 
-/**
+/*
  * get the used wires for a block out of the database and return the
  * node list
  */
diff --git a/src/plugins/select/bluegene/plugin/bg_job_place.c b/src/plugins/select/bluegene/plugin/bg_job_place.c
index c56e23ce28988ae5cfbb71063260e96ddac3c2c5..a7435af07c4fffb597acf4910e5699ccce4c02a1 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_place.c
+++ b/src/plugins/select/bluegene/plugin/bg_job_place.c
@@ -531,6 +531,7 @@ static int _check_for_booted_overlapping_blocks(
 	bg_record_t *found_record = NULL;
 	ListIterator itr = NULL;
 	int rc = 0;
+	int overlap = 0;
 
 	 /* this test only is for actually picking a block not testing */
 	if(test_only && bluegene_layout_mode == LAYOUT_DYNAMIC)
@@ -549,7 +550,12 @@ static int _check_for_booted_overlapping_blocks(
 			continue;
 		}
 		
-		if(blocks_overlap(bg_record, found_record)) {
+		slurm_mutex_lock(&block_state_mutex);
+		overlap = blocks_overlap(bg_record, found_record);
+		slurm_mutex_unlock(&block_state_mutex);
+
+		if(overlap) {
+			overlap = 0;
 			/* make the available time on this block
 			 * (bg_record) the max of this found_record's job
 			 * or the one already set if in overlapped_block_list
diff --git a/src/plugins/select/bluegene/plugin/bg_job_run.c b/src/plugins/select/bluegene/plugin/bg_job_run.c
index 3152ec28e6521b6c49495748079c60d0c7da19dd..1b6339db6517c9c700187ed94b3283a3d9345ae6 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_run.c
+++ b/src/plugins/select/bluegene/plugin/bg_job_run.c
@@ -62,6 +62,7 @@
 #include "src/common/xstring.h"
 #include "src/slurmctld/proc_req.h"
 #include "bluegene.h"
+#include "src/slurmctld/locks.h"
 
 #define MAX_POLL_RETRIES    220
 #define POLL_INTERVAL        3
@@ -185,6 +186,50 @@ static int _remove_job(db_job_id_t job_id)
 }
 #endif
 
+/* block_state_mutex should be locked before calling this function */
+static int _reset_block(bg_record_t *bg_record) 
+{
+	int rc = SLURM_SUCCESS;
+	if(bg_record) {
+		if(bg_record->job_running > NO_JOB_RUNNING) {
+			bg_record->job_running = NO_JOB_RUNNING;
+			bg_record->job_ptr = NULL;
+		}
+		/* remove user from list */
+		
+		slurm_conf_lock();
+		if(bg_record->target_name) {
+			if(strcmp(bg_record->target_name, 
+				  slurmctld_conf.slurm_user_name)) {
+				xfree(bg_record->target_name);
+				bg_record->target_name = 
+					xstrdup(slurmctld_conf.
+						slurm_user_name);
+			}
+			update_block_user(bg_record, 1);
+		} else {
+			bg_record->target_name = 
+				xstrdup(slurmctld_conf.slurm_user_name);
+		}	
+		slurm_conf_unlock();
+			
+		bg_record->boot_state = 0;
+		bg_record->boot_count = 0;
+		
+		last_bg_update = time(NULL);
+		if(remove_from_bg_list(bg_job_block_list, bg_record) 
+		   == SLURM_SUCCESS) {
+			num_unused_cpus += 
+				bg_record->bp_count*bg_record->cpus_per_bp;
+		}
+	} else {
+		error("No block given to reset");
+		rc = SLURM_ERROR;
+	}
+
+	return rc;
+}
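+
+/* For example, callers follow the same pattern _term_agent() uses
+ * further below:
+ *
+ *	slurm_mutex_lock(&block_state_mutex);
+ *	_reset_block(bg_record);
+ *	slurm_mutex_unlock(&block_state_mutex);
+ */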
+
 /* Delete a bg_update_t record */
 static void _bg_list_del(void *x)
 {
@@ -262,7 +307,10 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 	bg_record_t *found_record = NULL;
 	ListIterator itr;
 	List delete_list;
-	
+	int requeue_job = 0;
+	slurmctld_lock_t job_write_lock = {
+		NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK };
+
 	slurm_mutex_lock(&job_start_mutex);
 		
 	bg_record = 
@@ -271,7 +319,20 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 	if(!bg_record) {
 		error("block %s not found in bg_list",
 		      bg_update_ptr->bg_block_id);
-		(void) slurm_fail_job(bg_update_ptr->job_ptr->job_id);
+		/* wait for the slurmd to begin
+		   the batch script; slurm_fail_job()
+		   is a no-op if issued prior
+		   to the script initiation, so do the clean up
+		   in case the fail job isn't run */
+		sleep(2);	
+		lock_slurmctld(job_write_lock);
+		if((rc = job_requeue(0, bg_update_ptr->job_ptr->job_id, -1))) {
+			error("couldn't requeue job %u, failing it: %s",
+			      bg_update_ptr->job_ptr->job_id, 
+			      slurm_strerror(rc));
+			job_fail(bg_update_ptr->job_ptr->job_id);
+		}
+		unlock_slurmctld(job_write_lock);
 		slurm_mutex_unlock(&job_start_mutex);
 		return;
 	}
@@ -306,7 +367,21 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 			       bg_record->bg_block_id);
 			continue;
 		}
-		
+
+		if(found_record->job_ptr) {
+			error("Trying to start job %u on block %s, "
+			      "but there is a job %u running on an overlapping "
+			      "block %s it will not end until %u.  "
+			      "block %s; it will not end until %u.  "
+			      bg_update_ptr->job_ptr->job_id,
+			      bg_record->bg_block_id,
+			      found_record->job_ptr->job_id,
+			      found_record->bg_block_id,
+			      found_record->job_ptr->end_time);
+			requeue_job = 1;
+			break;
+		}
+
 		debug2("need to make sure %s is free, it's part of %s",
 		       found_record->bg_block_id, 
 		       bg_record->bg_block_id);
@@ -317,6 +392,33 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 		num_block_to_free++;
 	}		
 	list_iterator_destroy(itr);
+
+	if(requeue_job) {
+		num_block_to_free = 0;
+		num_block_freed = 0;
+		list_destroy(delete_list);
+
+		_reset_block(bg_record);
+
+		slurm_mutex_unlock(&block_state_mutex);
+		/* wait for the slurmd to begin
+		   the batch script; slurm_fail_job()
+		   is a no-op if issued prior
+		   to the script initiation, so do the clean up
+		   in case the fail job isn't run */
+		sleep(2);	
+		lock_slurmctld(job_write_lock);
+		if((rc = job_requeue(0, bg_update_ptr->job_ptr->job_id, -1))) {
+			error("couldn't requeue job %u, failing it: %s",
+			      bg_update_ptr->job_ptr->job_id, 
+			      slurm_strerror(rc));
+			job_fail(bg_update_ptr->job_ptr->job_id);
+		}
+		unlock_slurmctld(job_write_lock);
+		slurm_mutex_unlock(&job_start_mutex);
+		return;
+	}	
+
 	free_block_list(delete_list);
 	list_destroy(delete_list);
 	slurm_mutex_unlock(&block_state_mutex);
@@ -422,20 +524,30 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 	
 	if(bg_record->state == RM_PARTITION_FREE) {
 		if((rc = boot_block(bg_record)) != SLURM_SUCCESS) {
-			sleep(2);	
-			/* wait for the slurmd to begin 
-			   the batch script, slurm_fail_job() 
-			   is a no-op if issued prior 
-			   to the script initiation do clean up just
-			   incase the fail job isn't ran */
-			(void) slurm_fail_job(bg_update_ptr->job_ptr->job_id);
 			slurm_mutex_lock(&block_state_mutex);
+			_reset_block(bg_record);
 			if (remove_from_bg_list(bg_job_block_list, bg_record)
 			    == SLURM_SUCCESS) {
 				num_unused_cpus += bg_record->bp_count
 					*bg_record->cpus_per_bp;
 			}
 			slurm_mutex_unlock(&block_state_mutex);
+			sleep(2);	
+			/* wait for the slurmd to begin
+			   the batch script; slurm_fail_job()
+			   is a no-op if issued prior
+			   to the script initiation, so do the clean up
+			   in case the fail job isn't run */
+			lock_slurmctld(job_write_lock);
+			if((rc = job_requeue(
+				    0, bg_update_ptr->job_ptr->job_id, -1))) {
+				error("couldn't requeue job %u, failing it: %s",
+				      bg_update_ptr->job_ptr->job_id, 
+				      slurm_strerror(rc));
+				job_fail(bg_update_ptr->job_ptr->job_id);
+			}
+			unlock_slurmctld(job_write_lock);
+
 			slurm_mutex_unlock(&job_start_mutex);
 			return;
 		}
@@ -609,37 +721,9 @@ static void _term_agent(bg_update_t *bg_update_ptr)
 		}
 			
 		slurm_mutex_lock(&block_state_mutex);
-		if(bg_record->job_running > NO_JOB_RUNNING) {
-			bg_record->job_running = NO_JOB_RUNNING;
-			bg_record->job_ptr = NULL;
-		}
-		/* remove user from list */
-		
-		slurm_conf_lock();
-		if(bg_record->target_name) {
-			if(strcmp(bg_record->target_name, 
-				  slurmctld_conf.slurm_user_name)) {
-				xfree(bg_record->target_name);
-				bg_record->target_name = 
-					xstrdup(slurmctld_conf.
-						slurm_user_name);
-			}
-			update_block_user(bg_record, 1);
-		} else {
-			bg_record->target_name = 
-				xstrdup(slurmctld_conf.slurm_user_name);
-		}	
-		slurm_conf_unlock();
-			
-		bg_record->boot_state = 0;
-		bg_record->boot_count = 0;
+
+		_reset_block(bg_record);
 		
-		last_bg_update = time(NULL);
-		if(remove_from_bg_list(bg_job_block_list, bg_record) 
-		   == SLURM_SUCCESS) {
-			num_unused_cpus += 
-				bg_record->bp_count*bg_record->cpus_per_bp;
-		}
 		slurm_mutex_unlock(&block_state_mutex);
 		
 	} else {
diff --git a/src/plugins/select/bluegene/plugin/bg_record_functions.c b/src/plugins/select/bluegene/plugin/bg_record_functions.c
index 9d7d8c7d94d4131061f1625625cec5fb3a32c47f..20658b7917c32485999b995ff2dc565926a5efc1 100644
--- a/src/plugins/select/bluegene/plugin/bg_record_functions.c
+++ b/src/plugins/select/bluegene/plugin/bg_record_functions.c
@@ -645,7 +645,7 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq)
 
 	bg_record->bg_block_list = list_create(destroy_ba_node);
 	if(used_nodes) {
-		if(copy_node_path(used_nodes, bg_record->bg_block_list)
+		if(copy_node_path(used_nodes, &bg_record->bg_block_list)
 		   == SLURM_ERROR)
 			error("couldn't copy the path for the allocation");
 		bg_record->bp_count = list_count(used_nodes);
diff --git a/src/plugins/select/bluegene/plugin/bluegene.c b/src/plugins/select/bluegene/plugin/bluegene.c
index b74d9550036b9889feb325f4c2aee5d2dc0128fe..d3ff7a7749de86c248dd7d87e7a4d515a8a028b6 100644
--- a/src/plugins/select/bluegene/plugin/bluegene.c
+++ b/src/plugins/select/bluegene/plugin/bluegene.c
@@ -201,11 +201,15 @@ extern void fini_bg(void)
 	ba_fini();
 }
 
+/* 
+ * block_state_mutex should be locked before calling this function
+ */
 extern bool blocks_overlap(bg_record_t *rec_a, bg_record_t *rec_b)
 {
 	bitstr_t *my_bitmap = NULL;
 
-	if(rec_a->bp_count > 1 && rec_a->bp_count > 1) {
+	if((rec_a->bp_count > 1) && (rec_b->bp_count > 1)) {
+		/* Test for conflicting passthroughs */
 		reset_ba_system(false);
 		check_and_set_node_list(rec_a->bg_block_list);
 		if(check_and_set_node_list(rec_b->bg_block_list)
@@ -607,6 +611,14 @@ extern void *mult_free_block(void *args)
 			usleep(100000);
 			continue;
 		}
+		if(bg_record->job_ptr) {
+			info("We are freeing a block (%s) that "
+			     "has job %u(%u).  This should never happen.\n",
+			     bg_record->bg_block_id,
+			     bg_record->job_ptr->job_id, 
+			     bg_record->job_running);
+			term_jobs_on_block(bg_record->bg_block_id);
+		}
 		debug("freeing the block %s.", bg_record->bg_block_id);
 		bg_free_block(bg_record);	
 		debug("done\n");
diff --git a/src/plugins/select/bluegene/plugin/defined_block.c b/src/plugins/select/bluegene/plugin/defined_block.c
index f915dfdc39c6df30896178594c7bc9f5d71795a7..86a0dd645f418e50775fc8d9c29136d61aefafe6 100644
--- a/src/plugins/select/bluegene/plugin/defined_block.c
+++ b/src/plugins/select/bluegene/plugin/defined_block.c
@@ -182,7 +182,7 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 						list_create(destroy_ba_node);
 					copy_node_path(
 						results, 
-						bg_record->bg_block_list);
+						&bg_record->bg_block_list);
 					list_destroy(results);
 				}
 			}
@@ -363,7 +363,7 @@ extern int create_full_system_block(List bg_found_block_list)
 	if(bg_record->bg_block_list)
 		list_destroy(bg_record->bg_block_list);
 	bg_record->bg_block_list = list_create(destroy_ba_node);
-	copy_node_path(results, bg_record->bg_block_list);
+	copy_node_path(results, &bg_record->bg_block_list);
 	list_destroy(results);
 				
 	if((rc = configure_block(bg_record)) == SLURM_ERROR) {
diff --git a/src/plugins/select/bluegene/plugin/select_bluegene.c b/src/plugins/select/bluegene/plugin/select_bluegene.c
index 9b0769594a32ced6a5584b900592dfc1e91ae1a2..31e681dd2391ebcf89fd677072c66918fea907e2 100644
--- a/src/plugins/select/bluegene/plugin/select_bluegene.c
+++ b/src/plugins/select/bluegene/plugin/select_bluegene.c
@@ -431,6 +431,7 @@ extern int select_p_state_restore(char *dir_name)
 		error("select_p_state_restore: problem unpacking node_info");
 		goto unpack_error;
 	}
+	slurm_mutex_lock(&block_state_mutex);
 	reset_ba_system(false);
 
 	node_bitmap = bit_alloc(node_record_count);	
@@ -481,11 +482,9 @@ extern int select_p_state_restore(char *dir_name)
 
 		list_iterator_reset(itr);
 		if(bg_record) {
-			slurm_mutex_lock(&block_state_mutex);
 			if(bg_info_record->state == RM_PARTITION_ERROR)
 				bg_record->job_running = BLOCK_ERROR_STATE;
 			bg_record->state = bg_info_record->state;
-			slurm_mutex_unlock(&block_state_mutex);
 			blocks++;
 		} else {
 			int ionodes = 0;
@@ -602,7 +601,7 @@ extern int select_p_state_restore(char *dir_name)
 				list_destroy(bg_record->bg_block_list);
 			bg_record->bg_block_list =
 				list_create(destroy_ba_node);
-			copy_node_path(results, bg_record->bg_block_list);
+			copy_node_path(results, &bg_record->bg_block_list);
 			list_destroy(results);			
 			
 			configure_block(bg_record);
@@ -614,7 +613,6 @@ extern int select_p_state_restore(char *dir_name)
 	FREE_NULL_BITMAP(node_bitmap);
 	list_iterator_destroy(itr);
 
-	slurm_mutex_lock(&block_state_mutex);
 	sort_bg_record_inc_size(bg_list);
 	slurm_mutex_unlock(&block_state_mutex);
 		
diff --git a/src/sacct/options.c b/src/sacct/options.c
index dc205e0fcae16886043955c3a235c59781ea353b..b7c2737fc28d3db360364ea106cf00d74aef845f 100644
--- a/src/sacct/options.c
+++ b/src/sacct/options.c
@@ -1,9 +1,8 @@
 /*****************************************************************************\
  *  options.c - option functions for sacct
- *
- *  $Id: options.c 7541 2006-03-18 01:44:58Z da $
  *****************************************************************************
- *  Copyright (C) 2006 The Regents of the University of California.
+ *  Copyright (C) 2006-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>.
  *  LLNL-CODE-402394.
@@ -852,18 +851,8 @@ void parse_command_line(int argc, char **argv)
 			break;
 
 		case 'V':
-		{
-			char	obuf[20]; /* should be long enough */
-			char	*rev="$Revision: 7267 $";
-			char	*s;
-
-			s=strstr(rev, " ")+1;
-			for (i=0; s[i]!=' '; i++)
-				obuf[i]=s[i];
-			obuf[i] = 0;
-			printf("%s: %s\n", argv[0], obuf);
+			printf("%s %s\n", PACKAGE, SLURM_VERSION);
 			exit(0);
-		}
 
 		case ':':
 		case '?':	/* getopt() has explained it */
diff --git a/src/sacctmgr/account_functions.c b/src/sacctmgr/account_functions.c
index 9536f3f42a21ecdd04ca20d370adf804f28a602e..725d9ca98847925186ef3112623b2f5a5e4f811d 100644
--- a/src/sacctmgr/account_functions.c
+++ b/src/sacctmgr/account_functions.c
@@ -255,6 +255,38 @@ static int _set_rec(int *start, int argc, char *argv[],
 	return 0;
 }
 
+static int _isdefault(List acct_list)
+{
+	int rc = 0;
+	acct_user_cond_t user_cond;
+	List ret_list = NULL;
+
+	if(!acct_list || !list_count(acct_list))
+		return rc;
+
+	memset(&user_cond, 0, sizeof(acct_user_cond_t));
+	user_cond.def_acct_list = acct_list;
+
+	ret_list = acct_storage_g_get_users(db_conn, my_uid, &user_cond);
+	if(ret_list && list_count(ret_list)) {
+		ListIterator itr = list_iterator_create(ret_list);
+		acct_user_rec_t *user = NULL;
+		fprintf(stderr," Users listed below have these "
+			"as their Default Accounts.\n");
+		while((user = list_next(itr))) {
+			fprintf(stderr, " User - %-10.10s Account - %s\n",
+				user->name, user->default_acct);
+		}
+		list_iterator_destroy(itr);
+		rc = 1;		
+	}
+
+	if(ret_list)
+		list_destroy(ret_list);
+
+	return rc;
+}
+
 extern int sacctmgr_add_account(int argc, char *argv[])
 {
 	int rc = SLURM_SUCCESS;
@@ -1361,7 +1393,23 @@ extern int sacctmgr_delete_account(int argc, char *argv[])
 	
 	if(ret_list && list_count(ret_list)) {
 		char *object = NULL;
-		ListIterator itr = list_iterator_create(ret_list);
+		ListIterator itr = NULL;
+
+		/* Check to see if person is trying to remove a default
+		 * account of a user.
+		 */
+		if(_isdefault(ret_list)) {
+			exit_code=1;
+			fprintf(stderr, " Please either remove the accounts "
+				"listed above from the list and resubmit,\n"
+				" or change these users' default account "
+				"before removing the account(s).\n"
+				" Changes Discarded\n");
+			list_destroy(ret_list);
+			acct_storage_g_commit(db_conn, 0);
+			return SLURM_ERROR;	
+		}
+		itr = list_iterator_create(ret_list);
 		if(set == 1) {
 			printf(" Deleting accounts...\n");
 		} else if(set == 2 || set == 3) {
diff --git a/src/sacctmgr/cluster_functions.c b/src/sacctmgr/cluster_functions.c
index 26ea0082a82d599c8f451d3af8cfaa9f0ab9ae21..2d4efcf4edcff41efa24246527b6c1d4268caf8a 100644
--- a/src/sacctmgr/cluster_functions.c
+++ b/src/sacctmgr/cluster_functions.c
@@ -38,6 +38,7 @@
 \*****************************************************************************/
 
 #include "src/sacctmgr/sacctmgr.h"
+#include "src/common/uid.h"
 
 static int _set_cond(int *start, int argc, char *argv[],
 		     List cluster_list,
@@ -709,6 +710,7 @@ extern int sacctmgr_delete_cluster(int argc, char *argv[])
 extern int sacctmgr_dump_cluster (int argc, char *argv[])
 {
 	acct_user_cond_t user_cond;
+	acct_user_rec_t *user = NULL;
 	acct_association_cond_t assoc_cond;
 	List assoc_list = NULL;
 	List acct_list = NULL;
@@ -716,9 +718,38 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 	List sacctmgr_assoc_list = NULL;
 	char *cluster_name = NULL;
 	char *file_name = NULL;
+	char *user_name = NULL;
 	int i;
 	FILE *fd = NULL;
 
+	memset(&user_cond, 0, sizeof(acct_user_cond_t));
+	user_cond.with_coords = 1;
+
+	user_list = acct_storage_g_get_users(db_conn, my_uid, &user_cond);
+	/* make sure this person running is an admin */
+	user_name = uid_to_string(my_uid);
+	if(!(user = sacctmgr_find_user_from_list(user_list, user_name))) {
+		exit_code=1;
+		fprintf(stderr, " Your uid (%u) is not in the "
+			"accounting system, can't dump cluster.\n", my_uid);
+		xfree(user_name);
+		if(user_list)
+			list_destroy(user_list);
+		return SLURM_ERROR;
+		
+	} else {
+		if(user->admin_level < ACCT_ADMIN_SUPER_USER) {
+			exit_code=1;
+			fprintf(stderr, " Your user does not have sufficient "
+				"privileges to dump clusters.\n");
+			if(user_list)
+				list_destroy(user_list);
+			xfree(user_name);
+			return SLURM_ERROR;
+		}
+	}
+	xfree(user_name);
+
 	for (i=0; i<argc; i++) {
 		int end = parse_option_end(argv[i]);
 		if(!end) {
@@ -782,17 +813,13 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 	} else if(!list_count(assoc_list)) {
 		exit_code=1;
 		fprintf(stderr, " Cluster %s returned nothing.", cluster_name);
+		list_destroy(assoc_list);
 		xfree(cluster_name);
 		return SLURM_ERROR;
 	}
 
 	sacctmgr_assoc_list = sacctmgr_get_hierarchical_list(assoc_list);
 
-	memset(&user_cond, 0, sizeof(acct_user_cond_t));
-	user_cond.with_coords = 1;
-
-	user_list = acct_storage_g_get_users(db_conn, my_uid, &user_cond);
-
 	acct_list = acct_storage_g_get_accounts(db_conn, my_uid, NULL);
 
 	
diff --git a/src/sacctmgr/file_functions.c b/src/sacctmgr/file_functions.c
index e53d5e3b8bbb77b563c5624e28651dd5d43d0532..412e79a43749b11487230acc1b72f44f3b3cda30 100644
--- a/src/sacctmgr/file_functions.c
+++ b/src/sacctmgr/file_functions.c
@@ -38,6 +38,7 @@
 \*****************************************************************************/
 
 #include "src/sacctmgr/sacctmgr.h"
+#include "src/common/uid.h"
 
 typedef struct {
 	acct_admin_level_t admin;
@@ -1439,10 +1440,14 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 	char line[BUFFER_SIZE];
 	FILE *fd = NULL;
 	char *parent = NULL;
+	char *file_name = NULL;
 	char *cluster_name = NULL;
+	char *user_name = NULL;
 	char object[25];
 	int start = 0, len = 0, i = 0;
 	int lc=0, num_lines=0;
+	int start_clean=0;
+	int cluster_name_set=0;
 	int rc = SLURM_SUCCESS;
 
 	sacctmgr_file_opts_t *file_opts = NULL;
@@ -1475,19 +1480,92 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 	
 	int set = 0;
 	
-	fd = fopen(argv[0], "r");
+	if(readonly_flag) {
+		exit_code = 1;
+		fprintf(stderr, "Can't run this command in readonly mode.\n");
+		return;		
+	}
+
+	/* reset the connection to get the most recent stuff */
+	acct_storage_g_commit(db_conn, 0);
+
+	memset(&user_cond, 0, sizeof(acct_user_cond_t));
+	user_cond.with_coords = 1;
+	curr_user_list = acct_storage_g_get_users(db_conn, my_uid, &user_cond);
+
+	/* make sure this person running is an admin */
+	user_name = uid_to_string(my_uid);
+	if(!(user = sacctmgr_find_user_from_list(curr_user_list, user_name))) {
+		exit_code=1;
+		fprintf(stderr, " Your uid (%u) is not in the "
+			"accounting system, can't load file.\n", my_uid);
+		if(curr_user_list)
+			list_destroy(curr_user_list);
+		xfree(user_name);
+		return;
+		
+	} else {
+		if(user->admin_level < ACCT_ADMIN_SUPER_USER) {
+			exit_code=1;
+			fprintf(stderr, " Your user does not have sufficient "
+				"privileges to load files.\n");
+			if(curr_user_list)
+				list_destroy(curr_user_list);
+			xfree(user_name);
+			return;
+		}
+	}
+	xfree(user_name);
+
+	for (i=0; i<argc; i++) {
+		int end = parse_option_end(argv[i]);
+
+		if(!end && !strncasecmp(argv[i], "clean", 3)) {
+			start_clean = 1;
+		} else if(!end || !strncasecmp (argv[i], "File", 1)) {
+			if(file_name) {
+				exit_code=1;
+				fprintf(stderr, 
+					" File name already set to %s\n",
+					file_name);
+				continue;
+			}		
+			file_name = xstrdup(argv[i]+end);
+		} else if (!strncasecmp (argv[i], "Cluster", 3)) {
+			if(cluster_name) {
+				exit_code=1;
+				fprintf(stderr, 
+					" Can only do one cluster at a time.  "
+					"Already doing %s\n", cluster_name);
+				continue;
+			}
+			cluster_name = xstrdup(argv[i]+end);
+			cluster_name_set = 1;
+		} else {
+			exit_code=1;
+			fprintf(stderr, " Unknown option: %s\n", argv[i]);
+		}		
+	}
+
+	if(!file_name) {
+		exit_code=1;
+		xfree(cluster_name);
+		fprintf(stderr, 
+			" No filename given, specify one with file=''\n");
+		return;
+		
+	}
+
+	fd = fopen(file_name, "r");
+	xfree(file_name);
 	if (fd == NULL) {
 		exit_code=1;
 		fprintf(stderr, " Unable to read \"%s\": %m\n", argv[0]);
+		xfree(cluster_name);
 		return;
 	}
 
 	curr_acct_list = acct_storage_g_get_accounts(db_conn, my_uid, NULL);
-	curr_cluster_list = acct_storage_g_get_clusters(db_conn, my_uid, NULL);
-
-	memset(&user_cond, 0, sizeof(acct_user_cond_t));
-	user_cond.with_coords = 1;
-	curr_user_list = acct_storage_g_get_users(db_conn, my_uid, &user_cond);
 
 	/* These are new info so they need to be freed here */
 	acct_list = list_create(destroy_acct_account_rec);
@@ -1499,8 +1577,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 	mod_user_list = list_create(destroy_acct_user_rec);
 	mod_assoc_list = list_create(destroy_acct_association_rec);
 
-	format_list = list_create(slurm_destroy_char);
-	
+	format_list = list_create(slurm_destroy_char);	
 
 	while((num_lines = _get_next_line(line, BUFFER_SIZE, fd)) > 0) {
 		lc += num_lines;
@@ -1540,7 +1617,6 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 				object, lc);
 			rc = SLURM_ERROR;
 			break;
-			
 		}
 		start++;
 		
@@ -1548,7 +1624,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 		   || !strcasecmp("Cluster", object)) {
 			acct_association_cond_t assoc_cond;
 
-			if(cluster_name) {
+			if(cluster_name && !cluster_name_set) {
 				exit_code=1;
 				fprintf(stderr, " You can only add one cluster "
 				       "at a time.\n");
@@ -1565,7 +1641,44 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 				rc = SLURM_ERROR;
 				break;
 			}
-			cluster_name = xstrdup(file_opts->name);
+
+			if(!cluster_name_set)
+				cluster_name = xstrdup(file_opts->name);
+			if(start_clean) {
+				acct_cluster_cond_t cluster_cond;
+				List ret_list = NULL;
+
+				if(!commit_check("You requested to flush "
+						 "the cluster before "
+						 "adding it again.\n"
+						 "Are you sure you want "
+						 "to continue?")) {
+					printf("Aborted\n");
+					break;
+				}		
+
+				memset(&cluster_cond, 0, 
+				       sizeof(acct_cluster_cond_t));
+				cluster_cond.cluster_list = list_create(NULL);
+				list_append(cluster_cond.cluster_list,
+					    cluster_name);
+
+				notice_thread_init();
+				ret_list = acct_storage_g_remove_clusters(
+					db_conn, my_uid, &cluster_cond);
+				notice_thread_fini();
+				list_destroy(cluster_cond.cluster_list);
+
+				if(!ret_list) {
+					exit_code=1;
+					fprintf(stderr, " There was a problem "
+						"removing the cluster.\n");
+					rc = SLURM_ERROR;
+					break;
+				}
+			}
+			curr_cluster_list = acct_storage_g_get_clusters(
+				db_conn, my_uid, NULL);
 
 			if(cluster_name)
 				info("For cluster %s", cluster_name);
@@ -1609,6 +1722,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 			
 			memset(&assoc_cond, 0, sizeof(acct_association_cond_t));
 			assoc_cond.cluster_list = list_create(NULL);
+			assoc_cond.without_parent_limits = 1;
 			list_append(assoc_cond.cluster_list, cluster_name);
 			curr_assoc_list = acct_storage_g_get_associations(
 				db_conn, my_uid, &assoc_cond);
@@ -2030,8 +2144,9 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 		set = 1;
 	}
 	END_TIMER2("add cluster");
-		
-	info("Done adding cluster in %s", TIME_STR);
+
+	if(set)
+		info("Done adding cluster in %s", TIME_STR);
 		
 	if(rc == SLURM_SUCCESS) {
 		if(set) {
diff --git a/src/sacctmgr/sacctmgr.c b/src/sacctmgr/sacctmgr.c
index 68f0adff99ffb794f1fa4d5540f078b6459b50bd..98cdb93353f07de32b2f57c7b66641d710ceea64 100644
--- a/src/sacctmgr/sacctmgr.c
+++ b/src/sacctmgr/sacctmgr.c
@@ -171,7 +171,10 @@ main (int argc, char *argv[])
 		log_alter(opts, 0, NULL);
 	}
 
-	db_conn = acct_storage_g_get_connection(false, rollback_flag);
+	/* always do a rollback.  If you don't then if there is an
+	 * error you can not rollback ;)
+	 */
+	db_conn = acct_storage_g_get_connection(false, 1);
 	my_uid = getuid();
 
 	if (input_field_count)
diff --git a/src/sbatch/sbatch.c b/src/sbatch/sbatch.c
index c59fc796c63f8ebd9aa4b9d6e5628a55717aeb6d..e042305c5e562a4236c64a458477741fbd2c32fa 100644
--- a/src/sbatch/sbatch.c
+++ b/src/sbatch/sbatch.c
@@ -350,6 +350,24 @@ static bool contains_null_char(const void *buf, int size)
 	return false;
 }
 
+/*
+ * Checks if the buffer contains any DOS linebreak (\r\n).
+ */
+static bool contains_dos_linebreak(const void *buf, int size)
+{
+	char *str = (char *)buf;
+	char prev_char = '\0';
+	int i;
+
+	for (i = 0; i < size; i++) {
+		if (prev_char == '\r' && str[i] == '\n')
+			return true;
+		prev_char = str[i];
+	}
+
+	return false;
+}
+
 /*
  * If "filename" is NULL, the batch script is read from standard input.
  */
@@ -413,6 +431,10 @@ static void *get_script_buffer(const char *filename, int *size)
 		error("The SLURM controller does not allow scripts that");
 		error("contain a NULL character '\\0'.");
 		goto fail;
+	} else if (contains_dos_linebreak(buf, script_size)) {
+		error("Batch script contains DOS line breaks (\\r\\n)");
+		error("instead of expected UNIX line breaks (\\n).");
+		goto fail;
 	}
 
 	*size = script_size;
diff --git a/src/scontrol/scontrol.c b/src/scontrol/scontrol.c
index a9187825733fda4669d08baaed57058c412d36ef..387091e15831c2b756abae695323e08d4ab89927 100644
--- a/src/scontrol/scontrol.c
+++ b/src/scontrol/scontrol.c
@@ -49,6 +49,7 @@ int exit_flag;		/* program to terminate if =1 */
 int input_words;	/* number of words of input permitted */
 int one_liner;		/* one record per line if =1 */
 int quiet_flag;		/* quiet=1, verbose=-1, normal=0 */
+int verbosity;		/* count of "-v" options */
 
 static void	_delete_it (int argc, char *argv[]);
 static int	_get_command (int *argc, char *argv[]);
@@ -90,6 +91,7 @@ main (int argc, char *argv[])
 	exit_flag         = 0;
 	input_field_count = 0;
 	quiet_flag        = 0;
+	verbosity         = 0;
 	log_init("scontrol", opts, SYSLOG_FACILITY_DAEMON, NULL);
 
 	if (getenv ("SCONTROL_ALL"))
@@ -99,7 +101,8 @@ main (int argc, char *argv[])
 			long_options, &option_index)) != -1) {
 		switch (opt_char) {
 		case (int)'?':
-			fprintf(stderr, "Try \"scontrol --help\" for more information\n");
+			fprintf(stderr, "Try \"scontrol --help\" for "
+				"more information\n");
 			exit(1);
 			break;
 		case (int)'a':
@@ -120,6 +123,7 @@ main (int argc, char *argv[])
 			break;
 		case (int)'v':
 			quiet_flag = -1;
+			verbosity++;
 			break;
 		case (int)'V':
 			_print_version();
@@ -127,11 +131,17 @@ main (int argc, char *argv[])
 			break;
 		default:
 			exit_code = 1;
-			fprintf(stderr, "getopt error, returned %c\n", opt_char);
+			fprintf(stderr, "getopt error, returned %c\n", 
+				opt_char);
 			exit(exit_code);
 		}
 	}
 
+	if (verbosity) {
+		opts.stderr_level += verbosity;
+		log_alter(opts, SYSLOG_FACILITY_USER, NULL);
+	}
+
 	if (argc > MAX_INPUT_FIELDS)	/* bogus input, but continue anyway */
 		input_words = argc;
 	else
diff --git a/src/sinfo/opts.c b/src/sinfo/opts.c
index 1313dfbb6d519a432f77a280f7a71d72988efbde..58cb1aba64f9eac01493a5e2aa6f9a8d542e17fa 100644
--- a/src/sinfo/opts.c
+++ b/src/sinfo/opts.c
@@ -1,7 +1,8 @@
 /****************************************************************************\
  *  opts.c - sinfo command line option processing functions
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Joey Ekstrom <ekstrom1@llnl.gov>, Morris Jette <jette1@llnl.gov>
  *  LLNL-CODE-402394.
@@ -122,7 +123,8 @@ extern void parse_command_line(int argc, char *argv[])
 			long_options, &option_index)) != -1) {
 		switch (opt_char) {
 		case (int)'?':
-			fprintf(stderr, "Try \"sinfo --help\" for more information\n");
+			fprintf(stderr, 
+				"Try \"sinfo --help\" for more information\n");
 			exit(1);
 			break;
 		case (int)'a':
diff --git a/src/sinfo/sinfo.c b/src/sinfo/sinfo.c
index 8fb936d1e4cc7505a4f0665fa92536084e9a5c69..7cef6e0c7f3a247af7c77d5fe2752fefdeee31a7 100644
--- a/src/sinfo/sinfo.c
+++ b/src/sinfo/sinfo.c
@@ -1,9 +1,8 @@
 /*****************************************************************************\
  *  sinfo.c - Report overall state the system
- *
- *  $Id$
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Joey Ekstrom <ekstrom1@llnl.gov>, Morris Jette <jette1@llnl.gov>
  *  LLNL-CODE-402394.
diff --git a/src/slurmctld/controller.c b/src/slurmctld/controller.c
index f32fe6762da876a56c1bf7f1b44d5331d4c4a64f..165d92c96f267834039fcf236cfd958285c86419 100644
--- a/src/slurmctld/controller.c
+++ b/src/slurmctld/controller.c
@@ -708,7 +708,9 @@ static void *_slurmctld_rpc_mgr(void *no_data)
 {
 	slurm_fd newsockfd;
 	slurm_fd sockfd;
-	slurm_addr cli_addr;
+	slurm_addr cli_addr, srv_addr;
+	uint16_t port;
+	char ip[32];
 	pthread_t thread_id_rpc_req;
 	pthread_attr_t thread_attr_rpc_req;
 	int no_thread;
@@ -735,6 +737,9 @@ static void *_slurmctld_rpc_mgr(void *no_data)
 	    == SLURM_SOCKET_ERROR)
 		fatal("slurm_init_msg_engine_port error %m");
 	unlock_slurmctld(config_read_lock);
+	slurm_get_stream_addr(sockfd, &srv_addr);
+	slurm_get_ip_str(&srv_addr, &port, ip, sizeof(ip));
+	debug2("slurmctld listening on %s:%d", ip, ntohs(port));
 
 	/* Prepare to catch SIGUSR1 to interrupt accept().
 	 * This signal is generated by the slurmctld signal
diff --git a/src/slurmctld/slurmctld.h b/src/slurmctld/slurmctld.h
index e729abe20bd280983aab9bf3c588f2a6c6de66e2..dedbe81271304c59248f968aa0fd90e3c722d87a 100644
--- a/src/slurmctld/slurmctld.h
+++ b/src/slurmctld/slurmctld.h
@@ -968,7 +968,7 @@ extern int job_req_node_filter(struct job_record *job_ptr,
  * job_requeue - Requeue a running or pending batch job
  * IN uid - user id of user issuing the RPC
  * IN job_id - id of the job to be requeued
- * IN conn_fd - file descriptor on which to send reply
+ * IN conn_fd - file descriptor on which to send reply, -1 if none
  * RET 0 on success, otherwise ESLURM error code
  */
 extern int job_requeue (uid_t uid, uint32_t job_id, slurm_fd conn_fd);
diff --git a/src/slurmd/slurmstepd/task.c b/src/slurmd/slurmstepd/task.c
index 087e0bcc83a3f44735b7e30a86a2a0c0252b106e..a16c21901a45f80203d6f9d93588e1903120ffcc 100644
--- a/src/slurmd/slurmstepd/task.c
+++ b/src/slurmd/slurmstepd/task.c
@@ -40,15 +40,17 @@
 #  include "config.h"
 #endif
 
-#include <sys/wait.h>
-#include <sys/stat.h>
-#include <sys/param.h>
-#include <unistd.h>
-#include <pwd.h>
-#include <grp.h>
-#include <string.h>
 #include <assert.h>
 #include <ctype.h>
+#include <fcntl.h>
+#include <grp.h>
+#include <pwd.h>
+#include <string.h>
+#include <sys/param.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
 
 #if HAVE_STDLIB_H
 #  include <stdlib.h>
@@ -294,7 +296,7 @@ exec_task(slurmd_job_t *job, int i, int waitfd)
 {
 	char c;
 	uint32_t *gtids;		/* pointer to arrary of ranks */
-	int j;
+	int fd, j;
 	int rc;
 	slurmd_task_info_t *task = job->task[i];
 
@@ -430,6 +432,21 @@ exec_task(slurmd_job_t *job, int i, int waitfd)
 	/* 
 	 * print error message and clean up if execve() returns:
 	 */
+	if ((errno == ENOENT) &&
+	    ((fd = open(task->argv[0], O_RDONLY)) >= 0)) {
+		char buf[256], *eol;
+		int sz;
+		sz = read(fd, buf, sizeof(buf) - 1);
+		if ((sz >= 3) && (strncmp(buf, "#!", 2) == 0)) {
+			buf[sz] = '\0';	/* terminate before strchr() */
+			eol = strchr(buf, '\n');
+			if (eol)
+				eol[0] = '\0';
+			error("execve(): bad interpreter(%s): %m", buf+2);
+			exit(errno);
+		}
+	}
 	error("execve(): %s: %m", task->argv[0]); 
 	exit(errno);
 }
diff --git a/src/sview/grid.c b/src/sview/grid.c
index 1ebdcc20e8439c0527f03fb1ee3e46ea6f90c9aa..c3c123fde6f4966e2e5670e1baf5ef11ff985301 100644
--- a/src/sview/grid.c
+++ b/src/sview/grid.c
@@ -661,7 +661,7 @@ extern int setup_grid_table(GtkTable *table, List button_list, List node_list)
 	ListIterator itr = NULL;
 	sview_node_info_t *sview_node_info_ptr = NULL;
 #ifdef HAVE_BG
-	int y=0, z=0, x_offset=0, y_offset=0;
+	int y=0, z=0, x_offset=0, y_offset=0, default_y_offset=0;
 #endif
 
 	if(!node_list) {
@@ -693,14 +693,19 @@ extern int setup_grid_table(GtkTable *table, List button_list, List node_list)
 	 * get the nodes from the controller going up from the Z dim
 	 * instead of laying these out in a nice X fashion
 	 */
+	
+	default_y_offset = (DIM_SIZE[Z] * DIM_SIZE[Y]) 
+		+ (DIM_SIZE[Y] - DIM_SIZE[Z]);
+
 	for (x=0; x<DIM_SIZE[X]; x++) {
-		y_offset = (DIM_SIZE[Z] * DIM_SIZE[Y]);
+		y_offset = default_y_offset;
+			
 		for (y=0; y<DIM_SIZE[Y]; y++) {
 			coord_y = y_offset - y;
 			x_offset = DIM_SIZE[Z] - 1;
 			for (z=0; z<DIM_SIZE[Z]; z++){
 				coord_x = x + x_offset;
-				
+			
 				grid_button = xmalloc(sizeof(grid_button_t));
 				grid_button->inx = i++;
 				grid_button->table = table;
diff --git a/testsuite/expect/test21.15 b/testsuite/expect/test21.15
index 32ab174ea57b700dfb31301cdacf4da2e42e676c..529e24a4cda1c039c4b678a2008d53aaa67aefcb 100755
--- a/testsuite/expect/test21.15
+++ b/testsuite/expect/test21.15
@@ -615,7 +615,7 @@ proc _add_user { account adminlevel cluster defaultaccount fs maxcpu maxjob maxn
 #
 # Use sacctmgr to remove an user
 #
-proc _remove_user { user name } {
+proc _remove_user { cluster acct user } {
         global sacctmgr timeout
 
 	set exit_code 0
@@ -623,15 +623,20 @@ proc _remove_user { user name } {
 	set nothing 1
 	set check "Deleting user"
 
-	if { ![string length $name] } {
+	if { ![string length $user] } {
 		send_user "FAILURE: we need a name to remove\n"
 		return 1
 	}
 
-	set command "$name"
+	set command "$user"
+
+	if { [string length $cluster] } {
+		set command "$command cluster=$cluster"
+		set check "Deleting user associations"
+	}
 
-	if { [string length $user] } {
-		set command "$command user=$user"
+	if { [string length $acct] } {
+		set command "$command account=$acct"
 		set check "Deleting user associations"
 	}
 
@@ -689,7 +694,7 @@ proc _remove_user { user name } {
 }
 
 #make sure we have a clean system and permission to do this work
-_remove_user "" "$us1,$us2,$us3"
+_remove_user "" "" "$us1,$us2,$us3"
 _remove_acct "" "$nm1,$nm2,$nm3"
 _remove_cluster "$tc1,$tc2,$tc3"
 if {$access_err != 0} {
@@ -700,7 +705,7 @@ if {$access_err != 0} {
 #add cluster
 incr exit_code [_add_cluster "$tc1,$tc2,$tc3"]
 if { $exit_code } {
-	_remove_user "" "$us1,$us2,$us3"
+	_remove_user "" "" "$us1,$us2,$us3"
 	_remove_acct "" "$nm1,$nm2,$nm3"
 	_remove_cluster "$tc1,$tc2,$tc3"
 	exit $exit_code
@@ -709,7 +714,7 @@ if { $exit_code } {
 #add accounts
 incr exit_code [_add_acct "$tc1,$tc2,$tc3" "$nm1,$nm2,$nm3"]
 if { $exit_code } {
-	_remove_user "" "$us1,$us2,$us3"
+	_remove_user "" "" "$us1,$us2,$us3"
 	_remove_acct "" "$nm1,$nm2,$nm3"
 	_remove_cluster "$tc1,$tc2,$tc3"
 	exit $exit_code
@@ -719,7 +724,7 @@ if { $exit_code } {
 # account adminlevel cluster defaultaccount fs maxcpu maxjob maxnodes maxwall name
 incr exit_code [_add_user "$nm1,$nm2,$nm3" "$alo" "$tc1,$tc2,$tc3" "$nm2" "$fs1" "$mc1" "$mj1" "$mn1" "$mw1" $us1]
 if { $exit_code } {
-	_remove_user "" "$us1,$us2,$us3"
+	_remove_user "" "" "$us1,$us2,$us3"
 	_remove_acct "" "$nm1,$nm2,$nm3"
 	_remove_cluster "$tc1,$tc2,$tc3"
 	exit $exit_code
@@ -756,7 +761,9 @@ if {$matches != 9} {
 
 # This is the end below here
 
-incr exit_code [_remove_user "$tc1,$tc2" "$nm1,$nm2,$nm3"]
+incr exit_code [_remove_user "$tc1" "" "$us1,$us2,$us3"]
+incr exit_code [_remove_user "" "$nm1,$nm2,$nm3" "$us1,$us2,$us3"]
+incr exit_code [_remove_user "" "" "$us1,$us2,$us3"]
 incr exit_code [_remove_acct "" "$nm1,$nm2,$nm3"]
 incr exit_code [_remove_cluster "$tc1,$tc2,$tc3"]
 
diff --git a/testsuite/expect/test21.16 b/testsuite/expect/test21.16
index 2ddf07e52b0e127c95cc7113407d7980f38aaec3..5d318f87e601ef4fc0c9c58611e061e1b7210ff0 100755
--- a/testsuite/expect/test21.16
+++ b/testsuite/expect/test21.16
@@ -615,7 +615,7 @@ proc _add_user { account adminlevel cluster defaultaccount fs maxcpu maxjob maxn
 #
 # Use sacctmgr to remove an user
 #
-proc _remove_user { user name } {
+proc _remove_user { acct user } {
         global sacctmgr timeout
 
 	set exit_code 0
@@ -623,15 +623,15 @@ proc _remove_user { user name } {
 	set nothing 1
 	set check "Deleting user"
 
-	if { ![string length $name] } {
+	if { ![string length $user] } {
 		send_user "FAILURE: we need a name to remove\n"
 		return 1
 	}
 
-	set command "$name"
+	set command "$user"
 
-	if { [string length $user] } {
-		set command "$command user=$user"
+	if { [string length $acct] } {
+		set command "$command account=$acct"
 		set check "Deleting user associations"
 	}
 
@@ -756,7 +756,7 @@ if {$matches != 27} {
 
 # This is the end below here
 
-incr exit_code [_remove_user "$tc1,$tc2" "$nm1,$nm2,$nm3"]
+incr exit_code [_remove_user "" "$us1,$us2,$us3"]
 incr exit_code [_remove_acct "" "$nm1,$nm2,$nm3"]
 incr exit_code [_remove_cluster "$tc1,$tc2,$tc3"]
 
diff --git a/testsuite/expect/test21.17 b/testsuite/expect/test21.17
index f14db96697cd44e33e6c1b134bc6db38b4323d47..e7376f3debc98c9b3c32bb5b5420a7b26da815e3 100755
--- a/testsuite/expect/test21.17
+++ b/testsuite/expect/test21.17
@@ -899,7 +899,7 @@ if {$matches != 27} {
 
 # This is the end below here
 
-incr exit_code [_remove_user "$tc1,$tc2" "$nm1,$nm2,$nm3"]
+incr exit_code [_remove_user "" "$us1,$us2,$us3"]
 incr exit_code [_remove_acct "" "$nm1,$nm2,$nm3"]
 incr exit_code [_remove_cluster "$tc1,$tc2,$tc3"]
 
diff --git a/testsuite/expect/test21.18 b/testsuite/expect/test21.18
index fef761ac31a754cedf206bcc0cafe1ae78970650..84662a10b0de1fbb020c8a12a8c71fb126b505ba 100755
--- a/testsuite/expect/test21.18
+++ b/testsuite/expect/test21.18
@@ -980,7 +980,7 @@ if {$matches != 18} {
 
 # This is the end below here
 
-incr exit_code [_remove_user "$tc1,$tc2" "$nm1,$nm2,$nm3"]
+incr exit_code [_remove_user "" "$us1,$us2,$us3"]
 incr exit_code [_remove_acct "" "$nm1,$nm2,$nm3"]
 incr exit_code [_remove_cluster "$tc1,$tc2,$tc3"]
 
diff --git a/testsuite/expect/test21.19 b/testsuite/expect/test21.19
index 789a61d9fad32fdbfe99c99dcc6c040f44517786..4ef4e67733024f9818b8456611ba2f9940277b13 100755
--- a/testsuite/expect/test21.19
+++ b/testsuite/expect/test21.19
@@ -1,7 +1,8 @@
 #!/usr/bin/expect
 ############################################################################
 # Purpose: Test of SLURM functionality
-#          sacctmgr add/delete coordinator
+#          sacctmgr add a coordinator
+#          
 #
 # Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
@@ -33,54 +34,66 @@ source ./globals
 
 set test_id     "21.19"
 set exit_code   0
-set acmatches   0
-set aamatches   0
-set camatches   0
-set cumatches   0
-set lmatches    0
-set damatches   0
-set dcmatches   0
-set dumatches   0
-set not_support 0
-set add		add
-set lis		list
-set del		delete
-set nams	Names
-set nam		Name
-set fs		Fairshare
-set mc		MaxCPUSecs
-set mj		MaxJobs
-set mn		MaxNodes
-set mw		MaxWall
-set clu		cluster
 set tc1		tcluster1
+set tc2		tcluster2
+set tc3		tcluster3
 set fs1		2500
+set fs2		1700
+set fs3		1
 set mc1		1000000
+set mc2		700000
+set mc3		1
 set mj1		50
+set mj2		70
+set mj3		1
 set mn1		300
+set mn2		700
+set mn3		1
 set mw1		01:00:00
+set mw2		00:07:00
+set mw3		00:01:00
+set clu		cluster
+set cl1		1tmach
+set cl2		2tmach
+set cl3		3tmach
 set acc		account
-set acs		accounts
-set ass		associations
-set nm1		tacct1
+set nams	names
+set nm1		testaccta1
+set nm2		testaccta2
+set nm3		testaccta3
 set des		Description
-set ds1		"tacct1"
+set ds1		testaccounta1
+set ds2		testacct
 set org		Organization
-set or1		"acctorg2"
+set or1		accountorga1
+set or2		acttrg
 set qs		QosLevel
 set qs1		normal
+set par		parent
 set usr		user
 set us1		tuser1
+set us2		tuser2
+set us3		tuser3
 set al		AdminLevel
 set aln		None
+set ala		Administrator
+set alo		Operator
 set dac		DefaultAccount
-set cor		Coordinator
-set par		Partition
+set pts		Partitions
+set fs		fairshare
+set mc		maxcpu
+set mj		maxjob
+set mn		maxnode
+set mw		maxwall
 set dbu		debug
 set access_err  0
 
+
 print_header $test_id
 
+set timeout 60
+
 #
 # Check accounting config and bail if not found.
 #
@@ -88,274 +101,969 @@ if { [test_account_storage] == 0 } {
 	send_user "\nWARNING: This test can't be run without a usable AccountStorageType\n"
 	exit 0
 }
- 
+
 
 #
 # Use sacctmgr to create a cluster
-#
-set sadd_pid [spawn $sacctmgr -i add $clu $nams=$tc1 $fs=$fs1 $mc=$mc1 $mj=$mj1 $mn=$mn1 $mw=$mw1]
-expect {
-	-re "privilege to preform this action" {
-		set access_err 1
-		exp_continue
+#	
+proc _add_cluster {name} {
+        global sacctmgr timeout
+
+	set exit_code 0
+	set matches 0
+
+	if { ![string length $name] } {
+		send_user "FAILURE: we need a name to add\n"
+		return 1
 	}
-	-re "Adding Cluster" {
-		incr acmatches
-		exp_continue
+
+	set my_pid [spawn $sacctmgr -i add cluster $name]
+	expect {
+		-re "(There was a problem|Unknown condition|Bad format on|Bad MaxWall|Unknown option)" {
+			send_user "FAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "Problem getting" {
+			send_user "FAILURE: there was a problem getting information from the database\n"
+			incr exit_code 1
+		}
+		-re "Problem adding" {
+			send_user "FAILURE: there was an unknown problem\n"
+			incr exit_code 1
+		}
+		-re "No associations" {
+			send_user "FAILURE: your command didn't return anything\n"
+			incr exit_code 1
+		}
+		-re "Adding Cluster" {
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr add not responding\n"
+			slow_kill $my_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
 	}
-	timeout {
-		send_user "\nFAILURE: sacctmgr add not responding\n"
-		slow_kill $sadd_pid
-		set exit_code 1
+
+	if {$matches != 1} {
+		send_user "\nFAILURE:  sacctmgr had a problem adding clusters
+	got $matches\n"
+		incr exit_code 1
 	}
-	eof {
-		wait
+
+	if { ![check_acct_associations] } {
+		send_user "\nFAILURE:  Our associations don't line up\n"
+		incr exit_code 1
 	}
+	
+	return $exit_code
 }
-if {$access_err != 0} {
-	send_user "\nWARNING: not authorized to perform this test\n"
-	exit $exit_code
-}
-if {$acmatches != 1} {
-	send_user "\nFAILURE:  sacctmgr had a problem adding clusters
-	got $acmatches\n"
-	set exit_code 1
-}
 
-if { ![check_acct_associations] } {
-	send_user "\nFAILURE:  Our associations don't line up\n"
-	set exit_code 1
+#
+# Use sacctmgr to remove the test cluster
+#
+proc _remove_cluster {name} {
+        global access_err sacctmgr timeout
+
+	set exit_code 0
+	set matches 0
+	set nothing 0
+
+	if { ![string length $name] } {
+		send_user "FAILURE: we need a name to remove\n"
+		return 1
+	}
+
+	set my_pid [spawn $sacctmgr -i delete cluster $name]
+	expect {
+		-re "privilege to perform this action" {
+			set access_err 1
+			exp_continue
+		}
+		-re "(There was a problem|Unknown condition|Bad format on|Bad MaxWall|Unknown option)" {
+			send_user "FAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "Problem getting" {
+			send_user "FAILURE: there was a problem getting information from the database\n"
+			incr exit_code 1
+		}
+		-re "Problem adding" {
+			send_user "FAILURE: there was an unknown problem\n"
+			incr exit_code 1
+		}
+		-re "No associations" {
+			send_user "FAILURE: your command didn't return anything\n"
+			incr exit_code 1
+		}
+		-re "Deleting clusters" {
+			incr matches
+			exp_continue
+		}
+		-re " Nothing deleted" {
+			incr matches
+			set nothing 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr delete not responding\n"
+			slow_kill $my_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	if {$access_err != 0} {
+		return 1
+	}
+	if {$matches != 1} {
+		send_user "\nFAILURE: sacctmgr had a problem deleting cluster got $matches\n"
+		incr exit_code 1
+	}
+	if { !$nothing } {
+		if { ![check_acct_associations] } {
+			send_user "\nFAILURE:  Our associations don't line up\n"
+			incr exit_code 1
+		}
+	}
+
+	return $exit_code
 }
 
 #
 # Use sacctmgr to add an account
 #
-set sadel_pid [spawn $sacctmgr -i $add $acc $clu=$tc1 $des="$ds1" $fs=$fs1 $mc=$mc1 $mj=$mj1 $mn=$mn1 $mw=$mw1 $nams=$nm1 $org="$or1" $qs=$qs1]
-expect {
-	-re "Adding Account" {
-		incr aamatches
-		exp_continue
+proc _add_acct { cluster name } {
+        global sacctmgr timeout
+
+	set exit_code 0
+	set matches 0
+
+	if { ![string length $name] } {
+		send_user "FAILURE: we need a name to add\n"
+		return 1
 	}
-	timeout {
-		send_user "\nFAILURE: sacctmgr add not responding\n"
-		slow_kill $sadd_pid
-		set exit_code 1
+
+	set command "$name"
+
+	if { [string length $cluster] } {
+		set command "$command cluster=$cluster"
 	}
-	eof {
-		wait
+
+	set my_pid [eval spawn $sacctmgr -i add account $command]
+	expect {
+		-re "(There was a problem|Unknown condition|Bad format on|Bad MaxWall|Unknown option)" {
+			send_user "FAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "Problem getting" {
+			send_user "FAILURE: there was a problem getting information from the database\n"
+			incr exit_code 1
+		}
+		-re "Problem adding" {
+			send_user "FAILURE: there was an unknown problem\n"
+			incr exit_code 1
+		}
+		-re "No associations" {
+			send_user "FAILURE: your command didn't return anything\n"
+			incr exit_code 1
+		}
+		-re "Adding Account" {
+			incr matches
+			exp_continue
+		}
+		-re "Associations" {
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr add not responding\n"
+			slow_kill $my_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$matches != 2} {
+		send_user "\nFAILURE:  sacctmgr had a problem adding account.
+	got $matches\n"
+		incr exit_code 1
 	}
-}
 
-if {$aamatches != 1} {
-	send_user "\nFAILURE:  sacctmgr had a problem adding account.
-	got $aamatches\n"
-	set exit_code 1
+	if { ![check_acct_associations] } {
+		send_user "\nFAILURE:  Our associations don't line up\n"
+		incr exit_code 1
+	}
+	return $exit_code
 }
 
 #
-# Use sacctmgr to add a user
+# Use sacctmgr to remove an account
 #
-set as_list_pid [spawn $sacctmgr -i $add $usr $acs=$nm1 $al=$aln $clu=$tc1 $dac=$nm1 $fs=$fs1 $mc=$mc1 $mj=$mj1 $mn=$mn1 $mw=$mw1 $nams=$us1 $par=$dbu $qs=$qs1 ]
-expect {
-	-re "$nams *$nm1" {
-		exp_continue
+proc _remove_acct { cluster name } {
+        global sacctmgr timeout
+
+	set exit_code 0
+	set matches 0
+	set nothing 0
+	set check "Deleting account"
+
+	if { ![string length $name] } {
+		send_user "FAILURE: we need a name to remove\n"
+		return 1
 	}
-	timeout {
-		send_user "\nFAILURE: sacctmgr list associations not responding\n"
-		slow_kill $as_list_pid
-		set exit_code 1
+
+	set command "$name"
+
+	if { [string length $cluster] } {
+		set command "$command cluster=$cluster"
+		set check "Deleting account associations"
 	}
-	eof {
-		wait
+
+	set my_pid [eval spawn $sacctmgr -i delete account $command]
+	expect {
+		-re "(There was a problem|Unknown condition|Bad format on|Bad MaxWall|Unknown option)" {
+			send_user "FAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "Problem getting" {
+			send_user "FAILURE: there was a problem getting information from the database\n"
+			incr exit_code 1
+		}
+		-re "Problem adding" {
+			send_user "FAILURE: there was an unknown problem\n"
+			incr exit_code 1
+		}
+		-re "No associations" {
+			send_user "FAILURE: your command didn't return anything\n"
+			incr exit_code 1
+		}
+		-re "$check" {
+			incr matches
+			exp_continue
+		}
+		-re " Nothing deleted" {
+			incr matches
+			set nothing 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr delete not responding\n"
+			slow_kill $my_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$matches != 1} {
+		send_user "\nFAILURE:  sacctmgr had a problem deleting account.
+	got $matches\n"
+		incr exit_code 1
+	}
+
+	if { !$nothing } {
+		if { ![check_acct_associations] } {
+			send_user "\nFAILURE:  Our associations don't line up\n"
+			incr exit_code 1
+		}
 	}
-}
 
-if { ![check_acct_associations] } {
-	send_user "\nFAILURE:  Our associations don't line up\n"
-	set exit_code 1
+	return $exit_code
 }
 
 #
-# Use sacctmgr to add a coordinator
+# Use sacctmgr to modify an account
 #
-set as_list_pid [spawn $sacctmgr -i $add $cor $acs=$nm1 $nams=$us1 ]
-expect {
-	-re "Adding *$cor" {
-		exp_continue
+proc _mod_acct { cluster name desc org parent fs maxcpu maxjob maxnodes maxwall wdesc worg} {
+	global sacctmgr timeout
+	
+	set exit_code 0
+	set matches 0
+	set expected 0
+	set acct_stuff 0
+	set assoc_stuff 0
+
+	if { ![string length $name] } {
+		send_user "FAILURE: we need a name to modify\n"
+		return 1
 	}
-	timeout {
-		send_user "\nFAILURE: sacctmgr list associations not responding\n"
-		slow_kill $as_list_pid
-		set exit_code 1
+
+	#set up the where
+	set wcommand "where $name"
+
+	if { [string length $cluster] } {
+		set wcommand "$wcommand cluster=$cluster"
 	}
-	eof {
-		wait
+
+	if { [string length $wdesc] } {
+		set wcommand "$wcommand description='$wdesc'"
 	}
-}
 
-if { ![check_acct_associations] } {
-	send_user "\nFAILURE:  Our associations don't line up\n"
-	set exit_code 1
+	if { [string length $worg] } {
+		set wcommand "$wcommand organization='$worg'"
+	}
+
+	#set up the set
+	set scommand "set"
+	if { [string length $parent] } {
+		set scommand "$scommand parent=$parent"
+		set assoc_stuff 1
+	}
+
+	if { [string length $fs] } {
+		set scommand "$scommand fairshare=$fs"
+		set assoc_stuff 1
+	}
+
+	if { [string length $maxcpu] } {
+		set scommand "$scommand maxc=$maxcpu"
+		set assoc_stuff 1
+	}
+
+	if { [string length $maxjob] } {
+		set scommand "$scommand maxj=$maxjob"
+		set assoc_stuff 1
+	}
+
+	if { [string length $maxnodes] } {
+		set scommand "$scommand maxn=$maxnodes"
+		set assoc_stuff 1
+	}
+
+	if { [string length $maxwall] } {
+		set scommand "$scommand maxw=$maxwall"
+		set assoc_stuff 1
+	}
+
+	if { [string length $desc] } {
+		set scommand "$scommand description='$desc'"
+		set acct_stuff 1
+	}
+
+	if { [string length $org] } {
+		set scommand "$scommand organization='$org'"
+		set acct_stuff 1
+	}
+
+	incr expected $acct_stuff
+	incr expected $assoc_stuff
+
+	set my_pid [eval spawn $sacctmgr -i modify account $scommand $wcommand ]
+	expect {
+		-re "(There was a problem|Unknown condition|Bad format on|Bad MaxWall|Unknown option)" {
+			send_user "FAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "Problem getting" {
+			send_user "FAILURE: there was a problem getting information from the database\n"
+			incr exit_code 1
+		}
+		-re "Problem adding" {
+			send_user "FAILURE: there was an unknown problem\n"
+			incr exit_code 1
+		}
+		-re "No associations" {
+			send_user "FAILURE: your command didn't return anything\n"
+			incr exit_code 1
+		}
+		-re "Modified accounts" {
+			incr matches
+			exp_continue
+		}
+		-re "Modified account associations" {
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr modify not responding\n"
+			slow_kill $my_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$matches != $expected} {
+		send_user "\nFAILURE:  sacctmgr had a problem modifying account.
+	got $matches needed $expected\n"
+		incr exit_code 1
+	}
+	
+	if { ![check_acct_associations] } {
+		send_user "\nFAILURE:  Our associations don't line up\n"
+		incr exit_code 1
+	}
+	return $exit_code
 }
 
+
 #
-# Use sacctmgr to list the test coordinator user
+# Use sacctmgr to add a user
 #
-set as_list_pid [spawn $sacctmgr -n list $usr withcoor WithAssocs]
-expect {
-	-re "$us1 *$nm1 *$qs1 *None *$tc1 *$nm1 *$dbu *$fs1 *$mc1 *$mj1 *$mn1 *$mw1 *$nm1" {
-		incr cumatches
-		exp_continue
+proc _add_user { cluster account name } {
+        global sacctmgr timeout
+
+	set exit_code 0
+	set matches 0
+
+	if { ![string length $name] } {
+		send_user "FAILURE: we need a name to add\n"
+		return 1
 	}
-	timeout {
-		send_user "\nFAILURE: sacctmgr list user not responding\n"
-		slow_kill $as_list_pid
-		set exit_code 1
+
+	set command "$name"
+
+	if { [string length $cluster] } {
+		set command "$command cluster=$cluster"
 	}
-	eof {
-		wait
+
+	if { [string length $account] } {
+		set command "$command account=$account"
+	}
+
+	if { [string length $name] } {
+		set command "$command name=$name"
 	}
-}
 
-if {$cumatches != 1} {
-	send_user "\nFAILURE:  sacctmgr had a problem finding coordinator user.
-	got $aamatches\n"
-	set exit_code 1
+	set my_pid [eval spawn $sacctmgr -i add user $command]
+	expect {
+		-re "(There was a problem|Unknown condition|Bad format on|Bad MaxWall|Unknown option)" {
+			send_user "FAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "Problem getting" {
+			send_user "FAILURE: there was a problem getting information from the database\n"
+			incr exit_code 1
+		}
+		-re "Problem adding" {
+			send_user "FAILURE: there was an unknown problem\n"
+			incr exit_code 1
+		}
+		-re "No associations" {
+			send_user "FAILURE: your command didn't return anything\n"
+			incr exit_code 1
+		}
+		-re "Adding User" {
+			incr matches
+			exp_continue
+		}
+		-re "Associations" {
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr add not responding\n"
+			slow_kill $my_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$matches != 2} {
+		send_user "\nFAILURE:  sacctmgr had a problem adding user.
+	got $matches\n"
+		incr exit_code 1
+	}
+
+	if { ![check_acct_associations] } {
+		send_user "\nFAILURE:  Our associations don't line up\n"
+		incr exit_code 1
+	}
+	return $exit_code
 }
 
 #
-# Use sacctmgr to list the test coordinator account.
+# Use sacctmgr to remove a user
 #
-set as_list_pid [spawn $sacctmgr -n list $acc withcoor WithAssocs]
-expect {
-	-re "$nm1 *$ds1 *$or1 *$qs1 *$tc1 *root *$fs1 *$mc1 *$mj1 *$mn1 *$mw1 $us1" {
-		incr camatches
-		exp_continue
+proc _remove_user {  cluster acct user  } {
+        global sacctmgr timeout
+
+	set exit_code 0
+	set matches 0
+	set nothing 0
+	set check "Deleting user"
+
+	if { ![string length $user] } {
+		send_user "FAILURE: we need a name to remove\n"
+		return 1
 	}
-	-re "$nm1 *$ds1 *$or1 *$qs1 *$tc1 *$us1 *$fs1 *$mc1 *$mj1 *$mn1 *$mw1 $us1" {
-		incr camatches
-		exp_continue
+
+	set command "$user"
+
+	if { [string length $cluster] } {
+		set command "$command cluster=$cluster"
+		set check "Deleting user associations"
 	}
-	timeout {
-		send_user "\nFAILURE: sacctmgr list account not responding\n"
-		slow_kill $as_list_pid
-		set exit_code 1
+
+	if { [string length $acct] } {
+		set command "$command acct=$acct"
+		set check "Deleting user associations"
 	}
-	eof {
-		wait
+
+	set my_pid [eval spawn $sacctmgr -i delete user $command]
+	expect {
+		-re "(There was a problem|Unknown condition|Bad format on|Bad MaxWall|Unknown option)" {
+			send_user "FAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "Problem getting" {
+			send_user "FAILURE: there was a problem getting information from the database\n"
+			incr exit_code 1
+		}
+		-re "Problem adding" {
+			send_user "FAILURE: there was an unknown problem\n"
+			incr exit_code 1
+		}
+		-re "No associations" {
+			send_user "FAILURE: your command didn't return anything\n"
+			incr exit_code 1
+		}
+		-re "$check" {
+			incr matches
+			exp_continue
+		}
+		-re " Nothing deleted" {
+			incr matches
+			set nothing 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr delete not responding\n"
+			slow_kill $my_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$matches != 1} {
+		send_user "\nFAILURE:  sacctmgr had a problem deleting user.
+	got $matches\n"
+		incr exit_code 1
 	}
+
+	if { !$nothing } {
+		if { ![check_acct_associations] } {
+			send_user "\nFAILURE:  Our associations don't line up\n"
+			incr exit_code 1
+		}
+	}
+
+	return $exit_code
 }
 
-if {$camatches != 2} {
-	send_user "\nFAILURE:  sacctmgr had a problem finding coordinator account.\n"
-	set exit_code 1
+#
+# Use sacctmgr to modify a user
+#
+proc _mod_user { adminlevel defaultaccount fs maxcpu maxjob maxnodes maxwall waccounts wcluster wnames} {
+	global sacctmgr timeout
+	
+	set exit_code 0
+	set matches 0
+	set expected 0
+	set acct_stuff 0
+	set assoc_stuff 0
+
+	if { ![string length $wnames] } {
+		send_user "FAILURE: we need a name to modify\n"
+		return 1
+	}
+
+	#set up the where
+	set wcommand "where"
+
+	if { [string length $wcluster] } {
+		set wcommand "$wcommand cluster=$wcluster"
+	}
+
+	if { [string length $wnames] } {
+		set wcommand "$wcommand names='$wnames'"
+	}
+
+	if { [string length $waccounts] } {
+		set wcommand "$wcommand account='$waccounts'"
+	}
+
+	#set up the set
+	set scommand "set"
+
+	if { [string length $adminlevel] } {
+		set scommand "$scommand adminlevel=$adminlevel"
+		set acct_stuff 1
+	}
+
+	if { [string length $defaultaccount] } {
+		set scommand "$scommand defaultaccount='$defaultaccount'"
+		set acct_stuff 1
+	}
+
+	if { [string length $fs] } {
+		set scommand "$scommand fairshare=$fs"
+		set assoc_stuff 1
+	}
+
+	if { [string length $maxcpu] } {
+		set scommand "$scommand maxc=$maxcpu"
+		set assoc_stuff 1
+	}
+
+	if { [string length $maxjob] } {
+		set scommand "$scommand maxj=$maxjob"
+		set assoc_stuff 1
+	}
+
+	if { [string length $maxnodes] } {
+		set scommand "$scommand maxn=$maxnodes"
+		set assoc_stuff 1
+	}
+
+	if { [string length $maxwall] } {
+		set scommand "$scommand maxw=$maxwall"
+		set assoc_stuff 1
+	}
+
+	incr expected $acct_stuff
+	incr expected $assoc_stuff
+
+	set my_pid [eval spawn $sacctmgr -i modify user $scommand $wcommand ]
+	expect {
+		-re "(There was a problem|Unknown condition|Bad format on|Bad MaxWall|Unknown option)" {
+			send_user "FAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "Problem getting" {
+			send_user "FAILURE: there was a problem getting information from the database\n"
+			incr exit_code 1
+		}
+		-re "Problem adding" {
+			send_user "FAILURE: there was an unknown problem\n"
+			incr exit_code 1
+		}
+		-re "No associations" {
+			send_user "FAILURE: your command didn't return anything\n"
+			incr exit_code 1
+		}
+		-re "Modified account associations" {
+			incr matches
+			exp_continue
+		}
+		-re "Modified users" {
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr modify not responding\n"
+			slow_kill $my_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$matches != $expected} {
+		send_user "\nFAILURE:  sacctmgr had a problem modifying user.
+	got $matches needed $expected\n"
+		incr exit_code 1
+	}
+	
+	if { ![check_acct_associations] } {
+		send_user "\nFAILURE:  Our associations don't line up\n"
+		incr exit_code 1
+	}
+	return $exit_code
 }
 
 #
-# Use sacctmgr to delete the test coordinator
+# Use sacctmgr to add a coordinator
 #
-set sadel_pid [spawn $sacctmgr -i $del $cor $us1]
-expect {
-	-re "Deleting users" {
-		incr dumatches
-		exp_continue
+proc _add_coor { accounts names } {
+        global sacctmgr timeout
+
+	set exit_code 0
+	set matches 0
+
+	if { ![string length $names] } {
+		send_user "FAILURE: we need a name to add\n"
+		return 1
 	}
-	timeout {
-		send_user "\nFAILURE: sacctmgr delete not responding\n"
-		slow_kill $sadel_pid
-		set exit_code 1
+
+	set command "$names"
+
+	if { [string length $accounts] } {
+		set command "$command accounts=$accounts"
 	}
-	eof {
-		wait
+
+#	if { [string length $names] } {
+#		set command "$command names=$names"
+#	}
+
+	set my_pid [eval spawn $sacctmgr -i add coordinator $command]
+	expect {
+		-re "(There was a problem|Unknown condition|Bad format on|Bad MaxWall|Unknown option)" {
+			send_user "FAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "Problem getting" {
+			send_user "FAILURE: there was a problem getting information from the database\n"
+			incr exit_code 1
+		}
+		-re "Problem adding" {
+			send_user "FAILURE: there was an unknown problem\n"
+			incr exit_code 1
+		}
+		-re "No associations" {
+			send_user "FAILURE: your command didn't return anything\n"
+			incr exit_code 1
+		}
+		-re "Adding Coordinator" {
+			incr matches
+			exp_continue
+		}
+		-re "Associations" {
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr add not responding\n"
+			slow_kill $my_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$matches != 1} {
+		send_user "\nFAILURE:  sacctmgr had a problem adding coordinator.
+	got $matches\n"
+		incr exit_code 1
+	}
+
+	if { ![check_acct_associations] } {
+		send_user "\nFAILURE:  Our associations don't line up\n"
+		incr exit_code 1
 	}
+	return $exit_code
 }
 
 #
-# Use sacctmgr to delete the test user
+# Use sacctmgr to remove a coordinator
 #
-set sadel_pid [spawn $sacctmgr -i $del $usr $us1]
-expect {
-	-re "Deleting users" {
-		incr dumatches
-		exp_continue
+proc _remove_coor { accounts names } {
+        global sacctmgr timeout
+
+	set exit_code 0
+	set matches 0
+	set nothing 0
+	set check "Deleting user"
+
+	if { ![string length $names] } {
+		send_user "FAILURE: we need a name to remove\n"
+		return 1
 	}
-	timeout {
-		send_user "\nFAILURE: sacctmgr delete not responding\n"
-		slow_kill $sadel_pid
-		set exit_code 1
+
+	set command "$names"
+
+	if { [string length $accounts] } {
+		set command "$command accounts=$accounts"
+		set check "Deleting coordinator associations"
 	}
-	eof {
-		wait
+
+	if { [string length $names] } {
+		set command "$command names=$names"
+		set check "Deleting coordinator associations"
 	}
+
+	set my_pid [eval spawn $sacctmgr -i delete coordinator $command]
+	expect {
+		-re "(There was a problem|Unknown condition|Bad format on|Bad MaxWall|Unknown option)" {
+			send_user "FAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "Problem getting" {
+			send_user "FAILURE: there was a problem getting information from the database\n"
+			incr exit_code 1
+		}
+		-re "Problem adding" {
+			send_user "FAILURE: there was an unknown problem\n"
+			incr exit_code 1
+		}
+		-re "No associations" {
+			send_user "FAILURE: your command didn't return anything\n"
+			incr exit_code 1
+		}
+		-re "$check" {
+			incr matches
+			exp_continue
+		}
+		-re " Nothing deleted" {
+			incr matches
+			set nothing 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr delete not responding\n"
+			slow_kill $my_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$matches != 1} {
+		send_user "\nFAILURE:  sacctmgr had a problem deleting coordinator.
+	got $matches\n"
+		incr exit_code 1
+	}
+
+	if { !$nothing } {
+		if { ![check_acct_associations] } {
+			send_user "\nFAILURE:  Our associations don't line up\n"
+			incr exit_code 1
+		}
+	}
+
+	return $exit_code
+}
+
+#make sure we have a clean system and permission to do this work
+_remove_user "" "" "$us1,$us2,$us3"
+_remove_acct "" "$nm1,$nm2,$nm3"
+_remove_cluster "$tc1,$tc2,$tc3"
+if {$access_err != 0} {
+	send_user "\nWARNING: not authorized to perform this test\n"
+	exit $exit_code
+}
+
+#add cluster
+incr exit_code [_add_cluster "$tc1,$tc2,$tc3"]
+if { $exit_code } {
+	_remove_user "" "" "$us1,$us2,$us3"
+	_remove_acct "" "$nm1,$nm2,$nm3"
+	_remove_cluster "$tc1,$tc2,$tc3"
+	exit $exit_code
+}
+
+#add accounts
+incr exit_code [_add_acct "$tc1,$tc2,$tc3" "$nm1,$nm2,$nm3"]
+if { $exit_code } {
+	_remove_user "" "" "$us1,$us2,$us3"
+	_remove_acct "" "$nm1,$nm2,$nm3"
+	_remove_cluster "$tc1,$tc2,$tc3"
+	exit $exit_code
+}
+
+#add users
+incr exit_code [_add_user  "$tc1,$tc2,$tc3" "$nm1,$nm2,$nm3" "$us1,$us2,$us3"]
+if { $exit_code } {
+	_remove_user "" "" "$us1,$us2,$us3"
+	_remove_acct "" "$nm1,$nm2,$nm3"
+	_remove_cluster "$tc1,$tc2,$tc3"
+	exit $exit_code
 }
 
-if {$dumatches != 1} {
-	send_user "\nFAILURE: sacctmgr had a problem deleting user got $dumatches\n"
-	set exit_code 1
+# Add a coordinator
+# (_add_coor arguments: accounts names)
+incr exit_code [_add_coor  $nm1 $us1]
+if { $exit_code } {
+	_remove_user "" "" "$us1,$us2,$us3"
+	_remove_acct "" "$nm1,$nm2,$nm3"
+	_remove_cluster "$tc1,$tc2,$tc3"
+	exit $exit_code
 }
 
 #
-# Use sacctmgr to delete the test account
+# Use sacctmgr to list the test user modification
 #
-set sadel_pid [spawn $sacctmgr -i $del $acc $nm1]
+set matches 0
+set my_pid [spawn $sacctmgr -n -p list user names=$us1,$us2,$us3 withcoor]
 expect {
-	-re "Deleting account" {
-		incr damatches
+	-re "There was a problem" {
+	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
+	    	incr exit_code 1
+	}
+	-re "($us1.$nm1.$qs1.$aln.$nm1.|($us2|$us3).$nm1.$qs1.$aln..)" {
+		incr matches
 		exp_continue
 	}
 	timeout {
-		send_user "\nFAILURE: sacctmgr delete not responding\n"
-		slow_kill $sadel_pid
-		set exit_code 1
+		send_user "\nFAILURE: sacctmgr list associations not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
 	}
 	eof {
 		wait
 	}
 }
 
-if {$damatches != 1} {
-	send_user "\nFAILURE: sacctmgr had a problem deleting account got $damatches\n"
-	set exit_code 1
+if {$matches != 3} {
+	send_user "\nFAILURE:  coordinator add should have 3 matches, only got $matches.\n"
+	incr exit_code 1
 }
 
-if { ![check_acct_associations] } {
-	send_user "\nFAILURE:  Our associations don't line up\n"
-	set exit_code 1
+# Next, add a coordinator to two more users
+incr exit_code [_add_coor "$nm1,$nm3" "$us2,$us3"]
+if { $exit_code } {
+	_remove_user "" "" "$us1,$us2,$us3"
+	_remove_acct "" "$nm1,$nm2,$nm3"
+	_remove_cluster "$tc1,$tc2,$tc3"
+	exit $exit_code
 }
 
 #
-# Use sacctmgr to delete the test cluster
+# Use sacctmgr to list the test coordinator additions
 #
-set sadel_pid [spawn $sacctmgr -i $del $clu $tc1]
+set matches 0
+set my_pid [spawn $sacctmgr -n -p list user names=$us1,$us2,$us3 withcoor]
 expect {
-	-re "Deleting clusters" {
-		incr dcmatches
+	-re "There was a problem" {
+	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
+	    	incr exit_code 1
+	}
+	-re "($us1.$nm1.$qs1.$aln.$nm1.|($us2|$us3).$nm1.$qs1.$aln.$nm1,$nm3)" {
+		incr matches
 		exp_continue
 	}
 	timeout {
-		send_user "\nFAILURE: sacctmgr delete not responding\n"
-		slow_kill $sadel_pid
-		set exit_code 1
+		send_user "\nFAILURE: sacctmgr list associations not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
 	}
 	eof {
 		wait
 	}
 }
 
-if {$dcmatches != 1} {
-	send_user "\nFAILURE: sacctmgr had a problem deleting cluster got $dcmatches\n"
-	set exit_code 1
+if {$matches != 3} {
+	send_user "\nFAILURE:  coordinator additions should have 3 matches, only got $matches.\n"
+	incr exit_code 1
 }
 
-if { ![check_acct_associations] } {
-	send_user "\nFAILURE:  Our associations don't line up\n"
-	set exit_code 1
-}
+# All tests done, clean up below
+
+incr exit_code [_remove_user "" "" "$us1,$us2,$us3"]
+incr exit_code [_remove_acct "" "$nm1,$nm2,$nm3"]
+incr exit_code [_remove_cluster "$tc1,$tc2,$tc3"]
 
 if {$exit_code == 0} {
 	send_user "\nSUCCESS\n"
-		} else {
+} else {
 	send_user "\nFAILURE\n"
-		}
-
+}
 exit $exit_code
+
diff --git a/testsuite/expect/test21.5 b/testsuite/expect/test21.5
index 2a81fc0c10d69b9e1de25bcdeaef1571d505fcea..4110ad1184ad14fdb6e6ead06b8adf0785b4148e 100755
--- a/testsuite/expect/test21.5
+++ b/testsuite/expect/test21.5
@@ -59,11 +59,75 @@ set access_err  0
 
 print_header $test_id
 
+#
+# Check accounting config and bail if not found.
+#
 if { [test_account_storage] == 0 } {
 	send_user "\nWARNING: This test can't be run without a usable AccountStorageType\n"
 	exit 0
 }
- 
+
+#
+# Use sacctmgr to delete the test cluster
+#
+set nothing 0
+set matches 0
+
+set sadel_pid [spawn $sacctmgr -i $del $clu $tc1]
+expect {
+	-re "privilege to perform this action" {
+		set access_err 1
+		exp_continue
+	}
+	-re "(There was a problem|Unknown condition|Bad format on|Bad MaxWall|Unknown option)" {
+		send_user "FAILURE: there was a problem with the sacctmgr command\n"
+		incr exit_code 1
+	}
+	-re "Problem getting" {
+		send_user "FAILURE: there was a problem getting information from the database\n"
+		incr exit_code 1
+	}
+	-re "Problem adding" {
+		send_user "FAILURE: there was an unknown problem\n"
+		incr exit_code 1
+	}
+	-re "No associations" {
+		send_user "FAILURE: your command didn't return anything\n"
+		incr exit_code 1
+	}
+	-re "Deleting clusters" {
+		incr matches
+		exp_continue
+	}
+	-re " Nothing deleted" {
+		incr matches
+		set nothing 1
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sacctmgr delete not responding\n"
+		slow_kill $sadel_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$access_err != 0} {
+	send_user "\nWARNING: not authorized to perform this test\n"
+	exit $exit_code
+}
+if {$matches != 1} {
+	send_user "\nFAILURE: sacctmgr had a problem deleting cluster got $matches\n"
+	incr exit_code 1
+}
+if { !$nothing } {
+	if { ![check_acct_associations] } {
+		send_user "\nFAILURE:  Our associations don't line up\n"
+		incr exit_code 1
+	}
+}
+
 #
 # Use sacctmgr to create a cluster
 #
@@ -126,6 +190,7 @@ if { ![check_acct_associations] } {
 	send_user "\nFAILURE:  Our associations don't line up\n"
 	set exit_code 1
 }
+
 #
 # Use sacctmgr to delete the test cluster
 #
diff --git a/testsuite/expect/test7.11 b/testsuite/expect/test7.11
index 196d75ce6ed42933c054a0f765b17d09dc7a794e..dd278611ba7cd96f4d77f8876ffae3da36205251 100755
--- a/testsuite/expect/test7.11
+++ b/testsuite/expect/test7.11
@@ -127,6 +127,10 @@ expect {
 		incr matches
 		exp_continue
 	}
+	-re "Registered component of slurm test suite" {
+		incr matches
+		exp_continue
+	}
 	timeout {
 		send_user "\nFAILURE: srun not responding\n"
 		slow_kill $srun_pid
@@ -136,7 +140,7 @@ expect {
 		wait
 	}
 }
-if {$matches != 1} {
+if {$matches != 2} {
 	send_user "\nFAILURE: spank help message not in srun help message\n"
 	set exit_code 1
 }
diff --git a/testsuite/expect/test7.11.prog.c b/testsuite/expect/test7.11.prog.c
index 91496061745cce14356d3fb7060853a9e57188da..954e8a1bfb691454dcf70ac70fc1c88e6181d1bd 100644
--- a/testsuite/expect/test7.11.prog.c
+++ b/testsuite/expect/test7.11.prog.c
@@ -52,6 +52,14 @@ struct spank_option spank_options[] =
 	},
 	SPANK_OPTIONS_TABLE_END
 };
+struct spank_option spank_options_reg[] =
+{
+	{ "test_suite_reg", "[opt_arg]", 
+		"Registered component of slurm test suite.", 2, 0,
+		_test_opt_process
+	},
+	SPANK_OPTIONS_TABLE_END
+};
 
 static int _test_opt_process(int val, const char *optarg, int remote)
 {
@@ -65,6 +73,8 @@ static int _test_opt_process(int val, const char *optarg, int remote)
 /*  Called from both srun and slurmd */
 int slurm_spank_init(spank_t sp, int ac, char **av)
 {
+	if (spank_option_register(sp, spank_options_reg) != ESPANK_SUCCESS)
+		slurm_error("spank_option_register error");
 	if (spank_remote(sp) && (ac == 1))
 		opt_out_file = strdup(av[0]);