diff --git a/NEWS b/NEWS
index 2e0a5990d8a07647d09aeefaa6dfdc877d58c2ed..4577a9004bb63ecf0b9812edd46d526a2523a4b5 100644
--- a/NEWS
+++ b/NEWS
@@ -19,6 +19,7 @@ documents those changes that are of interest to users and admins.
  -- Added ability to change a users name in accounting.
  -- Restore squeue support for "%G" format (group id) accidentally removed in
     2.2.0.pre7.
+ -- Added preempt_mode option to qos.
 
 * Changes in SLURM 2.2.0.pre8
 =============================
diff --git a/doc/man/man1/sacctmgr.1 b/doc/man/man1/sacctmgr.1
index 7d5f6a90cd4f6d465fa93d009608b52beba03565..113d795289a6e53a795753fd4c1d921f3514a9b0 100644
--- a/doc/man/man1/sacctmgr.1
+++ b/doc/man/man1/sacctmgr.1
@@ -887,6 +887,14 @@ Name of the QOS.
 \fIPreempt\fP
 Other QOS\' this QOS can preempt.
 
+.TP
+\fIPreemptMode\fP
+Mechanism used to preempt jobs of this QOS if the cluster\'s \fIPreemptType\fP
+is configured to \fIpreempt/qos\fP.  The default preemption mechanism
+is specified by the cluster\-wide \fIPreemptMode\fP configuration parameter.
+Possible values are "Cluster" (meaning use cluster default), "Cancel",
+"Checkpoint", "Requeue" and "Suspend".
+
 .TP
 \fIPriority\fP
 What priority will be added to a job\'s priority when using this QOS.
@@ -1007,6 +1015,14 @@ quotes with nothing between them) restores it's default setting.  You
 can also use the operator += and \-= to add or remove certain QOS's
 from a QOS list.
 
+.TP
+\fIPreemptMode\fP
+Mechanism used to preempt jobs of this QOS if the cluster\'s \fIPreemptType\fP
+is configured to \fIpreempt/qos\fP.  The default preemption mechanism
+is specified by the cluster\-wide \fIPreemptMode\fP configuration parameter.
+Possible values are "Cluster" (meaning use cluster default), "Cancel",
+"Checkpoint", "Requeue" and "Suspend".
+
 .TP
 \fIPriority\fP
 What priority will be added to a job\'s priority when using this QOS.
diff --git a/slurm/slurm.h.in b/slurm/slurm.h.in
index 5a1a907125312e7dc2f64919d499ba05d4fde391..c355d7aa541e20c38184207e3ccbcb264ebd9879 100644
--- a/slurm/slurm.h.in
+++ b/slurm/slurm.h.in
@@ -1605,9 +1605,9 @@ typedef struct reservation_name_msg {
 #define PREEMPT_MODE_OFF	0x0000	/* disable job preemption */
 #define PREEMPT_MODE_SUSPEND	0x0001	/* suspend jobs to preempt */
 #define PREEMPT_MODE_REQUEUE	0x0002	/* requeue or kill jobs to preempt */
-#define PREEMPT_MODE_CHECKPOINT	0x0003	/* checkpoint job to preempt,
+#define PREEMPT_MODE_CHECKPOINT	0x0004	/* checkpoint job to preempt,
 					 * no automatic restart */
-#define PREEMPT_MODE_CANCEL	0x0004	/* always cancel the job */
+#define PREEMPT_MODE_CANCEL	0x0008	/* always cancel the job */
 #define PREEMPT_MODE_GANG	0x8000	/* enable gang scheduling */
 
 typedef struct slurm_ctl_conf {
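The renumbering above makes every preempt mechanism a distinct single-bit
flag: with the old values, CHECKPOINT (0x0003) was indistinguishable from
SUSPEND|REQUEUE once modes were OR'd together, e.g. with
PREEMPT_MODE_GANG.  A minimal standalone sketch (not code from the SLURM
tree) of how the flags now combine and decode:

	#include <stdio.h>
	#include <stdint.h>

	#define PREEMPT_MODE_OFF	0x0000
	#define PREEMPT_MODE_SUSPEND	0x0001
	#define PREEMPT_MODE_REQUEUE	0x0002
	#define PREEMPT_MODE_CHECKPOINT	0x0004	/* was 0x0003 */
	#define PREEMPT_MODE_CANCEL	0x0008	/* was 0x0004 */
	#define PREEMPT_MODE_GANG	0x8000

	int main(void)
	{
		uint16_t mode = PREEMPT_MODE_GANG | PREEMPT_MODE_CHECKPOINT;

		if (mode & PREEMPT_MODE_GANG)
			printf("gang scheduling enabled\n");
		/* mask off the gang bit to recover the base mechanism */
		if ((mode & ~PREEMPT_MODE_GANG) == PREEMPT_MODE_CHECKPOINT)
			printf("preempt mode: checkpoint\n");
		return 0;
	}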
diff --git a/slurm/slurmdb.h b/slurm/slurmdb.h
index 465778091f5ff7db64ddfd5e84b2f5672f8c7498..c3a6671462941524579b7716692b68a5b320c13c 100644
--- a/slurm/slurmdb.h
+++ b/slurm/slurmdb.h
@@ -187,6 +187,7 @@ typedef struct {
 	List fairshare_list;	/* fairshare number */
 
 	List grp_cpu_mins_list; /* list of char * */
+	List grp_cpu_run_mins_list; /* list of char * */
 	List grp_cpus_list; /* list of char * */
 	List grp_jobs_list;	/* list of char * */
 	List grp_nodes_list; /* list of char * */
@@ -196,6 +197,7 @@ typedef struct {
 	List id_list;		/* list of char */
 
 	List max_cpu_mins_pj_list; /* list of char * */
+	List max_cpu_run_mins_list; /* list of char * */
 	List max_cpus_pj_list; /* list of char * */
 	List max_jobs_list;	/* list of char * */
 	List max_nodes_pj_list; /* list of char * */
@@ -337,67 +339,75 @@ typedef struct {
 /* slurmdb_association_cond_t is defined above alphabetical */
 
 typedef struct {
-	List accounting_list; 	/* list of slurmdb_accounting_rec_t *'s */
-	char *acct;		/* account/project associated to association */
-	char *cluster;		/* cluster associated to association
-				 * */
-
-	uint64_t grp_cpu_mins; /* max number of cpu minutes the
-				* underlying group of
-				* associations can run for */
-	uint32_t grp_cpus; /* max number of cpus the
-			    * underlying group of
-			    * associations can allocate at one time */
-	uint32_t grp_jobs;	/* max number of jobs the
-				 * underlying group of associations can run
-				 * at one time */
-	uint32_t grp_nodes; /* max number of nodes the
-			     * underlying group of
-			     * associations can allocate at once */
-	uint32_t grp_submit_jobs; /* max number of jobs the
-				   * underlying group of
-				   * associations can submit at
-				   * one time */
-	uint32_t grp_wall; /* total time in hours the
-			    * underlying group of
-			    * associations can run for */
-
-	uint32_t id;		/* id identifing a combination of
-				 * user-account-cluster(-partition) */
-
-	uint32_t lft;		/* lft used for grouping sub
-				 * associations and jobs as a left
-				 * most container used with rgt */
-
-	uint64_t max_cpu_mins_pj; /* max number of cpu seconds this
-				   * association can have per job */
-	uint32_t max_cpus_pj; /* max number of cpus this
-			       * association can allocate per job */
-	uint32_t max_jobs;	/* max number of jobs this association can run
-				 * at one time */
-	uint32_t max_nodes_pj; /* max number of nodes this
-				* association can allocate per job */
-	uint32_t max_submit_jobs; /* max number of jobs that can be
-				     submitted by association */
-	uint32_t max_wall_pj; /* longest time this
-			       * association can run a job */
-
-	char *parent_acct;	/* name of parent account */
-	uint32_t parent_id;	/* id of parent account */
-	char *partition;	/* optional partition in a cluster
-				 * associated to association */
-
-	List qos_list;          /* list of char * */
-
-	uint32_t rgt;		/* rgt used for grouping sub
-				 * associations and jobs as a right
-				 * most container used with lft */
-
-	uint32_t shares_raw;	/* number of shares allocated to association */
-
-	uint32_t uid;		/* user ID */
+	List accounting_list; 	   /* list of slurmdb_accounting_rec_t *'s */
+	char *acct;		   /* account/project associated to
+				    * association */
+	char *cluster;		   /* cluster associated to association */
+
+	uint64_t grp_cpu_mins;     /* max number of cpu minutes the
+				    * underlying group of
+				    * associations can run for */
+	uint64_t grp_cpu_run_mins; /* max number of cpu minutes the
+				    * underlying group of
+				    * associations can
+				    * have running at one time */
+	uint32_t grp_cpus;         /* max number of cpus the
+				    * underlying group of
+				    * associations can allocate at one time */
+	uint32_t grp_jobs;	   /* max number of jobs the
+				    * underlying group of associations can run
+				    * at one time */
+	uint32_t grp_nodes;        /* max number of nodes the
+				    * underlying group of
+				    * associations can allocate at once */
+	uint32_t grp_submit_jobs;  /* max number of jobs the
+				    * underlying group of
+				    * associations can submit at
+				    * one time */
+	uint32_t grp_wall;         /* total time in hours the
+				    * underlying group of
+				    * associations can run for */
+
+	uint32_t id;		   /* id identifying a combination of
+				    * user-account-cluster(-partition) */
+
+	uint32_t lft;		   /* lft used for grouping sub
+				    * associations and jobs as a left
+				    * most container used with rgt */
+
+	uint64_t max_cpu_mins_pj;  /* max number of cpu minutes this
+				    * association can have per job */
+	uint64_t max_cpu_run_mins; /* max number of cpu minutes this
+				    * association can
+				    * have running at one time */
+	uint32_t max_cpus_pj;      /* max number of cpus this
+				    * association can allocate per job */
+	uint32_t max_jobs;	   /* max number of jobs this
+				    * association can run at one time */
+	uint32_t max_nodes_pj;     /* max number of nodes this
+				    * association can allocate per job */
+	uint32_t max_submit_jobs;  /* max number of jobs that can be
+				      submitted by association */
+	uint32_t max_wall_pj;      /* longest time this
+				    * association can run a job */
+
+	char *parent_acct;	   /* name of parent account */
+	uint32_t parent_id;	   /* id of parent account */
+	char *partition;	   /* optional partition in a cluster
+				    * associated to association */
+
+	List qos_list;             /* list of char * */
+
+	uint32_t rgt;		   /* rgt used for grouping sub
+				    * associations and jobs as a right
+				    * most container used with lft */
+
+	uint32_t shares_raw;	   /* number of shares allocated to
+				    * association */
+
+	uint32_t uid;		   /* user ID */
 	assoc_mgr_association_usage_t *usage;
-	char *user;		/* user associated to association */
+	char *user;		   /* user associated to association */
 } slurmdb_association_rec_t;
 
 typedef struct {
@@ -531,6 +541,9 @@ typedef struct {
 	uint32_t id;
 	uint64_t grp_cpu_mins; /* max number of cpu minutes all jobs
 				* running under this qos can run for */
+	uint64_t grp_cpu_run_mins; /* max number of cpu minutes all
+				    * jobs running under this qos can
+				    * have allocated at one time */
 	uint32_t grp_cpus; /* max number of cpus this qos
 			      can allocate at one time */
 	uint32_t grp_jobs;	/* max number of jobs this qos can run
@@ -541,8 +554,11 @@ typedef struct {
 				   * one time */
 	uint32_t grp_wall; /* total time in hours this qos can run for */
 
-	uint64_t max_cpu_mins_pj; /* max number of cpu mins a user can
+	uint64_t max_cpu_mins_pj; /* max number of cpu mins a job can
 				   * use with this qos */
+	uint64_t max_cpu_run_mins_pu; /* max number of cpu mins a user can
+				   * allocate at a given time when
+				   * using this qos */
 	uint32_t max_cpus_pj; /* max number of cpus a job can
 			       * allocate with this qos */
 	uint32_t max_jobs_pu;	/* max number of jobs a user can
@@ -559,9 +575,10 @@ typedef struct {
 	List preempt_list; /* list of char *'s only used to add or
 			    * change the other qos' this can preempt,
 			    * when doing a get use the preempt_bitstr */
+	uint16_t preempt_mode;	/* See PREEMPT_MODE_* in slurm/slurm.h */
 	uint32_t priority;  /* ranged int needs to be a unint for
 			     * heterogeneous systems */
-	assoc_mgr_qos_usage_t *usage;
+	assoc_mgr_qos_usage_t *usage; /* For internal use only, DON'T PACK */
 	double usage_factor; /* factor to apply to usage in this qos */
 } slurmdb_qos_rec_t;
 
@@ -569,6 +586,7 @@ typedef struct {
 	List description_list; /* list of char * */
 	List id_list; /* list of char * */
 	List name_list; /* list of char * */
+	uint16_t preempt_mode;	/* See PREEMPT_MODE_* in slurm/slurm.h */
 	uint16_t with_deleted;
 } slurmdb_qos_cond_t;
 
@@ -664,6 +682,8 @@ typedef struct {
 /* Right now this is used in the slurmdb_qos_rec_t structure.  In the
  * user_limit_list. */
 typedef struct {
+	uint64_t cpu_run_mins; /* how many cpu mins are allocated
+				* currently */
 	uint32_t jobs;	/* count of active jobs */
 	uint32_t submit_jobs; /* count of jobs pending or running */
 	uint32_t uid;
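The new slurmdb_qos_rec_t limits follow the sentinel convention used by
the rest of the struct: slurmdb_init_qos_rec() (slurmdb_defs.c, below)
presets every field to NO_VAL so callers set only what they intend to
change.  A hedged sketch -- the QOS name and limit values are invented
for illustration:

	slurmdb_qos_rec_t qos;

	slurmdb_init_qos_rec(&qos);	/* all limits = NO_VAL,
					 * preempt_mode = (uint16_t)NO_VAL */
	qos.name = xstrdup("standby");
	qos.preempt_mode = PREEMPT_MODE_REQUEUE; /* per-QOS override */
	qos.grp_cpu_run_mins = 100000;	/* cpu minutes of running work the
					 * whole QOS may hold at once */
	qos.max_cpu_run_mins_pu = 10000; /* same cap, per user */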
diff --git a/src/common/assoc_mgr.c b/src/common/assoc_mgr.c
index 769da90a7c675e0defe12ee44741a4409f1cf6cd..4b7ab1d0a3c3797aac1277f68c643c65a640a4a1 100644
--- a/src/common/assoc_mgr.c
+++ b/src/common/assoc_mgr.c
@@ -1307,6 +1307,7 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn,
 		assoc->cluster = ret_assoc->cluster;
 
 	assoc->grp_cpu_mins    = ret_assoc->grp_cpu_mins;
+	assoc->grp_cpu_run_mins= ret_assoc->grp_cpu_run_mins;
 	assoc->grp_cpus        = ret_assoc->grp_cpus;
 	assoc->grp_jobs        = ret_assoc->grp_jobs;
 	assoc->grp_nodes       = ret_assoc->grp_nodes;
@@ -1316,6 +1317,7 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn,
 	assoc->lft             = ret_assoc->lft;
 
 	assoc->max_cpu_mins_pj = ret_assoc->max_cpu_mins_pj;
+	assoc->max_cpu_run_mins= ret_assoc->max_cpu_run_mins;
 	assoc->max_cpus_pj     = ret_assoc->max_cpus_pj;
 	assoc->max_jobs        = ret_assoc->max_jobs;
 	assoc->max_nodes_pj    = ret_assoc->max_nodes_pj;
@@ -1491,6 +1493,7 @@ extern int assoc_mgr_fill_in_qos(void *db_conn, slurmdb_qos_rec_t *qos,
 	qos->id = found_qos->id;
 
 	qos->grp_cpu_mins    = found_qos->grp_cpu_mins;
+	qos->grp_cpu_run_mins= found_qos->grp_cpu_run_mins;
 	qos->grp_cpus        = found_qos->grp_cpus;
 	qos->grp_jobs        = found_qos->grp_jobs;
 	qos->grp_nodes       = found_qos->grp_nodes;
@@ -1498,6 +1501,7 @@ extern int assoc_mgr_fill_in_qos(void *db_conn, slurmdb_qos_rec_t *qos,
 	qos->grp_wall        = found_qos->grp_wall;
 
 	qos->max_cpu_mins_pj = found_qos->max_cpu_mins_pj;
+	qos->max_cpu_run_mins_pu = found_qos->max_cpu_run_mins_pu;
 	qos->max_cpus_pj     = found_qos->max_cpus_pj;
 	qos->max_jobs_pu     = found_qos->max_jobs_pu;
 	qos->max_nodes_pj    = found_qos->max_nodes_pj;
@@ -1513,6 +1517,7 @@ extern int assoc_mgr_fill_in_qos(void *db_conn, slurmdb_qos_rec_t *qos,
 	} else
 		qos->preempt_bitstr = found_qos->preempt_bitstr;
 
+	qos->preempt_mode = found_qos->preempt_mode;
 	qos->priority = found_qos->priority;
 
 	/* Don't send any usage info since we don't know if the usage
@@ -2091,8 +2096,11 @@ extern int assoc_mgr_update_assocs(slurmdb_update_object_t *update)
 				}
 			}
 
-			if(object->grp_cpu_mins != NO_VAL)
+			if(object->grp_cpu_mins != (uint64_t)NO_VAL)
 				rec->grp_cpu_mins = object->grp_cpu_mins;
+			if(object->grp_cpu_run_mins != (uint64_t)NO_VAL)
+				rec->grp_cpu_run_mins =
+					object->grp_cpu_run_mins;
 			if(object->grp_cpus != NO_VAL)
 				rec->grp_cpus = object->grp_cpus;
 			if(object->grp_jobs != NO_VAL)
@@ -2104,8 +2112,11 @@ extern int assoc_mgr_update_assocs(slurmdb_update_object_t *update)
 			if(object->grp_wall != NO_VAL)
 				rec->grp_wall = object->grp_wall;
 
-			if(object->max_cpu_mins_pj != NO_VAL)
+			if(object->max_cpu_mins_pj != (uint64_t)NO_VAL)
 				rec->max_cpu_mins_pj = object->max_cpu_mins_pj;
+			if(object->max_cpu_run_mins != (uint64_t)NO_VAL)
+				rec->max_cpu_run_mins =
+					object->max_cpu_run_mins;
 			if(object->max_cpus_pj != NO_VAL)
 				rec->max_cpus_pj = object->max_cpus_pj;
 			if(object->max_jobs != NO_VAL)
@@ -2591,8 +2602,11 @@ extern int assoc_mgr_update_qos(slurmdb_update_object_t *update)
 				break;
 			}
 
-			if(object->grp_cpu_mins != NO_VAL)
+			if(object->grp_cpu_mins != (uint64_t)NO_VAL)
 				rec->grp_cpu_mins = object->grp_cpu_mins;
+			if(object->grp_cpu_run_mins != (uint64_t)NO_VAL)
+				rec->grp_cpu_run_mins =
+					object->grp_cpu_run_mins;
 			if(object->grp_cpus != NO_VAL)
 				rec->grp_cpus = object->grp_cpus;
 			if(object->grp_jobs != NO_VAL)
@@ -2604,8 +2618,11 @@ extern int assoc_mgr_update_qos(slurmdb_update_object_t *update)
 			if(object->grp_wall != NO_VAL)
 				rec->grp_wall = object->grp_wall;
 
-			if(object->max_cpu_mins_pj != NO_VAL)
+			if(object->max_cpu_mins_pj != (uint64_t)NO_VAL)
 				rec->max_cpu_mins_pj = object->max_cpu_mins_pj;
+			if(object->max_cpu_run_mins_pu != (uint64_t)NO_VAL)
+				rec->max_cpu_run_mins_pu =
+					object->max_cpu_run_mins_pu;
 			if(object->max_cpus_pj != NO_VAL)
 				rec->max_cpus_pj = object->max_cpus_pj;
 			if(object->max_jobs_pu != NO_VAL)
@@ -2624,7 +2641,7 @@ extern int assoc_mgr_update_qos(slurmdb_update_object_t *update)
 
 				rec->preempt_bitstr = object->preempt_bitstr;
 				object->preempt_bitstr = NULL;
-				/* 			char *tmp = get_qos_complete_str_bitstr( */
+				/* char *tmp = get_qos_complete_str_bitstr( */
 /* 					assoc_mgr_qos_list, */
 /* 					rec->preempt_bitstr); */
 
@@ -2633,6 +2650,9 @@ extern int assoc_mgr_update_qos(slurmdb_update_object_t *update)
 /* 				xfree(tmp); */
 			}
 
+			if(object->preempt_mode != (uint16_t)NO_VAL)
+				rec->preempt_mode = object->preempt_mode;
+
 			if(object->priority != NO_VAL)
 				rec->priority = object->priority;
 
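The update paths above extend the usual partial-update idiom: an update
record is initialized so every limit holds the NO_VAL sentinel, and only
fields that differ from it are copied onto the cached record.  Sketched,
with the surrounding RPC plumbing elided:

	slurmdb_association_rec_t object;

	slurmdb_init_association_rec(&object);	/* every limit = NO_VAL */
	object.grp_cpu_run_mins = 5000;		/* the one limit to change */

	/* the update loop then applies exactly the fields that were set:
	 *
	 *	if(object->grp_cpu_run_mins != (uint64_t)NO_VAL)
	 *		rec->grp_cpu_run_mins = object->grp_cpu_run_mins;
	 *
	 * leaving every other limit on the cached record untouched */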
diff --git a/src/common/assoc_mgr.h b/src/common/assoc_mgr.h
index 3045e56162739acdeec2423e0c6484a6b407ad95..d7f7067a670218aeb2da12d0702798916561207c 100644
--- a/src/common/assoc_mgr.h
+++ b/src/common/assoc_mgr.h
@@ -116,6 +116,8 @@ struct assoc_mgr_association_usage {
 	long double usage_norm;	/* normalized usage (DON'T PACK) */
 	long double usage_raw;	/* measure of resource usage (DON'T PACK) */
 
+	uint64_t used_cpu_run_mins; /* count of running cpu mins
+				     * (DON'T PACK) */
 	uint32_t used_jobs;	/* count of active jobs (DON'T PACK) */
 	uint32_t used_submit_jobs; /* count of jobs pending or running
 				    * (DON'T PACK) */
@@ -130,6 +132,8 @@ struct assoc_mgr_qos_usage {
 			  jobs (DON'T PACK) */
 	uint32_t grp_used_cpus; /* count of cpus in use in this qos
 				 * (DON'T PACK) */
+	uint64_t grp_used_cpu_run_mins; /* count of running cpu mins
+					 * (DON'T PACK) */
 	uint32_t grp_used_jobs;	/* count of active jobs (DON'T PACK) */
 	uint32_t grp_used_nodes; /* count of nodes in use in this qos
 				  * (DON'T PACK) */
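used_cpu_run_mins and grp_used_cpu_run_mins track how many cpu minutes of
running work an association or QOS currently holds, so the new
GrpCPURunMins/MaxCPURunMins limits can be checked without rescanning the
job list.  One plausible way such counters are maintained -- an
assumption for illustration, not the actual slurmctld enforcement code:

	static void job_begins(assoc_mgr_association_usage_t *usage,
			       uint32_t cpus, uint32_t time_limit_mins)
	{
		usage->used_jobs++;
		/* charge the job's full potential run time up front */
		usage->used_cpu_run_mins += (uint64_t)cpus * time_limit_mins;
	}

	static void job_finishes(assoc_mgr_association_usage_t *usage,
				 uint32_t cpus, uint32_t time_limit_mins)
	{
		usage->used_jobs--;
		usage->used_cpu_run_mins -= (uint64_t)cpus * time_limit_mins;
	}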
diff --git a/src/common/slurm_protocol_defs.c b/src/common/slurm_protocol_defs.c
index 6d48f70c2aeed2dafda25a3b4d227146e3e0f004..5dcb0ee4ea3cb7d1b258d1b266364f83e502431b 100644
--- a/src/common/slurm_protocol_defs.c
+++ b/src/common/slurm_protocol_defs.c
@@ -221,7 +221,8 @@ extern int slurm_addto_char_list(List char_list, char *names)
 					info("There is a problem with "
 					     "your request.  It appears you "
 					     "have spaces inside your list.");
-					break;
+					count = 0;
+					goto endit;
 				}
 			}
 			i++;
@@ -247,6 +248,7 @@ extern int slurm_addto_char_list(List char_list, char *names)
 		xstrtolower(name);
 		list_append(char_list, name);
 	}
+endit:
 	list_iterator_destroy(itr);
 	return count;
 }
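The goto above changes the error path of slurm_addto_char_list(): on a
malformed list (embedded spaces) the function now returns 0 instead of
falling out of the loop with a partial count, so callers can treat a zero
return as "nothing usable was parsed".  Sketch of the calling pattern
(the input string is a contrived bad example):

	List qos_list = list_create(slurm_destroy_char);

	/* " standby" contains a space, so parsing aborts with count 0 */
	if (!slurm_addto_char_list(qos_list, "normal, standby")) {
		list_destroy(qos_list);
		return SLURM_ERROR;
	}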
@@ -939,7 +941,8 @@ extern uint16_t preempt_mode_num(const char *preempt_mode)
 	while (tok) {
 		if (strcasecmp(tok, "gang") == 0) {
 			mode_num |= PREEMPT_MODE_GANG;
-		} else if (strcasecmp(tok, "off") == 0) {
+		} else if ((strcasecmp(tok, "off") == 0)
+			   || (strcasecmp(tok, "cluster") == 0)) {
 			mode_num += PREEMPT_MODE_OFF;
 			preempt_modes++;
 		} else if (strcasecmp(tok, "cancel") == 0) {
diff --git a/src/common/slurmdb_defs.c b/src/common/slurmdb_defs.c
index 90ee24af8eeb6d6daffe92f1dacce9b2d6bb4f1d..251ebacc9aca0a7a58fc2dd5da9dbc17ead2efe9 100644
--- a/src/common/slurmdb_defs.c
+++ b/src/common/slurmdb_defs.c
@@ -572,6 +572,9 @@ extern void slurmdb_destroy_association_cond(void *object)
 
 		if(slurmdb_association->grp_cpu_mins_list)
 			list_destroy(slurmdb_association->grp_cpu_mins_list);
+		if(slurmdb_association->grp_cpu_run_mins_list)
+			list_destroy(slurmdb_association->
+				     grp_cpu_run_mins_list);
 		if(slurmdb_association->grp_cpus_list)
 			list_destroy(slurmdb_association->grp_cpus_list);
 		if(slurmdb_association->grp_jobs_list)
@@ -588,6 +591,9 @@ extern void slurmdb_destroy_association_cond(void *object)
 
 		if(slurmdb_association->max_cpu_mins_pj_list)
 			list_destroy(slurmdb_association->max_cpu_mins_pj_list);
+		if(slurmdb_association->max_cpu_run_mins_list)
+			list_destroy(slurmdb_association->
+				     max_cpu_run_mins_list);
 		if(slurmdb_association->max_cpus_pj_list)
 			list_destroy(slurmdb_association->max_cpus_pj_list);
 		if(slurmdb_association->max_jobs_list)
@@ -1082,7 +1088,8 @@ extern void slurmdb_init_association_rec(slurmdb_association_rec_t *assoc)
 
 	memset(assoc, 0, sizeof(slurmdb_association_rec_t));
 
-	assoc->grp_cpu_mins = NO_VAL;
+	assoc->grp_cpu_mins = (uint64_t)NO_VAL;
+	assoc->grp_cpu_run_mins = (uint64_t)NO_VAL;
 	assoc->grp_cpus = NO_VAL;
 	assoc->grp_jobs = NO_VAL;
 	assoc->grp_nodes = NO_VAL;
@@ -1091,7 +1098,8 @@ extern void slurmdb_init_association_rec(slurmdb_association_rec_t *assoc)
 
 	/* assoc->level_shares = NO_VAL; */
 
-	assoc->max_cpu_mins_pj = NO_VAL;
+	assoc->max_cpu_mins_pj = (uint64_t)NO_VAL;
+	assoc->max_cpu_run_mins = (uint64_t)NO_VAL;
 	assoc->max_cpus_pj = NO_VAL;
 	assoc->max_jobs = NO_VAL;
 	assoc->max_nodes_pj = NO_VAL;
@@ -1122,16 +1130,19 @@ extern void slurmdb_init_qos_rec(slurmdb_qos_rec_t *qos)
 
 	memset(qos, 0, sizeof(slurmdb_qos_rec_t));
 
+	qos->preempt_mode = (uint16_t)NO_VAL;
 	qos->priority = NO_VAL;
 
-	qos->grp_cpu_mins = NO_VAL;
+	qos->grp_cpu_mins = (uint64_t)NO_VAL;
+	qos->grp_cpu_run_mins = (uint64_t)NO_VAL;
 	qos->grp_cpus = NO_VAL;
 	qos->grp_jobs = NO_VAL;
 	qos->grp_nodes = NO_VAL;
 	qos->grp_submit_jobs = NO_VAL;
 	qos->grp_wall = NO_VAL;
 
-	qos->max_cpu_mins_pj = NO_VAL;
+	qos->max_cpu_mins_pj = (uint64_t)NO_VAL;
+	qos->max_cpu_run_mins_pu = (uint64_t)NO_VAL;
 	qos->max_cpus_pj = NO_VAL;
 	qos->max_jobs_pu = NO_VAL;
 	qos->max_nodes_pj = NO_VAL;
@@ -1638,6 +1649,12 @@ extern void log_assoc_rec(slurmdb_association_rec_t *assoc_ptr,
 	else if(assoc_ptr->grp_cpu_mins != NO_VAL)
 		debug2("  GrpCPUMins       : %llu", assoc_ptr->grp_cpu_mins);
 
+	if(assoc_ptr->grp_cpu_run_mins == INFINITE)
+		debug2("  GrpCPURunMins    : NONE");
+	else if(assoc_ptr->grp_cpu_run_mins != NO_VAL)
+		debug2("  GrpCPURunMins    : %llu",
+		       assoc_ptr->grp_cpu_run_mins);
+
 	if(assoc_ptr->grp_cpus == INFINITE)
 		debug2("  GrpCPUs          : NONE");
 	else if(assoc_ptr->grp_cpus != NO_VAL)
@@ -1672,6 +1689,12 @@ extern void log_assoc_rec(slurmdb_association_rec_t *assoc_ptr,
 	else if(assoc_ptr->max_cpu_mins_pj != NO_VAL)
 		debug2("  MaxCPUMins       : %llu", assoc_ptr->max_cpu_mins_pj);
 
+	if(assoc_ptr->max_cpu_run_mins == INFINITE)
+		debug2("  MaxCPURunMins    : NONE");
+	else if(assoc_ptr->max_cpu_run_mins != NO_VAL)
+		debug2("  MaxCPURunMins    : %llu",
+		       assoc_ptr->max_cpu_run_mins);
+
 	if(assoc_ptr->max_cpus_pj == INFINITE)
 		debug2("  MaxCPUs          : NONE");
 	else if(assoc_ptr->max_cpus_pj != NO_VAL)
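log_assoc_rec() leans on the two-sentinel convention shared by all of
these limits: INFINITE marks a limit explicitly cleared (logged as
"NONE"; sacctmgr sets it with a value of -1), NO_VAL marks a limit never
set, and anything else is enforced.  A hypothetical helper restating the
decode:

	/* illustration only -- not a function in the SLURM tree */
	static const char *limit64_str(uint64_t limit, char *buf, int len)
	{
		if (limit == INFINITE)
			return "NONE";	/* explicitly unlimited */
		if (limit == NO_VAL)
			return NULL;	/* unset: log nothing */
		snprintf(buf, len, "%llu", (long long unsigned)limit);
		return buf;
	}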
diff --git a/src/common/slurmdb_pack.c b/src/common/slurmdb_pack.c
index 2352cfea74535a6f7266966f082d19fac61a2183..b4ee62bf7cfa83149a5a1582c4f7a9c8e3e710a8 100644
--- a/src/common/slurmdb_pack.c
+++ b/src/common/slurmdb_pack.c
@@ -371,7 +371,20 @@ extern void slurmdb_pack_used_limits(void *in, uint16_t rpc_version, Buf buffer)
 {
 	slurmdb_used_limits_t *object = (slurmdb_used_limits_t *)in;
 
-	if(rpc_version >= 6) {
+	if(rpc_version >= 8) {
+		if(!object) {
+			pack64(0, buffer);
+			pack32(0, buffer);
+			pack32(0, buffer);
+			pack32(0, buffer);
+			return;
+		}
+
+		pack64(object->cpu_run_mins, buffer);
+		pack32(object->jobs, buffer);
+		pack32(object->submit_jobs, buffer);
+		pack32(object->uid, buffer);
+	} else if(rpc_version >= 6) {
 		if(!object) {
 			pack32(0, buffer);
 			pack32(0, buffer);
@@ -393,7 +406,12 @@ extern int slurmdb_unpack_used_limits(void **object,
 
 	*object = (void *)object_ptr;
 
-	if(rpc_version >= 6) {
+	if(rpc_version >= 8) {
+		safe_unpack64(&object_ptr->cpu_run_mins, buffer);
+		safe_unpack32(&object_ptr->jobs, buffer);
+		safe_unpack32(&object_ptr->submit_jobs, buffer);
+		safe_unpack32(&object_ptr->uid, buffer);
+	} else if(rpc_version >= 6) {
 		safe_unpack32(&object_ptr->jobs, buffer);
 		safe_unpack32(&object_ptr->submit_jobs, buffer);
 		safe_unpack32(&object_ptr->uid, buffer);
@@ -838,7 +856,112 @@ extern void slurmdb_pack_association_rec(void *in, uint16_t rpc_version,
 	char *tmp_info = NULL;
 	slurmdb_association_rec_t *object = (slurmdb_association_rec_t *)in;
 
-	if (rpc_version >= 4) {
+	if (rpc_version >= 8) {
+		if(!object) {
+			pack32(NO_VAL, buffer);
+			packnull(buffer);
+			packnull(buffer);
+
+			pack32(NO_VAL, buffer);
+
+			pack64(NO_VAL, buffer);
+			pack64(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack32(0, buffer);
+			pack32(0, buffer);
+
+			pack64(NO_VAL, buffer);
+			pack64(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			packnull(buffer);
+			pack32(0, buffer);
+			packnull(buffer);
+
+			pack32(NO_VAL, buffer);
+
+			pack32(0, buffer);
+			pack32(0, buffer);
+
+			packnull(buffer);
+			return;
+		}
+
+		if(object->accounting_list)
+			count = list_count(object->accounting_list);
+
+		pack32(count, buffer);
+
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->accounting_list);
+			while((slurmdb_info = list_next(itr))) {
+				slurmdb_pack_accounting_rec(slurmdb_info,
+							    rpc_version,
+							    buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		packstr(object->acct, buffer);
+		packstr(object->cluster, buffer);
+
+		/* this used to be named fairshare; rather than redo
+		   the pack order just to keep things alphabetical,
+		   the field was simply renamed in place */
+		pack32(object->shares_raw, buffer);
+
+		pack64(object->grp_cpu_mins, buffer);
+		pack64(object->grp_cpu_run_mins, buffer);
+		pack32(object->grp_cpus, buffer);
+		pack32(object->grp_jobs, buffer);
+		pack32(object->grp_nodes, buffer);
+		pack32(object->grp_submit_jobs, buffer);
+		pack32(object->grp_wall, buffer);
+
+		pack32(object->id, buffer);
+		pack32(object->lft, buffer);
+
+		pack64(object->max_cpu_mins_pj, buffer);
+		pack64(object->max_cpu_run_mins, buffer);
+		pack32(object->max_cpus_pj, buffer);
+		pack32(object->max_jobs, buffer);
+		pack32(object->max_nodes_pj, buffer);
+		pack32(object->max_submit_jobs, buffer);
+		pack32(object->max_wall_pj, buffer);
+
+		packstr(object->parent_acct, buffer);
+		pack32(object->parent_id, buffer);
+		packstr(object->partition, buffer);
+
+		if(object->qos_list)
+			count = list_count(object->qos_list);
+
+		pack32(count, buffer);
+
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->qos_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack32(object->rgt, buffer);
+		pack32(object->uid, buffer);
+
+		packstr(object->user, buffer);
+	} else if (rpc_version >= 4) {
 		if(!object) {
 			pack32(NO_VAL, buffer);
 			packnull(buffer);
@@ -885,7 +1008,8 @@ extern void slurmdb_pack_association_rec(void *in, uint16_t rpc_version,
 			itr = list_iterator_create(object->accounting_list);
 			while((slurmdb_info = list_next(itr))) {
 				slurmdb_pack_accounting_rec(slurmdb_info,
-							    rpc_version, buffer);
+							    rpc_version,
+							    buffer);
 			}
 			list_iterator_destroy(itr);
 		}
@@ -956,7 +1080,70 @@ extern int slurmdb_unpack_association_rec(void **object, uint16_t rpc_version,
 
 	slurmdb_init_association_rec(object_ptr);
 
-	if (rpc_version >= 4) {
+	if (rpc_version >= 8) {
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->accounting_list =
+				list_create(slurmdb_destroy_accounting_rec);
+			for(i=0; i<count; i++) {
+				if(slurmdb_unpack_accounting_rec(
+					   (void **)&slurmdb_info,
+					   rpc_version,
+					   buffer) == SLURM_ERROR)
+					goto unpack_error;
+				list_append(object_ptr->accounting_list,
+					    slurmdb_info);
+			}
+		}
+
+		safe_unpackstr_xmalloc(&object_ptr->acct, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->cluster, &uint32_tmp,
+				       buffer);
+
+		safe_unpack32(&object_ptr->shares_raw, buffer);
+
+		safe_unpack64(&object_ptr->grp_cpu_mins, buffer);
+		safe_unpack64(&object_ptr->grp_cpu_run_mins, buffer);
+		safe_unpack32(&object_ptr->grp_cpus, buffer);
+		safe_unpack32(&object_ptr->grp_jobs, buffer);
+		safe_unpack32(&object_ptr->grp_nodes, buffer);
+		safe_unpack32(&object_ptr->grp_submit_jobs, buffer);
+		safe_unpack32(&object_ptr->grp_wall, buffer);
+
+		safe_unpack32(&object_ptr->id, buffer);
+		safe_unpack32(&object_ptr->lft, buffer);
+
+		safe_unpack64(&object_ptr->max_cpu_mins_pj, buffer);
+		safe_unpack64(&object_ptr->max_cpu_run_mins, buffer);
+		safe_unpack32(&object_ptr->max_cpus_pj, buffer);
+		safe_unpack32(&object_ptr->max_jobs, buffer);
+		safe_unpack32(&object_ptr->max_nodes_pj, buffer);
+		safe_unpack32(&object_ptr->max_submit_jobs, buffer);
+		safe_unpack32(&object_ptr->max_wall_pj, buffer);
+
+		safe_unpackstr_xmalloc(&object_ptr->parent_acct, &uint32_tmp,
+				       buffer);
+		safe_unpack32(&object_ptr->parent_id, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->partition, &uint32_tmp,
+				       buffer);
+
+		safe_unpack32(&count, buffer);
+		/* a zero count is meaningful: it distinguishes an emptied
+		   qos_list from one never set (count == NO_VAL) */
+		if(count != NO_VAL) {
+			object_ptr->qos_list = list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->qos_list, tmp_info);
+			}
+		}
+
+		safe_unpack32(&object_ptr->rgt, buffer);
+		safe_unpack32(&object_ptr->uid, buffer);
+
+		safe_unpackstr_xmalloc(&object_ptr->user, &uint32_tmp, buffer);
+	} else if (rpc_version >= 4) {
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
 			object_ptr->accounting_list =
@@ -1091,7 +1278,81 @@ extern void slurmdb_pack_qos_rec(void *in, uint16_t rpc_version, Buf buffer)
 	uint32_t count = NO_VAL;
 	char *tmp_info = NULL;
 
-	if(rpc_version >= 6) {
+	if(rpc_version >= 8) {
+		if(!object) {
+			packnull(buffer);
+			pack32(0, buffer);
+			packnull(buffer);
+
+			pack64(NO_VAL, buffer);
+			pack64(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack64(NO_VAL, buffer);
+			pack64(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			packnull(buffer);
+
+			pack_bit_str(NULL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack16(0, buffer);
+			pack32(0, buffer);
+
+			packdouble(NO_VAL, buffer);
+			return;
+		}
+		packstr(object->description, buffer);
+		pack32(object->id, buffer);
+
+		pack64(object->grp_cpu_mins, buffer);
+		pack64(object->grp_cpu_run_mins, buffer);
+		pack32(object->grp_cpus, buffer);
+		pack32(object->grp_jobs, buffer);
+		pack32(object->grp_nodes, buffer);
+		pack32(object->grp_submit_jobs, buffer);
+		pack32(object->grp_wall, buffer);
+
+		pack64(object->max_cpu_mins_pj, buffer);
+		pack64(object->max_cpu_run_mins_pu, buffer);
+		pack32(object->max_cpus_pj, buffer);
+		pack32(object->max_jobs_pu, buffer);
+		pack32(object->max_nodes_pj, buffer);
+		pack32(object->max_submit_jobs_pu, buffer);
+		pack32(object->max_wall_pj, buffer);
+
+		packstr(object->name, buffer);
+
+		pack_bit_str(object->preempt_bitstr, buffer);
+
+		if(object->preempt_list)
+			count = list_count(object->preempt_list);
+
+		pack32(count, buffer);
+
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->preempt_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack16(object->preempt_mode, buffer);
+		pack32(object->priority, buffer);
+
+		packdouble(object->usage_factor, buffer);
+	} else if(rpc_version >= 6) {
 		if(!object) {
 			packnull(buffer);
 			pack32(0, buffer);
@@ -1175,12 +1436,13 @@ extern int slurmdb_unpack_qos_rec(void **object, uint16_t rpc_version,
 
 	slurmdb_init_qos_rec(object_ptr);
 
-	if(rpc_version >= 6) {
+	if(rpc_version >= 8) {
 		safe_unpackstr_xmalloc(&object_ptr->description,
 				       &uint32_tmp, buffer);
 		safe_unpack32(&object_ptr->id, buffer);
 
 		safe_unpack64(&object_ptr->grp_cpu_mins, buffer);
+		safe_unpack64(&object_ptr->grp_cpu_run_mins, buffer);
 		safe_unpack32(&object_ptr->grp_cpus, buffer);
 		safe_unpack32(&object_ptr->grp_jobs, buffer);
 		safe_unpack32(&object_ptr->grp_nodes, buffer);
@@ -1188,6 +1450,7 @@ extern int slurmdb_unpack_qos_rec(void **object, uint16_t rpc_version,
 		safe_unpack32(&object_ptr->grp_wall, buffer);
 
 		safe_unpack64(&object_ptr->max_cpu_mins_pj, buffer);
+		safe_unpack64(&object_ptr->max_cpu_run_mins_pu, buffer);
 		safe_unpack32(&object_ptr->max_cpus_pj, buffer);
 		safe_unpack32(&object_ptr->max_jobs_pu, buffer);
 		safe_unpack32(&object_ptr->max_nodes_pj, buffer);
@@ -1210,10 +1473,49 @@ extern int slurmdb_unpack_qos_rec(void **object, uint16_t rpc_version,
 			}
 		}
 
+		safe_unpack16(&object_ptr->preempt_mode, buffer);
 		safe_unpack32(&object_ptr->priority, buffer);
 
 		safe_unpackdouble(&object_ptr->usage_factor, buffer);
-	}
+	} else if(rpc_version >= 6) {
+		safe_unpackstr_xmalloc(&object_ptr->description,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&object_ptr->id, buffer);
+
+		safe_unpack64(&object_ptr->grp_cpu_mins, buffer);
+		safe_unpack32(&object_ptr->grp_cpus, buffer);
+		safe_unpack32(&object_ptr->grp_jobs, buffer);
+		safe_unpack32(&object_ptr->grp_nodes, buffer);
+		safe_unpack32(&object_ptr->grp_submit_jobs, buffer);
+		safe_unpack32(&object_ptr->grp_wall, buffer);
+
+		safe_unpack64(&object_ptr->max_cpu_mins_pj, buffer);
+		safe_unpack32(&object_ptr->max_cpus_pj, buffer);
+		safe_unpack32(&object_ptr->max_jobs_pu, buffer);
+		safe_unpack32(&object_ptr->max_nodes_pj, buffer);
+		safe_unpack32(&object_ptr->max_submit_jobs_pu, buffer);
+		safe_unpack32(&object_ptr->max_wall_pj, buffer);
+
+		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+
+		unpack_bit_str(&object_ptr->preempt_bitstr, buffer);
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->preempt_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->preempt_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&object_ptr->priority, buffer);
+
+		safe_unpackdouble(&object_ptr->usage_factor, buffer);
+	}
 
 	return SLURM_SUCCESS;
 
@@ -1909,7 +2211,7 @@ extern void slurmdb_pack_association_cond(void *in, uint16_t rpc_version,
 	ListIterator itr = NULL;
 	slurmdb_association_cond_t *object = (slurmdb_association_cond_t *)in;
 
-	if(rpc_version >= 5) {
+	if(rpc_version >= 8) {
 		if(!object) {
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
@@ -1922,6 +2224,7 @@ extern void slurmdb_pack_association_cond(void *in, uint16_t rpc_version,
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
 
 			pack32(NO_VAL, buffer);
 
@@ -1931,6 +2234,7 @@ extern void slurmdb_pack_association_cond(void *in, uint16_t rpc_version,
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
 
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
@@ -2003,6 +2307,20 @@ extern void slurmdb_pack_association_cond(void *in, uint16_t rpc_version,
 		}
 		count = NO_VAL;
 
+		if(object->grp_cpu_run_mins_list)
+			count = list_count(object->grp_cpu_run_mins_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(
+				object->grp_cpu_run_mins_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
 		if(object->grp_cpus_list)
 			count = list_count(object->grp_cpus_list);
 
@@ -2094,6 +2412,21 @@ extern void slurmdb_pack_association_cond(void *in, uint16_t rpc_version,
 			list_iterator_destroy(itr);
 		}
 		count = NO_VAL;
+
+		if(object->max_cpu_run_mins_list)
+			count = list_count(object->max_cpu_run_mins_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(
+				object->max_cpu_run_mins_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
 		if(object->max_cpus_pj_list)
 			count = list_count(object->max_cpus_pj_list);
 
@@ -2192,47 +2525,611 @@ extern void slurmdb_pack_association_cond(void *in, uint16_t rpc_version,
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
 			}
-			list_iterator_destroy(itr);
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack_time(object->usage_end, buffer);
+		pack_time(object->usage_start, buffer);
+
+		if(object->user_list)
+			count = list_count(object->user_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->user_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack16(object->with_usage, buffer);
+		pack16(object->with_deleted, buffer);
+		pack16(object->with_raw_qos, buffer);
+		pack16(object->with_sub_accts, buffer);
+		pack16(object->without_parent_info, buffer);
+		pack16(object->without_parent_limits, buffer);
+	} else if(rpc_version >= 5) {
+		if(!object) {
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack32(NO_VAL, buffer);
+
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack32(NO_VAL, buffer);
+
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack32(NO_VAL, buffer);
+
+			pack_time(0, buffer);
+			pack_time(0, buffer);
+
+			pack32(NO_VAL, buffer);
+
+			pack16(0, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			return;
+		}
+
+		if(object->acct_list)
+			count = list_count(object->acct_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->acct_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->cluster_list)
+			count = list_count(object->cluster_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->cluster_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->fairshare_list)
+			count = list_count(object->fairshare_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->fairshare_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->grp_cpu_mins_list)
+			count = list_count(object->grp_cpu_mins_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->grp_cpu_mins_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->grp_cpus_list)
+			count = list_count(object->grp_cpus_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->grp_cpus_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->grp_jobs_list)
+			count = list_count(object->grp_jobs_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->grp_jobs_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->grp_nodes_list)
+			count = list_count(object->grp_nodes_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->grp_nodes_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->grp_submit_jobs_list)
+			count = list_count(object->grp_submit_jobs_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(
+				object->grp_submit_jobs_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->grp_wall_list)
+			count = list_count(object->grp_wall_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->grp_wall_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->id_list)
+			count = list_count(object->id_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->id_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+		}
+		count = NO_VAL;
+
+		if(object->max_cpu_mins_pj_list)
+			count = list_count(object->max_cpu_mins_pj_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(
+				object->max_cpu_mins_pj_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+		if(object->max_cpus_pj_list)
+			count = list_count(object->max_cpus_pj_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->max_cpus_pj_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+		if(object->max_jobs_list)
+			count = list_count(object->max_jobs_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->max_jobs_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+		if(object->max_nodes_pj_list)
+			count = list_count(object->max_nodes_pj_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->max_nodes_pj_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+		if(object->max_submit_jobs_list)
+			count = list_count(object->max_submit_jobs_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(
+				object->max_submit_jobs_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+		if(object->max_wall_pj_list)
+			count = list_count(object->max_wall_pj_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->max_wall_pj_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->partition_list)
+			count = list_count(object->partition_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->partition_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->parent_acct_list)
+			count = list_count(object->parent_acct_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->parent_acct_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->qos_list)
+			count = list_count(object->qos_list);
+
+		pack32(count, buffer);
+
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->qos_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack_time(object->usage_end, buffer);
+		pack_time(object->usage_start, buffer);
+
+		if(object->user_list)
+			count = list_count(object->user_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->user_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack16(object->with_usage, buffer);
+		pack16(object->with_deleted, buffer);
+		pack16(object->with_raw_qos, buffer);
+		pack16(object->with_sub_accts, buffer);
+		pack16(object->without_parent_info, buffer);
+		pack16(object->without_parent_limits, buffer);
+	}
+}
+
+extern int slurmdb_unpack_association_cond(void **object,
+					   uint16_t rpc_version, Buf buffer)
+{
+	uint32_t uint32_tmp;
+	int i;
+	uint32_t count;
+	slurmdb_association_cond_t *object_ptr =
+		xmalloc(sizeof(slurmdb_association_cond_t));
+	char *tmp_info = NULL;
+	*object = object_ptr;
+
+	if(rpc_version >= 8) {
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->acct_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->acct_list, tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->cluster_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->cluster_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->fairshare_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->fairshare_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_cpu_mins_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_cpu_mins_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_cpu_run_mins_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_cpu_run_mins_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_cpus_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_cpus_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_jobs_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_jobs_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_nodes_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_nodes_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_submit_jobs_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_submit_jobs_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_wall_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_wall_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->id_list = list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->id_list, tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_cpu_mins_pj_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_cpu_mins_pj_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_cpu_run_mins_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_cpu_run_mins_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_cpus_pj_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_cpus_pj_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_jobs_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_jobs_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_nodes_pj_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_nodes_pj_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_submit_jobs_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_submit_jobs_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_wall_pj_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_wall_pj_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->partition_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->partition_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->parent_acct_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->parent_acct_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->qos_list = list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->qos_list, tmp_info);
+			}
 		}
-		count = NO_VAL;
-
-		pack_time(object->usage_end, buffer);
-		pack_time(object->usage_start, buffer);
 
-		if(object->user_list)
-			count = list_count(object->user_list);
+		safe_unpack_time(&object_ptr->usage_end, buffer);
+		safe_unpack_time(&object_ptr->usage_start, buffer);
 
-		pack32(count, buffer);
-		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->user_list);
-			while((tmp_info = list_next(itr))) {
-				packstr(tmp_info, buffer);
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->user_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->user_list, tmp_info);
 			}
-			list_iterator_destroy(itr);
 		}
-		count = NO_VAL;
-
-		pack16(object->with_usage, buffer);
-		pack16(object->with_deleted, buffer);
-		pack16(object->with_raw_qos, buffer);
-		pack16(object->with_sub_accts, buffer);
-		pack16(object->without_parent_info, buffer);
-		pack16(object->without_parent_limits, buffer);
-	}
-}
-
-extern int slurmdb_unpack_association_cond(void **object,
-					   uint16_t rpc_version, Buf buffer)
-{
-	uint32_t uint32_tmp;
-	int i;
-	uint32_t count;
-	slurmdb_association_cond_t *object_ptr =
-		xmalloc(sizeof(slurmdb_association_cond_t));
-	char *tmp_info = NULL;
-	*object = object_ptr;
 
-	if(rpc_version >= 5) {
+		safe_unpack16(&object_ptr->with_usage, buffer);
+		safe_unpack16(&object_ptr->with_deleted, buffer);
+		safe_unpack16(&object_ptr->with_raw_qos, buffer);
+		safe_unpack16(&object_ptr->with_sub_accts, buffer);
+		safe_unpack16(&object_ptr->without_parent_info, buffer);
+		safe_unpack16(&object_ptr->without_parent_limits, buffer);
+	} else if(rpc_version >= 5) {
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
 			object_ptr->acct_list =
@@ -3610,54 +4507,107 @@ extern void slurmdb_pack_qos_cond(void *in, uint16_t rpc_version, Buf buffer)
 	ListIterator itr = NULL;
 	slurmdb_qos_cond_t *object = (slurmdb_qos_cond_t *)in;
 
-	if(!object) {
-		pack32(NO_VAL, buffer);
-		pack32(NO_VAL, buffer);
-		pack32(NO_VAL, buffer);
-		pack16(0, buffer);
-		return;
-	}
+	if(rpc_version >= 8) {
+		if(!object) {
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			return;
+		}
 
-	if(object->description_list)
-		count = list_count(object->description_list);
+		if(object->description_list)
+			count = list_count(object->description_list);
 
-	pack32(count, buffer);
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->description_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->description_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
 		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
+		count = NO_VAL;
 
-	if(object->id_list)
-		count = list_count(object->id_list);
+		if(object->id_list)
+			count = list_count(object->id_list);
 
-	pack32(count, buffer);
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->id_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->id_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
 		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
+		count = NO_VAL;
 
-	if(object->name_list)
-		count = list_count(object->name_list);
+		if(object->name_list)
+			count = list_count(object->name_list);
 
-	pack32(count, buffer);
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->name_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->name_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
 		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
+		count = NO_VAL;
+
+		pack16(object->preempt_mode, buffer);
+		pack16(object->with_deleted, buffer);
+	} else {
+		if(!object) {
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack16(0, buffer);
+			return;
+		}
+
+		if(object->description_list)
+			count = list_count(object->description_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->description_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->id_list)
+			count = list_count(object->id_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->id_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->name_list)
+			count = list_count(object->name_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->name_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
 
-	pack16(object->with_deleted, buffer);
+		pack16(object->with_deleted, buffer);
+	}
 }
 
 extern int slurmdb_unpack_qos_cond(void **object, uint16_t rpc_version,
@@ -3671,34 +4621,77 @@ extern int slurmdb_unpack_qos_cond(void **object, uint16_t rpc_version,
 
 	*object = object_ptr;
 
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->description_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-			list_append(object_ptr->description_list, tmp_info);
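+	/* rpc_version 8 added preempt_mode to the qos_cond; older
+	 * versions are unpacked without it below. */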
+	if(rpc_version >= 8) {
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->description_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->description_list,
+					    tmp_info);
+			}
 		}
-	}
 
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->id_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-			list_append(object_ptr->id_list, tmp_info);
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->id_list = list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->id_list, tmp_info);
+			}
 		}
-	}
 
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->name_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-			list_append(object_ptr->name_list, tmp_info);
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->name_list = list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->name_list, tmp_info);
+			}
+		}
+
+		safe_unpack16(&object_ptr->preempt_mode, buffer);
+		safe_unpack16(&object_ptr->with_deleted, buffer);
+	} else {
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->description_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->description_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->id_list = list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->id_list, tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->name_list = list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->name_list, tmp_info);
+			}
 		}
+
+		safe_unpack16(&object_ptr->with_deleted, buffer);
 	}
 
-	safe_unpack16(&object_ptr->with_deleted, buffer);
 	return SLURM_SUCCESS;
 
 unpack_error:
diff --git a/src/common/xstring.c b/src/common/xstring.c
index c99cfeb8300550846f06bb276c7c84cc45fad393..4eaeca2e1f061f44b8d219e17e8f32d4148fbaac 100644
--- a/src/common/xstring.c
+++ b/src/common/xstring.c
@@ -505,7 +505,7 @@ bool xstring_is_whitespace(const char *str)
 /*
  * If str make everything lowercase.  Should not be called on static char *'s
  */
-void xstrtolower(char *str)
+char *xstrtolower(char *str)
 {
 	if(str) {
 		int j = 0;
@@ -514,4 +514,5 @@ void xstrtolower(char *str)
 			j++;
 		}
 	}
+	return str;
 }
diff --git a/src/common/xstring.h b/src/common/xstring.h
index 6222022c9f4b133740634dafb617c49937c9e54f..953768ca39a978777aba53187ea577edd2914fa2 100644
--- a/src/common/xstring.h
+++ b/src/common/xstring.h
@@ -157,7 +157,8 @@ bool xstring_is_whitespace(const char *str);
 
 /*
  * If str make everything lowercase.  Should not be called on static char *'s
+ * Returns the lowered string, which is the same pointer that was passed in.
  */
-void xstrtolower(char *str);
+char *xstrtolower(char *str);
 
 #endif /* !_XSTRING_H */
diff --git a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
index a2c5fd6b4aa65f082b55aa08e2521df15616cf26..6a209c31ae94a9d93ee1fbe49a4449ab3f0709b4 100644
--- a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
+++ b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
@@ -465,13 +465,16 @@ static int _as_mysql_acct_check_tables(MYSQL *db_conn)
 		{ "max_nodes_per_job", "int default NULL" },
 		{ "max_wall_duration_per_job", "int default NULL" },
 		{ "max_cpu_mins_per_job", "bigint default NULL" },
+		{ "max_cpu_run_mins_per_user", "bigint default NULL" },
 		{ "grp_jobs", "int default NULL" },
 		{ "grp_submit_jobs", "int default NULL" },
 		{ "grp_cpus", "int default NULL" },
 		{ "grp_nodes", "int default NULL" },
 		{ "grp_wall", "int default NULL" },
 		{ "grp_cpu_mins", "bigint default NULL" },
+		{ "grp_cpu_run_mins", "bigint default NULL" },
 		{ "preempt", "text not null default ''" },
+		{ "preempt_mode", "int default 0" },
 		{ "priority", "int default 0" },
 		{ "usage_factor", "double default 1.0 not null" },
 		{ NULL, NULL}
@@ -511,6 +514,7 @@ static int _as_mysql_acct_check_tables(MYSQL *db_conn)
 		"set @mnpj = NULL; "
 		"set @mwpj = NULL; "
 		"set @mcmpj = NULL; "
+		"set @mcrm = NULL; "
 		"set @qos = ''; "
 		"set @delta_qos = ''; "
 		"set @my_acct = acct; "
@@ -521,6 +525,7 @@ static int _as_mysql_acct_check_tables(MYSQL *db_conn)
 		"set @mnpj = 0; "
 		"set @mwpj = 0; "
 		"set @mcmpj = 0; "
+		"set @mcrm = 0; "
 		"set @qos = 0; "
 		"set @delta_qos = 0; "
 		"end if; "
@@ -547,6 +552,9 @@ static int _as_mysql_acct_check_tables(MYSQL *db_conn)
 		"if @mcmpj is NULL then set @s = CONCAT("
 		"@s, '@mcmpj := max_cpu_mins_pj, '); "
 		"end if; "
+		"if @mcrm is NULL then set @s = CONCAT("
+		"@s, '@mcrm := max_cpu_run_mins, '); "
+		"end if; "
 		"if @qos = '' then set @s = CONCAT("
 		"@s, '@qos := qos, "
 		"@delta_qos := CONCAT(delta_qos, @delta_qos), '); "
@@ -558,8 +566,8 @@ static int _as_mysql_acct_check_tables(MYSQL *db_conn)
 		"execute query; "
 		"deallocate prepare query; "
 		"UNTIL (@mj != -1 && @msj != -1 && @mcpj != -1 "
-		"&& @mnpj != -1 && @mwpj != -1 "
-		"&& @mcmpj != -1 && @qos != '') || @my_acct = '' END REPEAT; "
+		"&& @mnpj != -1 && @mwpj != -1 && @mcmpj != -1 "
+		"&& @mcrm != -1 && @qos != '') || @my_acct = '' END REPEAT; "
 		"END;";
 	char *query = NULL;
 	time_t now = time(NULL);
@@ -803,12 +811,14 @@ extern int create_cluster_tables(MYSQL *db_conn, char *cluster_name)
 		{ "max_nodes_pj", "int default NULL" },
 		{ "max_wall_pj", "int default NULL" },
 		{ "max_cpu_mins_pj", "bigint default NULL" },
+		{ "max_cpu_run_mins", "bigint default NULL" },
 		{ "grp_jobs", "int default NULL" },
 		{ "grp_submit_jobs", "int default NULL" },
 		{ "grp_cpus", "int default NULL" },
 		{ "grp_nodes", "int default NULL" },
 		{ "grp_wall", "int default NULL" },
 		{ "grp_cpu_mins", "bigint default NULL" },
+		{ "grp_cpu_run_mins", "bigint default NULL" },
 		{ "qos", "blob not null default ''" },
 		{ "delta_qos", "blob not null default ''" },
 		{ NULL, NULL}
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_assoc.c b/src/plugins/accounting_storage/mysql/as_mysql_assoc.c
index 2178f0057f2c7d80fc099b6adff78c67c60f6ba0..e137792859ea75e3e8e614c562c36a3445b95ad6 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_assoc.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_assoc.c
@@ -50,12 +50,14 @@ char *assoc_req_inx[] = {
 	"partition",
 	"shares",
 	"grp_cpu_mins",
+	"grp_cpu_run_mins",
 	"grp_cpus",
 	"grp_jobs",
 	"grp_nodes",
 	"grp_submit_jobs",
 	"grp_wall",
 	"max_cpu_mins_pj",
+	"max_cpu_run_mins",
 	"max_cpus_pj",
 	"max_jobs",
 	"max_nodes_pj",
@@ -73,13 +75,15 @@ enum {
 	ASSOC_REQ_ACCT,
 	ASSOC_REQ_PART,
 	ASSOC_REQ_FS,
-	ASSOC_REQ_GCH,
+	ASSOC_REQ_GCM,
+	ASSOC_REQ_GCRM,
 	ASSOC_REQ_GC,
 	ASSOC_REQ_GJ,
 	ASSOC_REQ_GN,
 	ASSOC_REQ_GSJ,
 	ASSOC_REQ_GW,
 	ASSOC_REQ_MCMPJ,
+	ASSOC_REQ_MCRM,
 	ASSOC_REQ_MCPJ,
 	ASSOC_REQ_MJ,
 	ASSOC_REQ_MNPJ,
@@ -99,6 +103,7 @@ enum {
 	ASSOC2_REQ_MNPJ,
 	ASSOC2_REQ_MWPJ,
 	ASSOC2_REQ_MCMPJ,
+	ASSOC2_REQ_MCRM,
 	ASSOC2_REQ_QOS,
 	ASSOC2_REQ_DELTA_QOS,
 };
@@ -446,6 +451,7 @@ static int _set_assoc_limits_for_add(
 		ASSOC_REQ_MNPJ,
 		ASSOC_REQ_MWPJ,
 		ASSOC_REQ_MCMPJ,
+		ASSOC_REQ_MCRM,
 		ASSOC_REQ_QOS,
 		ASSOC_REQ_DELTA_QOS,
 	};
@@ -462,7 +468,7 @@ static int _set_assoc_limits_for_add(
 	query = xstrdup_printf("call get_parent_limits('%s', "
 			       "'%s', '%s', %u);"
 			       "select @par_id, @mj, @msj, @mcpj, "
-			       "@mnpj, @mwpj, @mcmpj, @qos, @delta_qos;",
+			       "@mnpj, @mwpj, @mcmpj, @mcrm, @qos, @delta_qos;",
 			       assoc_table, parent, assoc->cluster, 0);
 	debug4("%d(%s:%d) query\n%s",
 	       mysql_conn->conn, THIS_FILE, __LINE__, query);
@@ -486,7 +492,9 @@ static int _set_assoc_limits_for_add(
 	if(row[ASSOC_REQ_MWPJ] && assoc->max_wall_pj == NO_VAL)
 		assoc->max_wall_pj = atoi(row[ASSOC_REQ_MWPJ]);
 	if(row[ASSOC_REQ_MCMPJ] && assoc->max_cpu_mins_pj == NO_VAL)
-		assoc->max_cpu_mins_pj = atoi(row[ASSOC_REQ_MCMPJ]);
+		assoc->max_cpu_mins_pj = atoll(row[ASSOC_REQ_MCMPJ]);
+	if(row[ASSOC_REQ_MCRM] && assoc->max_cpu_run_mins == NO_VAL)
+		assoc->max_cpu_run_mins = atoll(row[ASSOC_REQ_MCRM]);
 
 	if(assoc->qos_list) {
 		int set = 0;
@@ -558,6 +566,7 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 		"max_cpus_pj",
 		"max_wall_pj",
 		"max_cpu_mins_pj",
+		"max_cpu_run_mins",
 		"qos",
 		"delta_qos",
 		"lft",
@@ -575,6 +584,7 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 		ASSOC_MCPJ,
 		ASSOC_MWPJ,
 		ASSOC_MCMPJ,
+		ASSOC_MCRM,
 		ASSOC_QOS,
 		ASSOC_DELTA_QOS,
 		ASSOC_LFT,
@@ -650,6 +660,11 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 			modified = 1;
 		}
 
+		if(!row[ASSOC_MCRM] && assoc->max_cpu_run_mins != NO_VAL) {
+			mod_assoc->max_cpu_run_mins = assoc->max_cpu_run_mins;
+			modified = 1;
+		}
+
 		if(!row[ASSOC_QOS][0] && assoc->qos_list) {
 			List delta_qos_list = NULL;
 			char *qos_char = NULL, *delta_char = NULL;
@@ -871,6 +886,22 @@ static int _setup_association_cond_limits(
 		xstrcat(*extra, ")");
 	}
 
+	if(assoc_cond->grp_cpu_run_mins_list
+	   && list_count(assoc_cond->grp_cpu_run_mins_list)) {
+		set = 0;
+		xstrcat(*extra, " && (");
+		itr = list_iterator_create(assoc_cond->grp_cpu_run_mins_list);
+		while((object = list_next(itr))) {
+			if(set)
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.grp_cpu_run_mins='%s'",
+				   prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+
 	if(assoc_cond->grp_cpus_list
 	   && list_count(assoc_cond->grp_cpus_list)) {
 		set = 0;
@@ -967,6 +998,22 @@ static int _setup_association_cond_limits(
 		xstrcat(*extra, ")");
 	}
 
+	if(assoc_cond->max_cpu_run_mins_list
+	   && list_count(assoc_cond->max_cpu_run_mins_list)) {
+		set = 0;
+		xstrcat(*extra, " && (");
+		itr = list_iterator_create(assoc_cond->max_cpu_run_mins_list);
+		while((object = list_next(itr))) {
+			if(set)
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.max_cpu_run_mins='%s'",
+				   prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+
 	if(assoc_cond->max_cpus_pj_list
 	   && list_count(assoc_cond->max_cpus_pj_list)) {
 		set = 0;
@@ -1298,7 +1345,11 @@ static int _process_modify_assoc_results(mysql_conn_t *mysql_conn,
 			if((assoc->max_cpu_mins_pj == INFINITE)
 			   && row2[ASSOC2_REQ_MCMPJ])
 				assoc->max_cpu_mins_pj =
-					atoi(row2[ASSOC2_REQ_MCMPJ]);
+					atoll(row2[ASSOC2_REQ_MCMPJ]);
+			if((assoc->max_cpu_run_mins == INFINITE)
+			   && row2[ASSOC2_REQ_MCRM])
+				assoc->max_cpu_run_mins =
+					atoll(row2[ASSOC2_REQ_MCRM]);
 		}
 		mysql_free_result(result2);
 
@@ -1311,6 +1362,7 @@ static int _process_modify_assoc_results(mysql_conn_t *mysql_conn,
 
 		mod_assoc->grp_cpus = assoc->grp_cpus;
 		mod_assoc->grp_cpu_mins = assoc->grp_cpu_mins;
+		mod_assoc->grp_cpu_run_mins = assoc->grp_cpu_run_mins;
 		mod_assoc->grp_jobs = assoc->grp_jobs;
 		mod_assoc->grp_nodes = assoc->grp_nodes;
 		mod_assoc->grp_submit_jobs = assoc->grp_submit_jobs;
@@ -1318,6 +1370,7 @@ static int _process_modify_assoc_results(mysql_conn_t *mysql_conn,
 
 		mod_assoc->max_cpus_pj = assoc->max_cpus_pj;
 		mod_assoc->max_cpu_mins_pj = assoc->max_cpu_mins_pj;
+		mod_assoc->max_cpu_run_mins = assoc->max_cpu_run_mins;
 		mod_assoc->max_jobs = assoc->max_jobs;
 		mod_assoc->max_nodes_pj = assoc->max_nodes_pj;
 		mod_assoc->max_submit_jobs = assoc->max_submit_jobs;
@@ -1597,6 +1650,7 @@ static int _cluster_get_assocs(mysql_conn_t *mysql_conn,
 	uint32_t parent_mnpj = INFINITE;
 	uint32_t parent_mwpj = INFINITE;
 	uint64_t parent_mcmpj = INFINITE;
+	uint64_t parent_mcrm = INFINITE;
 	char *parent_acct = NULL;
 	char *parent_qos = NULL;
 	char *parent_delta_qos = NULL;
@@ -1735,11 +1789,16 @@ static int _cluster_get_assocs(mysql_conn_t *mysql_conn,
 		else
 			assoc->grp_wall = INFINITE;
 
-		if(row[ASSOC_REQ_GCH])
-			assoc->grp_cpu_mins = atoll(row[ASSOC_REQ_GCH]);
+		if(row[ASSOC_REQ_GCM])
+			assoc->grp_cpu_mins = atoll(row[ASSOC_REQ_GCM]);
 		else
 			assoc->grp_cpu_mins = INFINITE;
 
+		if(row[ASSOC_REQ_GCRM])
+			assoc->grp_cpu_run_mins = atoll(row[ASSOC_REQ_GCRM]);
+		else
+			assoc->grp_cpu_run_mins = INFINITE;
+
 		parent_acct = row[ASSOC_REQ_ACCT];
 		if(!without_parent_info
 		   && row[ASSOC_REQ_PARENT][0]) {
@@ -1821,6 +1880,12 @@ static int _cluster_get_assocs(mysql_conn_t *mysql_conn,
 				else
 					parent_mcmpj = INFINITE;
 
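+				/* Save the parent's max_cpu_run_mins; it
+				 * becomes the child's default below when
+				 * the child has none set. */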
+				if(row2[ASSOC2_REQ_MCRM])
+					parent_mcrm =
+						atoll(row2[ASSOC2_REQ_MCRM]);
+				else
+					parent_mcrm = INFINITE;
+
 				xfree(parent_qos);
 				if(row2[ASSOC2_REQ_QOS][0])
 					parent_qos =
@@ -1870,6 +1935,11 @@ static int _cluster_get_assocs(mysql_conn_t *mysql_conn,
 		else
 			assoc->max_cpu_mins_pj = parent_mcmpj;
 
+		if(row[ASSOC_REQ_MCRM])
+			assoc->max_cpu_run_mins = atoll(row[ASSOC_REQ_MCRM]);
+		else
+			assoc->max_cpu_run_mins = parent_mcrm;
+
 		assoc->qos_list = list_create(slurm_destroy_char);
 
 		/* do a plus 1 since a comma is the first thing there
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_qos.c b/src/plugins/accounting_storage/mysql/as_mysql_qos.c
index 133600955c576f3226fcf7c2b23c02ce49080fd6..778f0040cabc3aac4960d9a625c8aba51e6d36e4 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_qos.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_qos.c
@@ -88,16 +88,6 @@ static int _setup_qos_limits(slurmdb_qos_rec_t *qos,
 		xstrfmtcat(*vals, ", '%s'", qos->description);
 		xstrfmtcat(*extra, ", description='%s'",
 			   qos->description);
-
-	}
-	if((int)qos->priority >= 0) {
-		xstrcat(*cols, ", priority");
-		xstrfmtcat(*vals, ", %d", qos->priority);
-		xstrfmtcat(*extra, ", priority=%d", qos->priority);
-	} else if ((int)qos->priority == INFINITE) {
-		xstrcat(*cols, ", priority");
-		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", priority=NULL");
 	}
 
 	if((int)qos->grp_cpu_mins >= 0) {
@@ -111,6 +101,17 @@ static int _setup_qos_limits(slurmdb_qos_rec_t *qos,
 		xstrcat(*extra, ", grp_cpu_mins=NULL");
 	}
 
+	if((int)qos->grp_cpu_run_mins >= 0) {
+		xstrcat(*cols, ", grp_cpu_run_mins");
+		xstrfmtcat(*vals, ", %llu", qos->grp_cpu_run_mins);
+		xstrfmtcat(*extra, ", grp_cpu_run_mins=%llu",
+			   qos->grp_cpu_run_mins);
+	} else if((int)qos->grp_cpu_run_mins == INFINITE) {
+		xstrcat(*cols, ", grp_cpu_run_mins");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", grp_cpu_run_mins=NULL");
+	}
+
 	if((int)qos->grp_cpus >= 0) {
 		xstrcat(*cols, ", grp_cpus");
 		xstrfmtcat(*vals, ", %u", qos->grp_cpus);
@@ -175,6 +176,17 @@ static int _setup_qos_limits(slurmdb_qos_rec_t *qos,
 		xstrcat(*extra, ", max_cpu_mins_per_job=NULL");
 	}
 
+	if((int)qos->max_cpu_run_mins_pu >= 0) {
+		xstrcat(*cols, ", max_cpu_run_mins_per_user");
+		xstrfmtcat(*vals, ", %llu", qos->max_cpu_run_mins_pu);
+		xstrfmtcat(*extra, ", max_cpu_run_mins_per_user=%llu",
+			   qos->max_cpu_run_mins_pu);
+	} else if((int)qos->max_cpu_run_mins_pu == INFINITE) {
+		xstrcat(*cols, ", max_cpu_run_mins_per_user");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", max_cpu_run_mins_per_user=NULL");
+	}
+
 	if((int)qos->max_cpus_pj >= 0) {
 		xstrcat(*cols, ", max_cpus_per_job");
 		xstrfmtcat(*vals, ", %u", qos->max_cpus_pj);
@@ -273,6 +285,27 @@ static int _setup_qos_limits(slurmdb_qos_rec_t *qos,
 		xfree(preempt_val);
 	}
 
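+	/* preempt_mode is a 16 bit field, so compare against the 16 bit
+	 * truncations of NO_VAL and INFINITE; INFINITE resets the mode
+	 * to 0 (use the cluster default). */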
+	if(qos->preempt_mode == (uint16_t)INFINITE) {
+		xstrcat(*cols, ", preempt_mode");
+		xstrcat(*vals, ", 0");
+		xstrcat(*extra, ", preempt_mode=0");
+	} else if(qos->preempt_mode != (uint16_t)NO_VAL) {
+		qos->preempt_mode &= (~PREEMPT_MODE_GANG);
+		xstrcat(*cols, ", preempt_mode");
+		xstrfmtcat(*vals, ", %u", qos->preempt_mode);
+		xstrfmtcat(*extra, ", preempt_mode=%u", qos->preempt_mode);
+	}
+
+	if((int)qos->priority >= 0) {
+		xstrcat(*cols, ", priority");
+		xstrfmtcat(*vals, ", %u", qos->priority);
+		xstrfmtcat(*extra, ", priority=%u", qos->priority);
+	} else if ((int)qos->priority == INFINITE) {
+		xstrcat(*cols, ", priority");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", priority=NULL");
+	}
+
 	if((int)qos->usage_factor >= 0) {
 		xstrcat(*cols, ", usage_factor");
 		xstrfmtcat(*vals, ", %f", qos->usage_factor);
@@ -512,6 +545,7 @@ extern List as_mysql_modify_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 		qos_rec->id = id;
 		qos_rec->grp_cpus = qos->grp_cpus;
 		qos_rec->grp_cpu_mins = qos->grp_cpu_mins;
+		qos_rec->grp_cpu_run_mins = qos->grp_cpu_run_mins;
 		qos_rec->grp_jobs = qos->grp_jobs;
 		qos_rec->grp_nodes = qos->grp_nodes;
 		qos_rec->grp_submit_jobs = qos->grp_submit_jobs;
@@ -519,11 +553,13 @@ extern List as_mysql_modify_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 
 		qos_rec->max_cpus_pj = qos->max_cpus_pj;
 		qos_rec->max_cpu_mins_pj = qos->max_cpu_mins_pj;
+		qos_rec->max_cpu_run_mins_pu = qos->max_cpu_run_mins_pu;
 		qos_rec->max_jobs_pu  = qos->max_jobs_pu;
 		qos_rec->max_nodes_pj = qos->max_nodes_pj;
 		qos_rec->max_submit_jobs_pu  = qos->max_submit_jobs_pu;
 		qos_rec->max_wall_pj = qos->max_wall_pj;
 
+		qos_rec->preempt_mode = qos->preempt_mode;
 		qos_rec->priority = qos->priority;
 
 		if(qos->preempt_list) {
@@ -782,18 +818,21 @@ extern List as_mysql_get_qos(mysql_conn_t *mysql_conn, uid_t uid,
 		"description",
 		"id",
 		"grp_cpu_mins",
+		"grp_cpu_run_mins",
 		"grp_cpus",
 		"grp_jobs",
 		"grp_nodes",
 		"grp_submit_jobs",
 		"grp_wall",
 		"max_cpu_mins_per_job",
+		"max_cpu_run_mins_per_user",
 		"max_cpus_per_job",
 		"max_jobs_per_user",
 		"max_nodes_per_job",
 		"max_submit_jobs_per_user",
 		"max_wall_duration_per_job",
 		"preempt",
+		"preempt_mode",
 		"priority",
 		"usage_factor",
 	};
@@ -801,19 +840,22 @@ extern List as_mysql_get_qos(mysql_conn_t *mysql_conn, uid_t uid,
 		QOS_REQ_NAME,
 		QOS_REQ_DESC,
 		QOS_REQ_ID,
-		QOS_REQ_GCH,
+		QOS_REQ_GCM,
+		QOS_REQ_GCRM,
 		QOS_REQ_GC,
 		QOS_REQ_GJ,
 		QOS_REQ_GN,
 		QOS_REQ_GSJ,
 		QOS_REQ_GW,
 		QOS_REQ_MCMPJ,
+		QOS_REQ_MCRM,
 		QOS_REQ_MCPJ,
 		QOS_REQ_MJPU,
 		QOS_REQ_MNPJ,
 		QOS_REQ_MSJPU,
 		QOS_REQ_MWPJ,
 		QOS_REQ_PREE,
+		QOS_REQ_PREEM,
 		QOS_REQ_PRIO,
 		QOS_REQ_UF,
 		QOS_REQ_COUNT
@@ -913,10 +955,14 @@ empty:
 		if(row[QOS_REQ_NAME] && row[QOS_REQ_NAME][0])
 			qos->name =  xstrdup(row[QOS_REQ_NAME]);
 
-		if(row[QOS_REQ_GCH])
-			qos->grp_cpu_mins = atoll(row[QOS_REQ_GCH]);
+		if(row[QOS_REQ_GCM])
+			qos->grp_cpu_mins = atoll(row[QOS_REQ_GCM]);
 		else
 			qos->grp_cpu_mins = INFINITE;
+		if(row[QOS_REQ_GCRM])
+			qos->grp_cpu_run_mins = atoll(row[QOS_REQ_GCRM]);
+		else
+			qos->grp_cpu_run_mins = INFINITE;
 		if(row[QOS_REQ_GC])
 			qos->grp_cpus = atoi(row[QOS_REQ_GC]);
 		else
@@ -942,6 +988,10 @@ empty:
 			qos->max_cpu_mins_pj = atoi(row[QOS_REQ_MCMPJ]);
 		else
 			qos->max_cpu_mins_pj = INFINITE;
+		if(row[QOS_REQ_MCRM])
+			qos->max_cpu_run_mins_pu = atoll(row[QOS_REQ_MCRM]);
+		else
+			qos->max_cpu_run_mins_pu = INFINITE;
 		if(row[QOS_REQ_MCPJ])
 			qos->max_cpus_pj = atoi(row[QOS_REQ_MCPJ]);
 		else
@@ -968,6 +1018,8 @@ empty:
 				qos->preempt_bitstr = bit_alloc(g_qos_count);
 			bit_unfmt(qos->preempt_bitstr, row[QOS_REQ_PREE]+1);
 		}
+		if(row[QOS_REQ_PREEM])
+			qos->preempt_mode = atoi(row[QOS_REQ_PREEM]);
 		if(row[QOS_REQ_PRIO])
 			qos->priority = atoi(row[QOS_REQ_PRIO]);
 
diff --git a/src/plugins/preempt/qos/preempt_qos.c b/src/plugins/preempt/qos/preempt_qos.c
index 0fb3544c3d6738b6afec74eeadcaa28beb8f9d8e..be868afd320a0cad071ebfcd9120bc6b18300ee3 100644
--- a/src/plugins/preempt/qos/preempt_qos.c
+++ b/src/plugins/preempt/qos/preempt_qos.c
@@ -186,6 +186,10 @@ static int _sort_by_prio (void *x, void *y)
 /**************************************************************************/
 extern uint16_t job_preempt_mode(struct job_record *job_ptr)
 {
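+	/* A preempt mode set on the job's QOS overrides the
+	 * cluster-wide default. */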
+	if (job_ptr->qos_ptr &&
+	    ((slurmdb_qos_rec_t *)job_ptr->qos_ptr)->preempt_mode)
+		return ((slurmdb_qos_rec_t *)job_ptr->qos_ptr)->preempt_mode;
+
 	return (slurm_get_preempt_mode() & (~PREEMPT_MODE_GANG));
 }
 
diff --git a/src/sacctmgr/qos_functions.c b/src/sacctmgr/qos_functions.c
index 4c28c0a2478d512df7af92d252458f3c952b4a7d..2d92a45dfde83b51fa0c82902116a93a19bb62cf 100644
--- a/src/sacctmgr/qos_functions.c
+++ b/src/sacctmgr/qos_functions.c
@@ -39,6 +39,77 @@
 
 #include "src/sacctmgr/sacctmgr.h"
 
+static uint16_t _parse_preempt_modes(char *names)
+{
+	int i=0, start=0;
+	char *name = NULL;
+	char quote_c = '\0';
+	int quote = 0;
+	int count = 0;
+	uint16_t preempt_mode = 0;
+	uint16_t ret_mode = 0;
+
+	if(names) {
+		if (names[i] == '\"' || names[i] == '\'') {
+			quote_c = names[i];
+			quote = 1;
+			i++;
+		}
+		start = i;
+		while(names[i]) {
+			//info("got %d - %d = %d", i, start, i-start);
+			if(quote && names[i] == quote_c)
+				break;
+			else if (names[i] == '\"' || names[i] == '\'')
+				names[i] = '`';
+			else if(names[i] == ',') {
+				name = xmalloc((i-start+1));
+				memcpy(name, names+start, (i-start));
+				//info("got %s %d", name, i-start);
+
+				ret_mode = preempt_mode_num(name);
+				if(ret_mode == (uint16_t)NO_VAL) {
+					error("Unknown preempt_mode given '%s'",
+					      name);
+					xfree(name);
+					preempt_mode = (uint16_t)NO_VAL;
+					return preempt_mode;
+				}
+				preempt_mode |= ret_mode;
+				count++;
+				xfree(name);
+
+				i++;
+				start = i;
+				if(!names[i]) {
+					info("There is a problem with "
+					     "your request.  It appears "
+					     "your list ends in a comma.");
+					break;
+				}
+			}
+			i++;
+		}
+
+		name = xmalloc((i-start+1));
+		memcpy(name, names+start, (i-start));
+		//info("got %s %d", name, i-start);
+
+		ret_mode = preempt_mode_num(name);
+		if(ret_mode == (uint16_t)NO_VAL) {
+			error("Unknown preempt_mode given '%s'",
+			      name);
+			xfree(name);
+			preempt_mode = (uint16_t)NO_VAL;
+			return preempt_mode;
+		}
+		preempt_mode |= ret_mode;
+		count++;
+		xfree(name);
+	}
+	return preempt_mode;
+}
+
 static int _set_cond(int *start, int argc, char *argv[],
 		     slurmdb_qos_cond_t *qos_cond,
 		     List format_list)
@@ -123,6 +194,19 @@ static int _set_cond(int *start, int argc, char *argv[],
 				}
 			}
 			list_iterator_destroy(itr);
+		} else if (!strncasecmp (argv[i], "PreemptMode",
+					 MAX(command_len, 3))) {
+			if(!qos_cond)
+				continue;
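+			/* Multiple modes may be given; they are OR'd
+			 * together to widen the query filter. */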
+			qos_cond->preempt_mode |=
+				_parse_preempt_modes(argv[i]+end);
+			if(qos_cond->preempt_mode == (uint16_t)NO_VAL) {
+				fprintf(stderr,
+					" Bad Preempt Mode given: %s\n",
+					argv[i]);
+				exit_code = 1;
+			} else
+				set = 1;
 		} else {
 			exit_code=1;
 			fprintf(stderr, " Unknown condition: %s\n"
@@ -276,7 +360,7 @@ static int _set_rec(int *start, int argc, char *argv[],
 					argv[i]);
 			}
 		} else if (!strncasecmp (argv[i], "Preempt",
-					 MAX(command_len, 3))) {
+					 MAX(command_len, 7))) {
 			if(!qos)
 				continue;
 
@@ -294,6 +378,18 @@ static int _set_rec(int *start, int argc, char *argv[],
 				set = 1;
 			else
 				exit_code = 1;
+		} else if (!strncasecmp (argv[i], "PreemptMode",
+					 MAX(command_len, 8))) {
+			if(!qos)
+				continue;
+			qos->preempt_mode = preempt_mode_num(argv[i]+end);
+			if(qos->preempt_mode == (uint16_t)NO_VAL) {
+				fprintf(stderr,
+					" Bad Preempt Mode given: %s\n",
+					argv[i]);
+				exit_code = 1;
+			} else
+				set = 1;
 		} else if (!strncasecmp (argv[i], "Priority",
 					 MAX(command_len, 3))) {
 			if(!qos)
@@ -495,6 +591,7 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 		PRINT_MAXS,
 		PRINT_MAXW,
 		PRINT_PREE,
+		PRINT_PREEM,
 		PRINT_PRIO,
 		PRINT_UF,
 	};
@@ -512,7 +609,7 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 		list_destroy(format_list);
 		return SLURM_ERROR;
 	} else if(!list_count(format_list)) {
-		slurm_addto_char_list(format_list, "N,Prio,Pree,"
+		slurm_addto_char_list(format_list, "N,Prio,Preempt,PreemptM,"
 				      "GrpJ,GrpN,GrpS,MaxJ,MaxN,MaxS,MaxW");
 	}
 
@@ -632,11 +729,17 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 			field->len = 10;
 			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("Preempt", object,
-				       MAX(command_len, 3))) {
+				       MAX(command_len, 7))) {
 			field->type = PRINT_PREE;
 			field->name = xstrdup("Preempt");
 			field->len = 10;
 			field->print_routine = sacctmgr_print_qos_bitstr;
+		} else if(!strncasecmp("PreemptMode", object,
+				       MAX(command_len, 8))) {
+			field->type = PRINT_PREEM;
+			field->name = xstrdup("PreemptMode");
+			field->len = 11;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("Priority", object,
 				       MAX(command_len, 3))) {
 			field->type = PRINT_PRIO;
@@ -775,6 +878,19 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 					field, g_qos_list, qos->preempt_bitstr,
 					(curr_inx == field_count));
 				break;
+			case PRINT_PREEM:
+			{
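+				/* preempt_mode of 0 means use the
+				 * cluster-wide default */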
+				char *tmp_char = "cluster";
+				if(qos->preempt_mode)
+					tmp_char = xstrtolower(
+						preempt_mode_string(
+							qos->preempt_mode));
+				field->print_routine(
+					field,
+					tmp_char,
+					(curr_inx == field_count));
+				break;
+			}
 			case PRINT_PRIO:
 				field->print_routine(
 					field, qos->priority,
diff --git a/src/sacctmgr/sacctmgr.c b/src/sacctmgr/sacctmgr.c
index 9bd2e3cba8f0c95d18f93cd0427e12f6d9e9bab5..87f5eba25b71398a1a72ea5400bb59839fc3140a 100644
--- a/src/sacctmgr/sacctmgr.c
+++ b/src/sacctmgr/sacctmgr.c
@@ -625,8 +625,8 @@ static void _show_it (int argc, char *argv[])
 		error_code = sacctmgr_list_problem((argc - 1), &argv[1]);
 	} else if (strncasecmp (argv[0], "QOS", MAX(command_len, 1)) == 0) {
 		error_code = sacctmgr_list_qos((argc - 1), &argv[1]);
-	} else if (strncasecmp (argv[0], "Transactions",
-				MAX(command_len, 1)) == 0) {
+	} else if (!strncasecmp (argv[0], "Transactions", MAX(command_len, 1))
+		   || !strncasecmp (argv[0], "Txn", MAX(command_len, 1))) {
 		error_code = sacctmgr_list_txn((argc - 1), &argv[1]);
 	} else if (strncasecmp (argv[0], "Users", MAX(command_len, 1)) == 0) {
 		error_code = sacctmgr_list_user((argc - 1), &argv[1]);
@@ -858,11 +858,12 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
                             Start=, States=, and User=                     \n\
                                                                            \n\
        list qos           - Descriptions=, Format=, Ids=, Names=,          \n\
-                            and WithDeleted                                \n\
+                            PreemptMode=, and WithDeleted                  \n\
        add qos            - Description=, GrpCPUMins=, GrpCPUs=, GrpJobs=, \n\
                             GrpNodes=, GrpSubmitJob=, GrpWall=,            \n\
                             MaxCPUMins=, MaxJobs=, MaxNodes=, MaxSubmitJobs=,\n\
-                            MaxWall=, Preempt=, Priority=, and Names=      \n\
+                            MaxWall=, Preempt=, PreemptMode=, Priority=,   \n\
+                            and Names=                                     \n\
        delete qos         - Descriptions=, IDs=, and Names=                \n\
                                                                            \n\
        list transactions  - Accounts=, Action=, Actor=, Clusters=, End=,   \n\
@@ -932,7 +933,7 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
                             GrpNodes, GrpSubmitJob, GrpWall, ID,           \n\
                             MaxCPUMins, MaxCPUs, MaxJobs, MaxNodes,        \n\
                             MaxSubmitJobs, MaxWall, Name,                  \n\
-                            Preempt, Priority, UsageFactor                 \n\
+                            Preempt, PreemptMode, Priority, UsageFactor    \n\
                                                                            \n\
        Transactions       - Action, Actor, Info, TimeStamp, Where          \n\
                                                                            \n\
diff --git a/src/sacctmgr/user_functions.c b/src/sacctmgr/user_functions.c
index 4e8d76b1855d1ab9c07c08d4c155c82fb3fee763..647f2016134584d484d46950f4ce3088f76494cb 100644
--- a/src/sacctmgr/user_functions.c
+++ b/src/sacctmgr/user_functions.c
@@ -116,6 +116,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 			if(slurm_addto_char_list(assoc_cond->user_list,
 						 argv[i]+end))
 				u_set = 1;
+			else
+				exit_code=1;
 		} else if (!strncasecmp (argv[i], "Account",
 					 MAX(command_len, 2))
 			   || !strncasecmp (argv[i], "Acct",
@@ -127,6 +129,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 			if(slurm_addto_char_list(assoc_cond->acct_list,
 						 argv[i]+end))
 				a_set = 1;
+			else
+				exit_code=1;
 		} else if (!strncasecmp (argv[i], "AdminLevel",
 					 MAX(command_len, 2))) {
 			user_cond->admin_level =
@@ -141,6 +145,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 			if(slurm_addto_char_list(assoc_cond->cluster_list,
 						 argv[i]+end))
 				a_set = 1;
+			else
+				exit_code=1;
 		} else if (!strncasecmp (argv[i], "DefaultAccount",
 					 MAX(command_len, 8))) {
 			if(!user_cond->def_acct_list) {
@@ -150,6 +156,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 			if(slurm_addto_char_list(user_cond->def_acct_list,
 						 argv[i]+end))
 				u_set = 1;
+			else
+				exit_code=1;
 		} else if (!strncasecmp (argv[i], "DefaultWCKey",
 					 MAX(command_len, 8))) {
 			if(!user_cond->def_wckey_list) {
@@ -159,6 +167,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 			if(slurm_addto_char_list(user_cond->def_wckey_list,
 						 argv[i]+end))
 				u_set = 1;
+			else
+				exit_code=1;
 		} else if (!strncasecmp (argv[i], "Format",
 					 MAX(command_len, 1))) {
 			if(format_list)
@@ -173,6 +183,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 			if(slurm_addto_char_list(assoc_cond->fairshare_list,
 						 argv[i]+end))
 				a_set = 1;
+			else
+				exit_code=1;
 		} else if (!strncasecmp (argv[i], "GrpCPUMins",
 					 MAX(command_len, 7))) {
 			if(!assoc_cond->grp_cpu_mins_list)
@@ -181,6 +193,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 			if(slurm_addto_char_list(assoc_cond->grp_cpu_mins_list,
 						 argv[i]+end))
 				a_set = 1;
+			else
+				exit_code=1;
 		} else if (!strncasecmp (argv[i], "GrpCpus",
 					 MAX(command_len, 7))) {
 			if(!assoc_cond->grp_cpus_list)
@@ -189,6 +203,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 			if(slurm_addto_char_list(assoc_cond->grp_cpus_list,
 						 argv[i]+end))
 				a_set = 1;
+			else
+				exit_code=1;
 		} else if (!strncasecmp (argv[i], "GrpJobs",
 					 MAX(command_len, 4))) {
 			if(!assoc_cond->grp_jobs_list)
@@ -197,6 +213,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 			if(slurm_addto_char_list(assoc_cond->grp_jobs_list,
 						 argv[i]+end))
 				a_set = 1;
+			else
+				exit_code=1;
 		} else if (!strncasecmp (argv[i], "GrpNodes",
 					 MAX(command_len, 4))) {
 			if(!assoc_cond->grp_nodes_list)
@@ -205,6 +223,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 			if(slurm_addto_char_list(assoc_cond->grp_nodes_list,
 						 argv[i]+end))
 				a_set = 1;
+			else
+				exit_code=1;
 		} else if (!strncasecmp (argv[i], "GrpSubmitJobs",
 					 MAX(command_len, 4))) {
 			if(!assoc_cond->grp_submit_jobs_list)
@@ -214,6 +234,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 				   assoc_cond->grp_submit_jobs_list,
 				   argv[i]+end))
 				a_set = 1;
+			else
+				exit_code=1;
 		} else if (!strncasecmp (argv[i], "GrpWall",
 					 MAX(command_len, 4))) {
 			if(!assoc_cond->grp_wall_list)
@@ -223,6 +245,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 				   assoc_cond->grp_wall_list,
 				   argv[i]+end))
 				a_set = 1;
+			else
+				exit_code=1;
 		} else if (!strncasecmp (argv[i], "MaxCPUMinsPerJob",
 					 MAX(command_len, 7))) {
 			if(!assoc_cond->max_cpu_mins_pj_list)
@@ -232,6 +256,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 				   assoc_cond->max_cpu_mins_pj_list,
 				   argv[i]+end))
 				a_set = 1;
+			else
+				exit_code=1;
 		} else if (!strncasecmp (argv[i], "MaxCpusPerJob",
 					 MAX(command_len, 7))) {
 			if(!assoc_cond->max_cpus_pj_list)
@@ -241,6 +267,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 				   assoc_cond->max_cpus_pj_list,
 				   argv[i]+end))
 				a_set = 1;
+			else
+				exit_code=1;
 		} else if (!strncasecmp (argv[i], "MaxJobs",
 					 MAX(command_len, 4))) {
 			if(!assoc_cond->max_jobs_list)
@@ -250,6 +278,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 				   assoc_cond->max_jobs_list,
 				   argv[i]+end))
 				a_set = 1;
+			else
+				exit_code=1;
 		} else if (!strncasecmp (argv[i], "MaxNodesPerJob",
 					 MAX(command_len, 4))) {
 			if(!assoc_cond->max_nodes_pj_list)
@@ -259,6 +289,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 				   assoc_cond->max_nodes_pj_list,
 				   argv[i]+end))
 				a_set = 1;
+			else
+				exit_code=1;
 		} else if (!strncasecmp (argv[i], "MaxSubmitJobs",
 					 MAX(command_len, 4))) {
 			if(!assoc_cond->max_submit_jobs_list)
@@ -268,6 +300,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 				   assoc_cond->max_submit_jobs_list,
 				   argv[i]+end))
 				a_set = 1;
+			else
+				exit_code=1;
 		} else if (!strncasecmp (argv[i], "MaxWallDurationPerJob",
 					 MAX(command_len, 4))) {
 			if(!assoc_cond->max_wall_pj_list)
@@ -277,6 +311,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 				   assoc_cond->max_wall_pj_list,
 				   argv[i]+end))
 				a_set = 1;
+			else
+				exit_code=1;
 		} else if (!strncasecmp (argv[i], "Partition",
 					 MAX(command_len, 3))) {
 			if(!assoc_cond->partition_list) {
@@ -287,6 +323,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 				   assoc_cond->partition_list,
 				   argv[i]+end))
 				a_set = 1;
+			else
+				exit_code=1;
 		} else if (!strncasecmp (argv[i], "QosLevel",
 					 MAX(command_len, 1))) {
 			if(!assoc_cond->qos_list) {
@@ -299,10 +337,12 @@ static int _set_cond(int *start, int argc, char *argv[],
 					db_conn, my_uid, NULL);
 			}
 
-			slurmdb_addto_qos_char_list(assoc_cond->qos_list,
-						    g_qos_list, argv[i]+end,
-						    option);
-			u_set = 1;
+			if(slurmdb_addto_qos_char_list(assoc_cond->qos_list,
+						       g_qos_list, argv[i]+end,
+						       option))
+				u_set = 1;
+			else
+				exit_code=1;
 		} else {
 			exit_code=1;
 			fprintf(stderr, " Unknown condition: %s\n"
@@ -732,21 +772,24 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 		if(!end
 		   || !strncasecmp (argv[i], "Names", MAX(command_len, 1))
 		   || !strncasecmp (argv[i], "Users", MAX(command_len, 1))) {
-			slurm_addto_char_list(assoc_cond->user_list,
-					      argv[i]+end);
+			if(!slurm_addto_char_list(assoc_cond->user_list,
+						 argv[i]+end))
+				exit_code=1;
 		} else if (!strncasecmp (argv[i], "Accounts",
 					 MAX(command_len, 2))
 			   || !strncasecmp (argv[i], "Acct",
 					    MAX(command_len, 4))) {
-			slurm_addto_char_list(assoc_cond->acct_list,
-					      argv[i]+end);
+			if(!slurm_addto_char_list(assoc_cond->acct_list,
+						  argv[i]+end))
+				exit_code=1;
 		} else if (!strncasecmp (argv[i], "AdminLevel",
 					 MAX(command_len, 2))) {
 			admin_level = str_2_slurmdb_admin_level(argv[i]+end);
 		} else if (!strncasecmp (argv[i], "Clusters",
 					 MAX(command_len, 1))) {
-			slurm_addto_char_list(assoc_cond->cluster_list,
-					      argv[i]+end);
+			if(!slurm_addto_char_list(assoc_cond->cluster_list,
+						  argv[i]+end))
+				exit_code=1;
 		} else if (!strncasecmp (argv[i], "DefaultAccount",
 					 MAX(command_len, 8))) {
 			if(default_acct) {
diff --git a/src/slurmctld/partition_mgr.c b/src/slurmctld/partition_mgr.c
index 4cf44c6746e5550e68d8f6489cbdaac672b8d9af..7b9edc21d09a77ce188e6f2f4231e86d4187fb92 100644
--- a/src/slurmctld/partition_mgr.c
+++ b/src/slurmctld/partition_mgr.c
@@ -742,7 +742,7 @@ int init_part_conf(void)
 	xfree(default_part.allow_groups);
 	xfree(default_part.allow_uids);
 	xfree(default_part.allow_alloc_nodes);
-	xfree(default_part.alternate); 	
+	xfree(default_part.alternate);
 	FREE_NULL_BITMAP(default_part.node_bitmap);
 
 	if (part_list)		/* delete defunct partitions */