diff --git a/doc/man/man1/sacctmgr.1 b/doc/man/man1/sacctmgr.1
index f3323d75c8fa8c9b58d59a4d9f48e7f8e00a2c8e..71f87901adfd44cf415dae45015805be31ff12d7 100644
--- a/doc/man/man1/sacctmgr.1
+++ b/doc/man/man1/sacctmgr.1
@@ -193,7 +193,7 @@ account and user associations inside their realm.
  
 .TP
 \fIqos\fR
-Quality of Service (For use with MOAB only).
+Quality of Service.
 
 .TP
 \fItransaction\fR
@@ -308,8 +308,8 @@ This is a c\-node limit on BlueGene systems.
 
 .TP
 \fIMaxSubmitJobs\fP=<max jobs>
-Maximum number of jobs which can this account can have in a pending or running
-state at any time.
+Maximum number of jobs which this association can have in a
+pending or running state at any time.
 Default is the cluster's limit.
 To clear a previously set value use the modify command with a new value of \-1.
 
@@ -325,7 +325,6 @@ To clear a previously set value use the modify command with a new value of \-1.
 
 .TP
 \fIQosLevel\fP<operator><comma separated list of qos names>
-(For use with MOAB only.)
 Specify the default Quality of Service's that jobs are able to run at
 for this account.  To get a list of valid QOS's use 'sacctmgr list qos'. 
 This value will override it's parents value and push down to it's
@@ -347,6 +346,7 @@ Add the specified <qos> value to the current \fIQosLevel\fP .
 Remove the specified <qos> value from the current \fIQosLevel\fP.
 .RE
 
+
 .SH "SPECIFICATIONS FOR ACCOUNTS"
 
 .TP
@@ -378,7 +378,7 @@ this account may have.  These extra options can be found in the
 general specifications list above in the \fIGENERAL SPECIFICATIONS FOR
 ASSOCIATION BASED ENTITIES\fP section.
 
-.SH "LIST/SHOW ACCOUNTS FORMAT OPTIONS"
+.SH "LIST/SHOW ACCOUNT FORMAT OPTIONS"
 
 .TP
 \fIAccount\fP
@@ -462,6 +462,120 @@ from the parent.)
 .RE
 
 
+.SH "LIST/SHOW ASSOCIATION FORMAT OPTIONS"
+
+.TP
+\fIAccount\fP
+The name of a bank account in the association.
+
+.TP
+\fICluster\fP
+The name of a cluster in the association.
+
+.TP
+\fIFairshare\fP
+Number used in conjunction with other associations to determine job priority.
+
+.TP
+\fIGrpCPUMins\fP
+Maximum number of CPU minutes running jobs are able to be allocated in
+aggregate for this association and all associations which are children
+of this association.
+
+.TP
+\fIGrpCPUs\fP
+Maximum number of CPUs running jobs are able to be allocated in aggregate for
+this association and all associations which are children of this association.
+
+.TP
+\fIGrpJobs\fP
+Maximum number of running jobs in aggregate for
+this association and all associations which are children of this association.
+
+.TP
+\fIGrpNodes\fP
+Maximum number of nodes running jobs are able to be allocated in aggregate for
+this association and all associations which are children of this association.
+
+.TP
+\fIGrpSubmitJobs\fP
+Maximum number of jobs which can be in a pending or running state at any time
+in aggregate for this association and all associations which are children of
+this association.
+
+.TP
+\fIGrpWall\fP
+Maximum wall clock time running jobs are able to be allocated in aggregate for
+this association and all associations which are children of this association.
+
+.TP
+\fIID\fP
+The id of the association.
+
+.TP
+\fILFT\fP
+Associations are kept in a hierarchy; this is the leftmost
+position in the hierarchy.  When used with the RGT value, all
+associations with an LFT between this LFT and the RGT are
+children of this association.
+
+.TP
+\fIMaxCPUMins\fP 
+Maximum number of CPU minutes each job is able to use.
+
+.TP
+\fIMaxCPUs\fP
+Maximum number of CPUs each job is able to use.
+
+.TP
+\fIMaxJobs\fP
+Maximum number of jobs each user is allowed to run at one time.
+
+.TP
+\fIMaxNodes\fP
+Maximum number of nodes each job is able to use.
+
+.TP
+\fIMaxSubmitJobs\fP
+Maximum number of jobs in a pending or running state at any time.
+
+.TP
+\fIMaxWall\fP
+Maximum wall clock time each job is able to use.
+
+.TP
+\fIQos\fP
+Valid QOS\' for this association.
+
+.TP
+\fIParentID\fP
+The association id of the parent of this association.
+
+.TP
+\fIParentName\fP
+The account name of the parent of this association.
+
+.TP
+\fIPartition\fP
+The name of a partition in the association.
+
+.TP
+\fIRawQOS\fP
+The numeric values of valid QOS\' for this association.
+
+.TP
+\fIRGT\fP
+Associations are kept in a hierarchy; this is the rightmost
+position in the hierarchy.  When used with the LFT value, all
+associations with an LFT between the LFT and this RGT are
+children of this association.
+
+.TP
+\fIUser\fP
+The name of a user in the association.
+.RE
+
+
 .SH "SPECIFICATIONS FOR CLUSTERS"
 
 .TP
@@ -536,12 +650,201 @@ account or list user.
 
 
 .SH "SPECIFICATIONS FOR QOS"
+
 .TP
-\fIDescription\fP=<description>
-An arbitrary string describing an account.
+\fIGrpCPUMins\fP
+Maximum number of CPU minutes running jobs are able to be allocated in 
+aggregate for this QOS.
+
+.TP
+\fIGrpCPUs\fP
+Maximum number of CPUs running jobs are able to be allocated in aggregate for 
+this QOS.
+
+.TP
+\fIGrpJobs\fP
+Maximum number of running jobs in aggregate for this QOS.
+
+.TP
+\fIGrpNodes\fP
+Maximum number of nodes running jobs are able to be allocated in aggregate for 
+this QOS.
+
+.TP
+\fIGrpSubmitJobs\fP
+Maximum number of jobs which can be in a pending or running state at any time 
+in aggregate for this QOS.
+
+.TP
+\fIGrpWall\fP
+Maximum wall clock time running jobs are able to be allocated in aggregate for 
+this QOS.
+
+.TP
+\fIID\fP
+The id of the QOS.
+
+.TP
+\fIMaxCPUMins\fP 
+Maximum number of CPU minutes each job is able to use.
+
+.TP
+\fIMaxCPUs\fP
+Maximum number of CPUs each job is able to use.
+
+.TP
+\fIMaxJobs\fP
+Maximum number of jobs each user is allowed to run at one time.
+
 .TP
-\fINames\fP=<qos>
-Names of qos.
+\fIMaxNodes\fP
+Maximum number of nodes each job is able to use.
+
+.TP
+\fIMaxSubmitJobs\fP
+Maximum number of jobs in a pending or running state at any time per user.
+
+.TP
+\fIMaxWall\fP
+Maximum wall clock time each job is able to use.
+
+.TP
+\fIName\fP
+Name of the QOS.
+
+.TP
+\fIPreempt\fP
+Other QOS\' this QOS can preempt.
+
+.TP
+\fIPriority\fP
+What priority will be added to a job\'s priority when using this QOS.
+
+.TP
+\fIUsageFactor\fP
+Usage factor when running with this QOS.
+.RE
+
+
+.SH "LIST/SHOW QOS FORMAT OPTIONS"
+
+.TP
+\fIDescription\fP
+An arbitrary string describing a QOS.
+
+.TP
+\fIGrpCPUMins\fP
+Maximum number of CPU minutes running jobs are able to be allocated in 
+aggregate for this QOS.
+To clear a previously set value use the modify command with a new
+value of \-1. 
+(NOTE: This limit only applies when using the Priority Multifactor
+plugin.  The time is decayed using the value of PriorityDecayHalfLife
+or PriorityUsageResetPeriod as set in slurm.conf.  Currently, when
+this limit is reached, jobs will be delayed until they are able to run
+inside the limit.  No jobs will be killed if this limit is reached;
+this will change in future versions of SLURM.)
+
+.TP
+\fIGrpCPUs\fP
+Maximum number of CPUs running jobs are able to be allocated in aggregate for 
+this QOS.
+To clear a previously set value use the modify command with a new
+value of \-1.  (NOTE: This limit is not currently enforced in SLURM.
+You can still set this, but have to wait for future versions of SLURM
+before it is enforced.)
+
+.TP
+\fIGrpJobs\fP
+Maximum number of running jobs in aggregate for this QOS.
+To clear a previously set value use the modify command with a new value of \-1.
+
+.TP
+\fIGrpNodes\fP
+Maximum number of nodes running jobs are able to be allocated in aggregate for 
+this QOS.
+To clear a previously set value use the modify command with a new value of \-1.
+
+.TP
+\fIGrpSubmitJobs\fP
+Maximum number of jobs which can be in a pending or running state at any time 
+in aggregate for this QOS.
+To clear a previously set value use the modify command with a new value of \-1.
+
+.TP
+\fIGrpWall\fP
+Maximum wall clock time running jobs are able to be allocated in aggregate for 
+this QOS.
+To clear a previously set value use the modify command with a new value of \-1.
+(NOTE: This limit only applies when using the Priority Multifactor
+plugin.  The time is decayed using the value of PriorityDecayHalfLife
+or PriorityUsageResetPeriod as set in slurm.conf.  Currently, when
+this limit is reached, jobs will be delayed until they are able to run
+inside the limit.  No jobs will be killed if this limit is reached;
+this will change in future versions of SLURM.)
+
+.TP
+\fIMaxCPUMins\fP 
+Maximum number of CPU minutes each job is able to use.
+To clear a previously set value use the modify command with a new
+value of \-1. 
+
+.TP
+\fIMaxCPUs\fP
+Maximum number of CPUs each job is able to use.
+To clear a previously set value use the modify command with a new
+value of \-1.  (NOTE: This limit is not currently enforced in SLURM.
+You can still set this, but have to wait for future versions of SLURM
+before it is enforced.)
+
+.TP
+\fIMaxJobs\fP
+Maximum number of jobs each user is allowed to run at one time.
+To clear a previously set value use the modify command with a new value of \-1.
+
+.TP
+\fIMaxNodes\fP
+Maximum number of nodes each job is able to use.
+To clear a previously set value use the modify command with a new value of \-1.
+
+.TP
+\fIMaxSubmitJobs\fP
+Maximum number of jobs in a pending or running state at any time per user.
+To clear a previously set value use the modify command with a new value of \-1.
+
+.TP
+\fIMaxWall\fP
+Maximum wall clock time each job is able to use.
+<max wall> format is <min> or <min>:<sec> or <hr>:<min>:<sec> or 
+<days>\-<hr>:<min>:<sec> or <days>\-<hr>.
+The value is recorded in minutes with rounding as needed.
+To clear a previously set value use the modify command with a new value of \-1.
+
+.TP
+\fIName\fP
+Name of the QOS.  Needed for creation.
+
+.TP
+\fIPreempt\fP
+Other QOS\' this QOS can preempt.
+Setting Preempt to '' (two single
+quotes with nothing between them) restores its default setting.  You
+can also use the operators += and \-= to add or remove certain QOS's
+from a QOS list.
+
+.TP
+\fIPriority\fP
+What priority will be added to a job\'s priority when using this QOS.
+To clear a previously set value use the modify command with a new value of \-1.
+
+.TP
+\fIUsageFactor\fP
+Usage factor when running with this QOS.  This is a float that is
+factored into the usage time of running jobs.  For example, if the
+usage factor of a QOS is 2, every CPU second a job runs counts as 2;
+if the usage factor is .5, every second counts as only half the time.
+To clear a previously set value use the modify command with a new value of \-1.
+
 .RE
 
 
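As a quick illustration of the QOS limit and format fields documented in the
hunks above, a minimal sacctmgr session might look like the following; the
QOS name 'debug' and the limit values are hypothetical, option spellings
follow the listings above, and a value of -1 clears a previously set limit:

  sacctmgr add qos name=debug priority=10 maxwall=30 maxsubmitjobs=100
  sacctmgr list qos format=Name,Priority,MaxWall,MaxSubmitJobs,UsageFactor
  sacctmgr modify qos name=debug set maxwall=-1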
@@ -776,13 +1079,13 @@ allocated in aggregate for this association and all association which
 are children of this association. 
 .TP
 \fIFairShare=\fP
-To be used with a scheduler like MOAB to determine priority.
+Number used in conjunction with other associations to determine job priority.
 .TP
 \fIMaxJobs=\fP
-Maximum number of jobs the children of this account can run.
+Maximum number of jobs the children of this association can run.
 .TP
 \fIMaxNodesPerJob=\fP
-Maximum number of nodes per job the children of this account can run.
+Maximum number of nodes per job the children of this association can run.
 .TP
 \fIMaxProcSecondsPerJob=\fP
 Maximum cpu seconds children of this accounts jobs can run.
@@ -850,13 +1153,13 @@ allocated in aggregate for this association and all association which
 are children of this association. 
 .TP
 \fIFairShare=\fP
-To be used with a scheduler like MOAB to determine priority.
+Number used in conjunction with other associations to determine job priority.
 .TP
 \fIMaxJobs=\fP
-Maximum number of jobs the children of this account can run.
+Maximum number of jobs the children of this association can run.
 .TP
 \fIMaxNodesPerJob=\fP
-Maximum number of nodes per job the children of this account can run.
+Maximum number of nodes per job the children of this association can run.
 .TP
 \fIMaxProcSecondsPerJob=\fP
 Maximum cpu seconds children of this accounts jobs can run.
@@ -898,7 +1201,7 @@ system wide default account name
 \fBMust be defined on the first occurrence of the user.\fP
 .TP
 \fIFairShare=\fP
-To be used with a scheduler like MOAB to determine priority.
+Number used in conjunction with other associations to determine job priority.
 .TP
 \fIMaxJobs=\fP
 Maximum number of jobs this user can run.
@@ -1029,7 +1332,6 @@ right> sacctmgr modify user name=adam set fairshare=10 where cluster=tux
 .br
 
 .br
-(For use with MOAB only)
 When changing qos for something only use the '=' operator when wanting
 to explicitly set the qos to something.  In most cases you will want
 to use the '+=' or '\-=' operator to either add to or remove from the
diff --git a/src/common/assoc_mgr.c b/src/common/assoc_mgr.c
index 68f1c5f8f1125beac7ef62316f699d3339bacd29..740836bfe411357c84b8a7bf46da757cd7f7ed8f 100644
--- a/src/common/assoc_mgr.c
+++ b/src/common/assoc_mgr.c
@@ -1015,27 +1015,33 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc,
 	debug3("found correct association");
 	if (assoc_pptr)
 		*assoc_pptr = ret_assoc;
-	assoc->id = ret_assoc->id;
-	if(!assoc->user)
-		assoc->user = ret_assoc->user;
-	assoc->uid = ret_assoc->uid;
+
+	assoc->id              = ret_assoc->id;
 
 	if(!assoc->acct)
-		assoc->acct = ret_assoc->acct;
+		assoc->acct    = ret_assoc->acct;
+	
+	if(!assoc->childern_list)
+		assoc->childern_list = ret_assoc->childern_list;
+
 	if(!assoc->cluster)
 		assoc->cluster = ret_assoc->cluster;
-	if(!assoc->partition)
-		assoc->partition = ret_assoc->partition;
 
-	assoc->shares_raw       = ret_assoc->shares_raw;
-
-	assoc->grp_cpu_mins   = ret_assoc->grp_cpu_mins;
+	assoc->grp_cpu_mins    = ret_assoc->grp_cpu_mins;
 	assoc->grp_cpus        = ret_assoc->grp_cpus;
 	assoc->grp_jobs        = ret_assoc->grp_jobs;
 	assoc->grp_nodes       = ret_assoc->grp_nodes;
 	assoc->grp_submit_jobs = ret_assoc->grp_submit_jobs;
 	assoc->grp_wall        = ret_assoc->grp_wall;
 
+	assoc->grp_used_cpus   = ret_assoc->grp_used_cpus;
+	assoc->grp_used_nodes  = ret_assoc->grp_used_nodes;
+	assoc->grp_used_wall   = ret_assoc->grp_used_wall;
+
+	assoc->level_shares    = ret_assoc->level_shares;
+
+	assoc->lft             = ret_assoc->lft;
+
 	assoc->max_cpu_mins_pj = ret_assoc->max_cpu_mins_pj;
 	assoc->max_cpus_pj     = ret_assoc->max_cpus_pj;
 	assoc->max_jobs        = ret_assoc->max_jobs;
@@ -1043,12 +1049,6 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc,
 	assoc->max_submit_jobs = ret_assoc->max_submit_jobs;
 	assoc->max_wall_pj     = ret_assoc->max_wall_pj;
 
-	if(assoc->valid_qos) {
-		FREE_NULL_BITMAP(assoc->valid_qos);
-		assoc->valid_qos = bit_copy(ret_assoc->valid_qos);
-	} else
-		assoc->valid_qos = ret_assoc->valid_qos;
-
 	if(assoc->parent_acct) {
 		xfree(assoc->parent_acct);
 		assoc->parent_acct       = xstrdup(ret_assoc->parent_acct);
@@ -1057,6 +1057,35 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc,
 	assoc->parent_assoc_ptr          = ret_assoc->parent_assoc_ptr;
 	assoc->parent_id                 = ret_assoc->parent_id;
 
+	if(!assoc->partition)
+		assoc->partition = ret_assoc->partition;
+
+	if(!assoc->qos_list)
+		assoc->qos_list = ret_assoc->qos_list;
+
+	assoc->rgt              = ret_assoc->rgt;
+
+	assoc->shares_norm      = ret_assoc->shares_norm;
+	assoc->shares_raw       = ret_assoc->shares_raw;
+
+	assoc->uid              = ret_assoc->uid;
+
+	assoc->usage_efctv      = ret_assoc->usage_efctv;
+	assoc->usage_norm       = ret_assoc->usage_norm;
+	assoc->usage_raw        = ret_assoc->usage_raw;
+
+	assoc->used_jobs        = ret_assoc->used_jobs;
+	assoc->used_submit_jobs = ret_assoc->used_submit_jobs;
+
+	if(!assoc->user)
+		assoc->user = ret_assoc->user;
+
+	if(assoc->valid_qos) {
+		FREE_NULL_BITMAP(assoc->valid_qos);
+		assoc->valid_qos = bit_copy(ret_assoc->valid_qos);
+	} else
+		assoc->valid_qos = ret_assoc->valid_qos;
+
 	slurm_mutex_unlock(&assoc_mgr_association_lock);
 
 	return SLURM_SUCCESS;
@@ -1118,6 +1147,9 @@ extern int assoc_mgr_fill_in_user(void *db_conn, acct_user_rec_t *user,
 		user->default_wckey = found_user->default_wckey;
 	if(!user->name)
 		user->name = found_user->name;
+	user->uid = found_user->uid;
+	if(!user->wckey_list)
+		user->wckey_list = found_user->wckey_list;
 
 	slurm_mutex_unlock(&assoc_mgr_user_lock);
 	return SLURM_SUCCESS;
@@ -1168,9 +1200,6 @@ extern int assoc_mgr_fill_in_qos(void *db_conn, acct_qos_rec_t *qos,
 
 	qos->id = found_qos->id;
 
-	if(!qos->job_flags)
-		qos->job_flags = found_qos->job_flags;
-
 	if(!qos->job_list)
 		qos->job_list = found_qos->job_list;
 
@@ -1181,12 +1210,18 @@ extern int assoc_mgr_fill_in_qos(void *db_conn, acct_qos_rec_t *qos,
 	qos->grp_submit_jobs = found_qos->grp_submit_jobs;
 	qos->grp_wall        = found_qos->grp_wall;
 
-	qos->max_cpu_mins_pu = found_qos->max_cpu_mins_pu;
-	qos->max_cpus_pu     = found_qos->max_cpus_pu;
+	qos->grp_used_cpus   = found_qos->grp_used_cpus;
+	qos->grp_used_jobs   = found_qos->grp_used_jobs;
+	qos->grp_used_nodes  = found_qos->grp_used_nodes;
+	qos->grp_used_submit_jobs   = found_qos->grp_used_submit_jobs;
+	qos->grp_used_wall   = found_qos->grp_used_wall;
+
+	qos->max_cpu_mins_pj = found_qos->max_cpu_mins_pj;
+	qos->max_cpus_pj     = found_qos->max_cpus_pj;
 	qos->max_jobs_pu     = found_qos->max_jobs_pu;
-	qos->max_nodes_pu    = found_qos->max_nodes_pu;
+	qos->max_nodes_pj    = found_qos->max_nodes_pj;
 	qos->max_submit_jobs_pu = found_qos->max_submit_jobs_pu;
-	qos->max_wall_pu     = found_qos->max_wall_pu;
+	qos->max_wall_pj     = found_qos->max_wall_pj;
 
 	if(!qos->name) 
 		qos->name = found_qos->name;
@@ -1201,6 +1236,9 @@ extern int assoc_mgr_fill_in_qos(void *db_conn, acct_qos_rec_t *qos,
 
 	qos->priority = found_qos->priority;
 
+	qos->usage_factor = found_qos->usage_factor;
+	qos->usage_raw = found_qos->usage_raw;
+
 	if(!qos->user_limit_list)
 		qos->user_limit_list = found_qos->user_limit_list;
 
@@ -1335,17 +1373,17 @@ extern int assoc_mgr_fill_in_wckey(void *db_conn, acct_wckey_rec_t *wckey,
 	if (wckey_pptr)
 		*wckey_pptr = ret_wckey;
 
+	if(!wckey->cluster)
+		wckey->cluster = ret_wckey->cluster;
+
 	wckey->id = ret_wckey->id;
 	
-	if(!wckey->user)
-		wckey->user = ret_wckey->user;
-	wckey->uid = ret_wckey->uid;
-	
 	if(!wckey->name)
 		wckey->name = ret_wckey->name;
-	if(!wckey->cluster)
-		wckey->cluster = ret_wckey->cluster;
 
+	wckey->uid = ret_wckey->uid;
+	if(!wckey->user)
+		wckey->user = ret_wckey->user;
 
 	slurm_mutex_unlock(&assoc_mgr_wckey_lock);
 
@@ -2133,19 +2171,19 @@ extern int assoc_mgr_update_qos(acct_update_object_t *update)
 			if(object->grp_wall != NO_VAL) 
 				rec->grp_wall = object->grp_wall;
 			
-			if(object->max_cpu_mins_pu != NO_VAL) 
-				rec->max_cpu_mins_pu = object->max_cpu_mins_pu;
-			if(object->max_cpus_pu != NO_VAL) 
-				rec->max_cpus_pu = object->max_cpus_pu;
+			if(object->max_cpu_mins_pj != NO_VAL) 
+				rec->max_cpu_mins_pj = object->max_cpu_mins_pj;
+			if(object->max_cpus_pj != NO_VAL) 
+				rec->max_cpus_pj = object->max_cpus_pj;
 			if(object->max_jobs_pu != NO_VAL) 
 				rec->max_jobs_pu = object->max_jobs_pu;
-			if(object->max_nodes_pu != NO_VAL) 
-				rec->max_nodes_pu = object->max_nodes_pu;
+			if(object->max_nodes_pj != NO_VAL) 
+				rec->max_nodes_pj = object->max_nodes_pj;
 			if(object->max_submit_jobs_pu != NO_VAL) 
 				rec->max_submit_jobs_pu =
 					object->max_submit_jobs_pu;
-			if(object->max_wall_pu != NO_VAL) 
-				rec->max_wall_pu = object->max_wall_pu;
+			if(object->max_wall_pj != NO_VAL) 
+				rec->max_wall_pj = object->max_wall_pj;
 			
 			if(object->preempt_bitstr) {
 				if(rec->preempt_bitstr) 
diff --git a/src/common/slurm_accounting_storage.c b/src/common/slurm_accounting_storage.c
index a44be40fe69c2b31b5b9ee692845b7f3cd6654ab..efe07dc384205a83d911c7ab56aee55319d13790 100644
--- a/src/common/slurm_accounting_storage.c
+++ b/src/common/slurm_accounting_storage.c
@@ -563,7 +563,6 @@ extern void destroy_acct_qos_rec(void *object)
 	acct_qos_rec_t *acct_qos = (acct_qos_rec_t *)object;
 	if(acct_qos) {
 		xfree(acct_qos->description);
-		xfree(acct_qos->job_flags);
 		if(acct_qos->job_list)
 			list_destroy(acct_qos->job_list);
 		xfree(acct_qos->name);
@@ -941,12 +940,12 @@ extern void init_acct_qos_rec(acct_qos_rec_t *qos)
 	qos->grp_submit_jobs = NO_VAL;
 	qos->grp_wall = NO_VAL;
 
-	qos->max_cpu_mins_pu = NO_VAL;
-	qos->max_cpus_pu = NO_VAL;
+	qos->max_cpu_mins_pj = NO_VAL;
+	qos->max_cpus_pj = NO_VAL;
 	qos->max_jobs_pu = NO_VAL;
-	qos->max_nodes_pu = NO_VAL;
+	qos->max_nodes_pj = NO_VAL;
 	qos->max_submit_jobs_pu = NO_VAL;
-	qos->max_wall_pu = NO_VAL;
+	qos->max_wall_pj = NO_VAL;
 
 	qos->usage_factor = NO_VAL;
 }
@@ -1256,24 +1255,37 @@ extern void pack_acct_used_limits(void *in, uint16_t rpc_version, Buf buffer)
 {
 	acct_used_limits_t *object = (acct_used_limits_t *)in;
 
-	if(!object) {
+	if(rpc_version >= 6) {
+		if(!object) {
+			pack32(0, buffer);
+			pack32(0, buffer);
+			pack32(0, buffer);
+			return;
+		}
+		
+		pack32(object->jobs, buffer);
+		pack32(object->submit_jobs, buffer);
+		pack32(object->uid, buffer);
+	} else {
+		if(!object) {
+			pack64(0, buffer);
+			pack32(0, buffer);
+			pack32(0, buffer);
+			pack32(0, buffer);
+			pack32(0, buffer);
+			pack32(0, buffer);
+			pack32(0, buffer);
+			return;
+		}
+		
 		pack64(0, buffer);
 		pack32(0, buffer);
+		pack32(object->jobs, buffer);
 		pack32(0, buffer);
+		pack32(object->submit_jobs, buffer);
 		pack32(0, buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
-		return;
+		pack32(object->uid, buffer);
 	}
-	
-	pack64(object->cpu_mins, buffer);
-	pack32(object->cpus, buffer);
-	pack32(object->jobs, buffer);
-	pack32(object->nodes, buffer);
-	pack32(object->submit_jobs, buffer);
-	pack32(object->wall, buffer);
-	pack32(object->uid, buffer);
 }
 
 extern int unpack_acct_used_limits(void **object,
@@ -1283,13 +1295,21 @@ extern int unpack_acct_used_limits(void **object,
 
 	*object = (void *)object_ptr;
 
-	safe_unpack64(&object_ptr->cpu_mins, buffer);
-	safe_unpack32(&object_ptr->cpus, buffer);
-	safe_unpack32(&object_ptr->jobs, buffer);
-	safe_unpack32(&object_ptr->nodes, buffer);
-	safe_unpack32(&object_ptr->submit_jobs, buffer);
-	safe_unpack32(&object_ptr->wall, buffer);
-	safe_unpack32(&object_ptr->uid, buffer);
+	if(rpc_version >= 6) {
+		safe_unpack32(&object_ptr->jobs, buffer);
+		safe_unpack32(&object_ptr->submit_jobs, buffer);
+		safe_unpack32(&object_ptr->uid, buffer);
+	} else {
+		uint64_t tmp_64;
+		uint32_t tmp_32;
+		safe_unpack64(&tmp_64, buffer);
+		safe_unpack32(&tmp_32, buffer);
+		safe_unpack32(&object_ptr->jobs, buffer);
+		safe_unpack32(&tmp_32, buffer);
+		safe_unpack32(&object_ptr->submit_jobs, buffer);
+		safe_unpack32(&tmp_32, buffer);
+		safe_unpack32(&object_ptr->uid, buffer);
+	}
 	return SLURM_SUCCESS;
 
 unpack_error:
@@ -2403,12 +2423,12 @@ extern void pack_acct_qos_rec(void *in, uint16_t rpc_version, Buf buffer)
 		pack32(object->grp_submit_jobs, buffer);
 		pack32(object->grp_wall, buffer);
 
-		pack64(object->max_cpu_mins_pu, buffer);
-		pack32(object->max_cpus_pu, buffer);
+		pack64(object->max_cpu_mins_pj, buffer);
+		pack32(object->max_cpus_pj, buffer);
 		pack32(object->max_jobs_pu, buffer);
-		pack32(object->max_nodes_pu, buffer);
+		pack32(object->max_nodes_pj, buffer);
 		pack32(object->max_submit_jobs_pu, buffer);
-		pack32(object->max_wall_pu, buffer);
+		pack32(object->max_wall_pj, buffer);
 
 		packstr(object->name, buffer);	
 
@@ -2473,12 +2493,12 @@ extern void pack_acct_qos_rec(void *in, uint16_t rpc_version, Buf buffer)
 		pack32(object->grp_submit_jobs, buffer);
 		pack32(object->grp_wall, buffer);
 
-		pack64(object->max_cpu_mins_pu, buffer);
-		pack32(object->max_cpus_pu, buffer);
+		pack64(object->max_cpu_mins_pj, buffer);
+		pack32(object->max_cpus_pj, buffer);
 		pack32(object->max_jobs_pu, buffer);
-		pack32(object->max_nodes_pu, buffer);
+		pack32(object->max_nodes_pj, buffer);
 		pack32(object->max_submit_jobs_pu, buffer);
-		pack32(object->max_wall_pu, buffer);
+		pack32(object->max_wall_pj, buffer);
 
 		packstr(object->name, buffer);	
 
@@ -2548,12 +2568,12 @@ extern void pack_acct_qos_rec(void *in, uint16_t rpc_version, Buf buffer)
 		pack32(object->grp_submit_jobs, buffer);
 		pack32(object->grp_wall, buffer);
 
-		pack64(object->max_cpu_mins_pu, buffer);
-		pack32(object->max_cpus_pu, buffer);
+		pack64(object->max_cpu_mins_pj, buffer);
+		pack32(object->max_cpus_pj, buffer);
 		pack32(object->max_jobs_pu, buffer);
-		pack32(object->max_nodes_pu, buffer);
+		pack32(object->max_nodes_pj, buffer);
 		pack32(object->max_submit_jobs_pu, buffer);
-		pack32(object->max_wall_pu, buffer);
+		pack32(object->max_wall_pj, buffer);
 
 		packstr(object->name, buffer);	
 
@@ -2618,12 +2638,12 @@ extern int unpack_acct_qos_rec(void **object, uint16_t rpc_version, Buf buffer)
 		safe_unpack32(&object_ptr->grp_submit_jobs, buffer);
 		safe_unpack32(&object_ptr->grp_wall, buffer);
 
-		safe_unpack64(&object_ptr->max_cpu_mins_pu, buffer);
-		safe_unpack32(&object_ptr->max_cpus_pu, buffer);
+		safe_unpack64(&object_ptr->max_cpu_mins_pj, buffer);
+		safe_unpack32(&object_ptr->max_cpus_pj, buffer);
 		safe_unpack32(&object_ptr->max_jobs_pu, buffer);
-		safe_unpack32(&object_ptr->max_nodes_pu, buffer);
+		safe_unpack32(&object_ptr->max_nodes_pj, buffer);
 		safe_unpack32(&object_ptr->max_submit_jobs_pu, buffer);
-		safe_unpack32(&object_ptr->max_wall_pu, buffer);
+		safe_unpack32(&object_ptr->max_wall_pj, buffer);
 
 		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
 
@@ -2656,12 +2676,12 @@ extern int unpack_acct_qos_rec(void **object, uint16_t rpc_version, Buf buffer)
 		safe_unpack32(&object_ptr->grp_submit_jobs, buffer);
 		safe_unpack32(&object_ptr->grp_wall, buffer);
 
-		safe_unpack64(&object_ptr->max_cpu_mins_pu, buffer);
-		safe_unpack32(&object_ptr->max_cpus_pu, buffer);
+		safe_unpack64(&object_ptr->max_cpu_mins_pj, buffer);
+		safe_unpack32(&object_ptr->max_cpus_pj, buffer);
 		safe_unpack32(&object_ptr->max_jobs_pu, buffer);
-		safe_unpack32(&object_ptr->max_nodes_pu, buffer);
+		safe_unpack32(&object_ptr->max_nodes_pj, buffer);
 		safe_unpack32(&object_ptr->max_submit_jobs_pu, buffer);
-		safe_unpack32(&object_ptr->max_wall_pu, buffer);
+		safe_unpack32(&object_ptr->max_wall_pj, buffer);
 
 		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
 
@@ -2719,12 +2739,12 @@ extern int unpack_acct_qos_rec(void **object, uint16_t rpc_version, Buf buffer)
 		safe_unpack32(&object_ptr->grp_submit_jobs, buffer);
 		safe_unpack32(&object_ptr->grp_wall, buffer);
 
-		safe_unpack64(&object_ptr->max_cpu_mins_pu, buffer);
-		safe_unpack32(&object_ptr->max_cpus_pu, buffer);
+		safe_unpack64(&object_ptr->max_cpu_mins_pj, buffer);
+		safe_unpack32(&object_ptr->max_cpus_pj, buffer);
 		safe_unpack32(&object_ptr->max_jobs_pu, buffer);
-		safe_unpack32(&object_ptr->max_nodes_pu, buffer);
+		safe_unpack32(&object_ptr->max_nodes_pj, buffer);
 		safe_unpack32(&object_ptr->max_submit_jobs_pu, buffer);
-		safe_unpack32(&object_ptr->max_wall_pu, buffer);
+		safe_unpack32(&object_ptr->max_wall_pj, buffer);
 
 		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
 
diff --git a/src/common/slurm_accounting_storage.h b/src/common/slurm_accounting_storage.h
index fe4a1c72f2af02b79fbb09e7b8521f8b2b26fe87..0ce276dff7c5c2a787c33a0651b9a15f2cfa2759 100644
--- a/src/common/slurm_accounting_storage.h
+++ b/src/common/slurm_accounting_storage.h
@@ -301,13 +301,11 @@ typedef struct {
 typedef struct {
 	char *description;
 	uint32_t id;
-	char *job_flags;
 	List job_list; /* list of job pointers to submitted/running
 			  jobs (DON'T PACK) */
 
-	uint64_t grp_cpu_mins; /* max number of cpu hours the
-				     * underlying group of
-				     * associations can run for */
+	uint64_t grp_cpu_mins; /* max number of cpu minutes all jobs
+				* running under this qos can run for */
 	uint32_t grp_cpus; /* max number of cpus this qos
 			      can allocate at one time */
 	uint32_t grp_jobs;	/* max number of jobs this qos can run
@@ -328,17 +326,17 @@ typedef struct {
 	double grp_used_wall;   /* group count of time (minutes) used in
 				 * running jobs (DON'T PACK) */
 
-	uint64_t max_cpu_mins_pu; /* max number of cpu mins a user can
+	uint64_t max_cpu_mins_pj; /* max number of cpu mins a user can
 				   * use with this qos */
-	uint32_t max_cpus_pu; /* max number of cpus a user can
+	uint32_t max_cpus_pj; /* max number of cpus a job can
 			       * allocate with this qos */
 	uint32_t max_jobs_pu;	/* max number of jobs a user can
 				 * run with this qos at one time */
-	uint32_t max_nodes_pu; /* max number of nodes a user can
+	uint32_t max_nodes_pj; /* max number of nodes a job can
 				* allocate with this qos at one time */
 	uint32_t max_submit_jobs_pu; /* max number of jobs a user can
 				     submit with this qos at once */
-	uint32_t max_wall_pu; /* longest time this
+	uint32_t max_wall_pj; /* longest time this
 			       * qos can run a job */
 
 	char *name;
@@ -395,12 +393,8 @@ typedef struct {
 /* Right now this is used in the acct_qos_rec_t structure.  In the
  * user_limit_list. */
 typedef struct {
- 	uint64_t cpu_mins;	/* count of cpu mins used */
- 	uint32_t cpus;	/* count of cpus in use */
 	uint32_t jobs;	/* count of active jobs */
-	uint32_t nodes;	/* count of nodes in use */
 	uint32_t submit_jobs; /* count of jobs pending or running */
-	uint32_t wall; /* how much time this user has used */
 	uint32_t uid;
 } acct_used_limits_t;
 
diff --git a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
index eb55c61446f9e259b0db28ef03c2510571f3e2ea..54bbea245693c5e71206e14158e856ecd7325b75 100644
--- a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
+++ b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
@@ -828,26 +828,26 @@ static int _setup_qos_limits(acct_qos_rec_t *qos,
 		xstrcat(*extra, ", grp_wall=NULL");
 	}
 
-	if((int)qos->max_cpu_mins_pu >= 0) {
-		xstrcat(*cols, ", max_cpu_mins_per_user");
-		xstrfmtcat(*vals, ", %llu", qos->max_cpu_mins_pu);
-		xstrfmtcat(*extra, ", max_cpu_mins_per_user=%u",
-			   qos->max_cpu_mins_pu);
-	} else if((int)qos->max_cpu_mins_pu == INFINITE) {
-		xstrcat(*cols, ", max_cpu_mins_per_user");
+	if((int)qos->max_cpu_mins_pj >= 0) {
+		xstrcat(*cols, ", max_cpu_mins_per_job");
+		xstrfmtcat(*vals, ", %llu", qos->max_cpu_mins_pj);
+		xstrfmtcat(*extra, ", max_cpu_mins_per_job=%u",
+			   qos->max_cpu_mins_pj);
+	} else if((int)qos->max_cpu_mins_pj == INFINITE) {
+		xstrcat(*cols, ", max_cpu_mins_per_job");
 		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", max_cpu_mins_per_user=NULL");
+		xstrcat(*extra, ", max_cpu_mins_per_job=NULL");
 	}
 
-	if((int)qos->max_cpus_pu >= 0) {
-		xstrcat(*cols, ", max_cpus_per_user");
-		xstrfmtcat(*vals, ", %u", qos->max_cpus_pu);
-		xstrfmtcat(*extra, ", max_cpus_per_user=%u",
-			   qos->max_cpus_pu);
-	} else if((int)qos->max_cpus_pu == INFINITE) {
-		xstrcat(*cols, ", max_cpus_per_user");
+	if((int)qos->max_cpus_pj >= 0) {
+		xstrcat(*cols, ", max_cpus_per_job");
+		xstrfmtcat(*vals, ", %u", qos->max_cpus_pj);
+		xstrfmtcat(*extra, ", max_cpus_per_job=%u",
+			   qos->max_cpus_pj);
+	} else if((int)qos->max_cpus_pj == INFINITE) {
+		xstrcat(*cols, ", max_cpus_per_job");
 		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", max_cpus_per_user=NULL");
+		xstrcat(*extra, ", max_cpus_per_job=NULL");
 	}
 		
 	if((int)qos->max_jobs_pu >= 0) {
@@ -861,15 +861,15 @@ static int _setup_qos_limits(acct_qos_rec_t *qos,
 		xstrcat(*extra, ", max_jobs_per_user=NULL");		
 	}
 
-	if((int)qos->max_nodes_pu >= 0) {
-		xstrcat(*cols, ", max_nodes_per_user");
-		xstrfmtcat(*vals, ", %u", qos->max_nodes_pu);
-		xstrfmtcat(*extra, ", max_nodes_per_user=%u",
-			   qos->max_nodes_pu);
-	} else if((int)qos->max_nodes_pu == INFINITE) {
-		xstrcat(*cols, ", max_nodes_per_user");
+	if((int)qos->max_nodes_pj >= 0) {
+		xstrcat(*cols, ", max_nodes_per_job");
+		xstrfmtcat(*vals, ", %u", qos->max_nodes_pj);
+		xstrfmtcat(*extra, ", max_nodes_per_job=%u",
+			   qos->max_nodes_pj);
+	} else if((int)qos->max_nodes_pj == INFINITE) {
+		xstrcat(*cols, ", max_nodes_per_job");
 		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", max_nodes_per_user=NULL");
+		xstrcat(*extra, ", max_nodes_per_job=NULL");
 	}
 
 	if((int)qos->max_submit_jobs_pu >= 0) {
@@ -883,15 +883,15 @@ static int _setup_qos_limits(acct_qos_rec_t *qos,
 		xstrcat(*extra, ", max_submit_jobs_per_user=NULL");
 	}
 
-	if((int)qos->max_wall_pu >= 0) {
-		xstrcat(*cols, ", max_wall_duration_per_user");
-		xstrfmtcat(*vals, ", %u", qos->max_wall_pu);
-		xstrfmtcat(*extra, ", max_wall_duration_per_user=%u",
-			   qos->max_wall_pu);
-	} else if((int)qos->max_wall_pu == INFINITE) {
-		xstrcat(*cols, ", max_wall_duration_per_user");
+	if((int)qos->max_wall_pj >= 0) {
+		xstrcat(*cols, ", max_wall_duration_per_job");
+		xstrfmtcat(*vals, ", %u", qos->max_wall_pj);
+		xstrfmtcat(*extra, ", max_wall_duration_per_job=%u",
+			   qos->max_wall_pj);
+	} else if((int)qos->max_wall_pj == INFINITE) {
+		xstrcat(*cols, ", max_wall_duration_per_job");
 		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", max_wall_duration_per_user=NULL");
+		xstrcat(*extra, ", max_wall_duration_per_job=NULL");
 	}
 
 	if(qos->preempt_list && list_count(qos->preempt_list)) {
@@ -937,13 +937,6 @@ static int _setup_qos_limits(acct_qos_rec_t *qos,
 		xfree(preempt_val);
 	} 
 
-	if(qos->job_flags) {
-		xstrcat(*cols, ", job_flags");
-		xstrfmtcat(*vals, ", \"%s\"", qos->job_flags);
-		xstrfmtcat(*extra, ", job_flags=\"%s\"",
-			   qos->job_flags);
-	}
-
 	if((int)qos->usage_factor >= 0) {
 		xstrcat(*cols, ", usage_factor");
 		xstrfmtcat(*vals, ", %f", qos->usage_factor);
@@ -3130,17 +3123,16 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 		{ "description", "text" }, 
 		{ "max_jobs_per_user", "int default NULL" },
 		{ "max_submit_jobs_per_user", "int default NULL" },
-		{ "max_cpus_per_user", "int default NULL" },
-		{ "max_nodes_per_user", "int default NULL" },
-		{ "max_wall_duration_per_user", "int default NULL" },
-		{ "max_cpu_mins_per_user", "bigint default NULL" },
+		{ "max_cpus_per_job", "int default NULL" },
+		{ "max_nodes_per_job", "int default NULL" },
+		{ "max_wall_duration_per_job", "int default NULL" },
+		{ "max_cpu_mins_per_job", "bigint default NULL" },
 		{ "grp_jobs", "int default NULL" },
 		{ "grp_submit_jobs", "int default NULL" },
 		{ "grp_cpus", "int default NULL" },
 		{ "grp_nodes", "int default NULL" },
 		{ "grp_wall", "int default NULL" },
 		{ "grp_cpu_mins", "bigint default NULL" },
-		{ "job_flags", "text" },
 		{ "preempt", "text not null default ''" },
 		{ "priority", "int default 0" },
 		{ "usage_factor", "double default 1.0 not null" },
@@ -6057,12 +6049,12 @@ extern List acct_storage_p_modify_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 		qos_rec->grp_submit_jobs = qos->grp_submit_jobs;
 		qos_rec->grp_wall = qos->grp_wall;
 
-		qos_rec->max_cpus_pu = qos->max_cpus_pu;
-		qos_rec->max_cpu_mins_pu = qos->max_cpu_mins_pu;
+		qos_rec->max_cpus_pj = qos->max_cpus_pj;
+		qos_rec->max_cpu_mins_pj = qos->max_cpu_mins_pj;
 		qos_rec->max_jobs_pu  = qos->max_jobs_pu;
-		qos_rec->max_nodes_pu = qos->max_nodes_pu;
+		qos_rec->max_nodes_pj = qos->max_nodes_pj;
 		qos_rec->max_submit_jobs_pu  = qos->max_submit_jobs_pu;
-		qos_rec->max_wall_pu = qos->max_wall_pu;
+		qos_rec->max_wall_pj = qos->max_wall_pj;
 
 		qos_rec->priority = qos->priority;
 
@@ -8752,13 +8744,12 @@ extern List acct_storage_p_get_qos(mysql_conn_t *mysql_conn, uid_t uid,
 		"grp_nodes",
 		"grp_submit_jobs",
 		"grp_wall",
-		"max_cpu_mins_per_user",
-		"max_cpus_per_user",
+		"max_cpu_mins_per_job",
+		"max_cpus_per_job",
 		"max_jobs_per_user",
-		"max_nodes_per_user",
+		"max_nodes_per_job",
 		"max_submit_jobs_per_user",
-		"max_wall_duration_per_user",
-		"job_flags",
+		"max_wall_duration_per_job",
 		"preempt",
 		"priority",
 		"usage_factor",
@@ -8773,13 +8764,12 @@ extern List acct_storage_p_get_qos(mysql_conn_t *mysql_conn, uid_t uid,
 		QOS_REQ_GN,
 		QOS_REQ_GSJ,
 		QOS_REQ_GW,
-		QOS_REQ_MCMPU,
-		QOS_REQ_MCPU,
+		QOS_REQ_MCMPJ,
+		QOS_REQ_MCPJ,
 		QOS_REQ_MJPU,
-		QOS_REQ_MNPU,
+		QOS_REQ_MNPJ,
 		QOS_REQ_MSJPU,
-		QOS_REQ_MWPU,
-		QOS_REQ_JOBF,
+		QOS_REQ_MWPJ,
 		QOS_REQ_PREE,
 		QOS_REQ_PRIO,
 		QOS_REQ_UF,
@@ -8881,9 +8871,6 @@ empty:
 		if(row[QOS_REQ_NAME] && row[QOS_REQ_NAME][0])
 			qos->name =  xstrdup(row[QOS_REQ_NAME]);
 
-		if(row[QOS_REQ_JOBF] && row[QOS_REQ_JOBF][0])
-			qos->job_flags =  xstrdup(row[QOS_REQ_JOBF]);
-
 		if(row[QOS_REQ_GCH])
 			qos->grp_cpu_mins = atoll(row[QOS_REQ_GCH]);
 		else
@@ -8909,30 +8896,30 @@ empty:
 		else
 			qos->grp_wall = INFINITE;
 
-		if(row[QOS_REQ_MCMPU])
-			qos->max_cpu_mins_pu = atoi(row[QOS_REQ_MCMPU]);
+		if(row[QOS_REQ_MCMPJ])
+			qos->max_cpu_mins_pj = atoi(row[QOS_REQ_MCMPJ]);
 		else
-			qos->max_cpu_mins_pu = INFINITE;
-		if(row[QOS_REQ_MCPU])
-			qos->max_cpus_pu = atoi(row[QOS_REQ_MCPU]);
+			qos->max_cpu_mins_pj = INFINITE;
+		if(row[QOS_REQ_MCPJ])
+			qos->max_cpus_pj = atoi(row[QOS_REQ_MCPJ]);
 		else
-			qos->max_cpus_pu = INFINITE;
+			qos->max_cpus_pj = INFINITE;
 		if(row[QOS_REQ_MJPU])
 			qos->max_jobs_pu = atoi(row[QOS_REQ_MJPU]);
 		else
 			qos->max_jobs_pu = INFINITE;
-		if(row[QOS_REQ_MNPU])
-			qos->max_nodes_pu = atoi(row[QOS_REQ_MNPU]);
+		if(row[QOS_REQ_MNPJ])
+			qos->max_nodes_pj = atoi(row[QOS_REQ_MNPJ]);
 		else
-			qos->max_nodes_pu = INFINITE;
+			qos->max_nodes_pj = INFINITE;
 		if(row[QOS_REQ_MSJPU])
 			qos->max_submit_jobs_pu = atoi(row[QOS_REQ_MSJPU]);
 		else
 			qos->max_submit_jobs_pu = INFINITE;
-		if(row[QOS_REQ_MWPU])
-			qos->max_wall_pu = atoi(row[QOS_REQ_MWPU]);
+		if(row[QOS_REQ_MWPJ])
+			qos->max_wall_pj = atoi(row[QOS_REQ_MWPJ]);
 		else
-			qos->max_wall_pu = INFINITE;
+			qos->max_wall_pj = INFINITE;
 
 		if(row[QOS_REQ_PREE] && row[QOS_REQ_PREE][0]) {
 			if(!qos->preempt_bitstr)
diff --git a/src/sacctmgr/association_functions.c b/src/sacctmgr/association_functions.c
index 01f3c5f64d98cb9df13c5ecc4a3e9d2b801a2f65..43b1367bacc0c1cdefdb7b59447c9d903049296f 100644
--- a/src/sacctmgr/association_functions.c
+++ b/src/sacctmgr/association_functions.c
@@ -445,12 +445,6 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 			field->name = xstrdup("GrpWall");
 			field->len = 11;
 			field->print_routine = print_fields_time;
-		} else if(!strncasecmp("GrpWallRaw", object,
-				       MAX(command_len, 8))) {
-			field->type = PRINT_GRPW;
-			field->name = xstrdup("GrpWall");
-			field->len = 11;
-			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("ID", object, MAX(command_len, 1))) {
 			field->type = PRINT_ID;
 			field->name = xstrdup("ID");
@@ -497,12 +491,6 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 			field->name = xstrdup("MaxWall");
 			field->len = 11;
 			field->print_routine = print_fields_time;
-		} else if(!strncasecmp("MaxWallRaw", object,
-				       MAX(command_len, 8))) {
-			field->type = PRINT_MAXW;
-			field->name = xstrdup("MaxWall");
-			field->len = 11;
-			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("QOSRAWLevel", object,
 				       MAX(command_len, 4))) {
 			field->type = PRINT_QOS_RAW;
diff --git a/src/sacctmgr/common.c b/src/sacctmgr/common.c
index a55664a2df0b3c37ff998b3542fca2ed502c899b..494b366fb66bab2ea2337ff5ca58ea2be41f490d 100644
--- a/src/sacctmgr/common.c
+++ b/src/sacctmgr/common.c
@@ -1188,9 +1188,6 @@ extern void sacctmgr_print_qos_limits(acct_qos_rec_t *qos)
 	if(qos->preempt_list && !g_qos_list)
 		g_qos_list = acct_storage_g_get_qos(db_conn, my_uid, NULL);
 
-	if(qos->job_flags)
-		printf("  JobFlags       = %s", qos->job_flags);
-
 	if(qos->grp_cpu_mins == INFINITE)
 		printf("  GrpCPUMins     = NONE\n");
 	else if(qos->grp_cpu_mins != NO_VAL) 
@@ -1227,26 +1224,26 @@ extern void sacctmgr_print_qos_limits(acct_qos_rec_t *qos)
 		printf("  GrpWall        = %s\n", time_buf);
 	}
 
-	if(qos->max_cpu_mins_pu == INFINITE)
+	if(qos->max_cpu_mins_pj == INFINITE)
 		printf("  MaxCPUMins     = NONE\n");
-	else if(qos->max_cpu_mins_pu != NO_VAL) 
+	else if(qos->max_cpu_mins_pj != NO_VAL) 
 		printf("  MaxCPUMins     = %llu\n", 
-		       (long long unsigned)qos->max_cpu_mins_pu);
+		       (long long unsigned)qos->max_cpu_mins_pj);
 		
-	if(qos->max_cpus_pu == INFINITE)
+	if(qos->max_cpus_pj == INFINITE)
 		printf("  MaxCPUs        = NONE\n");
-	else if(qos->max_cpus_pu != NO_VAL) 
-		printf("  MaxCPUs        = %u\n", qos->max_cpus_pu);
+	else if(qos->max_cpus_pj != NO_VAL) 
+		printf("  MaxCPUs        = %u\n", qos->max_cpus_pj);
 				
 	if(qos->max_jobs_pu == INFINITE) 
 		printf("  MaxJobs        = NONE\n");
 	else if(qos->max_jobs_pu != NO_VAL) 
 		printf("  MaxJobs        = %u\n", qos->max_jobs_pu);
 		
-	if(qos->max_nodes_pu == INFINITE)
+	if(qos->max_nodes_pj == INFINITE)
 		printf("  MaxNodes       = NONE\n");
-	else if(qos->max_nodes_pu != NO_VAL)
-		printf("  MaxNodes       = %u\n", qos->max_nodes_pu);
+	else if(qos->max_nodes_pj != NO_VAL)
+		printf("  MaxNodes       = %u\n", qos->max_nodes_pj);
 		
 	if(qos->max_submit_jobs_pu == INFINITE) 
 		printf("  MaxSubmitJobs  = NONE\n");
@@ -1254,11 +1251,11 @@ extern void sacctmgr_print_qos_limits(acct_qos_rec_t *qos)
 		printf("  MaxSubmitJobs  = %u\n", 
 		       qos->max_submit_jobs_pu);
 		
-	if(qos->max_wall_pu == INFINITE) 
+	if(qos->max_wall_pj == INFINITE) 
 		printf("  MaxWall        = NONE\n");		
-	else if(qos->max_wall_pu != NO_VAL) {
+	else if(qos->max_wall_pj != NO_VAL) {
 		char time_buf[32];
-		mins2time_str((time_t) qos->max_wall_pu, 
+		mins2time_str((time_t) qos->max_wall_pj, 
 			      time_buf, sizeof(time_buf));
 		printf("  MaxWall        = %s\n", time_buf);
 	}
diff --git a/src/sacctmgr/qos_functions.c b/src/sacctmgr/qos_functions.c
index 8c5aeb4ee71d517d1acc36bdfe5071cf0ff6277b..4a450976eeb66a394d1ad912b911d6620c61fbd8 100644
--- a/src/sacctmgr/qos_functions.c
+++ b/src/sacctmgr/qos_functions.c
@@ -174,12 +174,6 @@ static int _set_rec(int *start, int argc, char *argv[],
 				qos->description =
 					strip_quotes(argv[i]+end, NULL, 1);
 			set = 1;
-		} else if (!strncasecmp (argv[i], "JobFlags",
-					 MAX(command_len, 1))) {
-			if(!qos->job_flags)
-				qos->job_flags =
-					strip_quotes(argv[i]+end, NULL, 1);
-			set = 1;			
 		} else if (!strncasecmp (argv[i], "GrpCPUMins",
 					 MAX(command_len, 7))) {
 			if(!qos)
@@ -235,17 +229,17 @@ static int _set_rec(int *start, int argc, char *argv[],
 			if(!qos)
 				continue;
 			if (get_uint64(argv[i]+end, 
-				       &qos->max_cpu_mins_pu, 
+				       &qos->max_cpu_mins_pj, 
 				       "MaxCPUMins") == SLURM_SUCCESS)
 				set = 1;
 		} else if (!strncasecmp (argv[i], "MaxCpusPerJob", 
 					 MAX(command_len, 7))) {
 			if(!qos)
 				continue;
-			if (get_uint(argv[i]+end, &qos->max_cpus_pu,
+			if (get_uint(argv[i]+end, &qos->max_cpus_pj,
 			    "MaxCpus") == SLURM_SUCCESS)
 				set = 1;
-		} else if (!strncasecmp (argv[i], "MaxJobsPerJob",
+		} else if (!strncasecmp (argv[i], "MaxJobsPerUser",
 					 MAX(command_len, 4))) {
 			if(!qos)
 				continue;
@@ -257,10 +251,10 @@ static int _set_rec(int *start, int argc, char *argv[],
 			if(!qos)
 				continue;
 			if (get_uint(argv[i]+end, 
-			    &qos->max_nodes_pu,
+			    &qos->max_nodes_pj,
 			    "MaxNodes") == SLURM_SUCCESS)
 				set = 1;
-		} else if (!strncasecmp (argv[i], "MaxSubmitJobs",
+		} else if (!strncasecmp (argv[i], "MaxSubmitJobsPerUser",
 					 MAX(command_len, 4))) {
 			if(!qos)
 				continue;
@@ -273,7 +267,7 @@ static int _set_rec(int *start, int argc, char *argv[],
 				continue;
 			mins = time_str2mins(argv[i]+end);
 			if (mins != NO_VAL) {
-				qos->max_wall_pu = (uint32_t) mins;
+				qos->max_wall_pj = (uint32_t) mins;
 				set = 1;
 			} else {
 				exit_code=1;
@@ -398,15 +392,12 @@ extern int sacctmgr_add_qos(int argc, char *argv[])
 			qos->grp_submit_jobs = start_qos->grp_submit_jobs;
 			qos->grp_wall = start_qos->grp_wall;
 
-			qos->max_cpu_mins_pu = start_qos->max_cpu_mins_pu;
-			qos->max_cpus_pu = start_qos->max_cpus_pu;
+			qos->max_cpu_mins_pj = start_qos->max_cpu_mins_pj;
+			qos->max_cpus_pj = start_qos->max_cpus_pj;
 			qos->max_jobs_pu = start_qos->max_jobs_pu;
-			qos->max_nodes_pu = start_qos->max_nodes_pu;
+			qos->max_nodes_pj = start_qos->max_nodes_pj;
 			qos->max_submit_jobs_pu = start_qos->max_submit_jobs_pu;
-			qos->max_wall_pu = start_qos->max_wall_pu;
-
-			if(start_qos->job_flags)
-				qos->job_flags = start_qos->job_flags;
+			qos->max_wall_pj = start_qos->max_wall_pj;
 
 			qos->preempt_list =
 				copy_char_list(start_qos->preempt_list);
@@ -489,7 +480,6 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 		PRINT_DESC,
 		PRINT_ID,
 		PRINT_NAME,
-		PRINT_JOBF,
 		PRINT_GRPCM,
 		PRINT_GRPC,
 		PRINT_GRPJ,
@@ -575,17 +565,23 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 			field->name = xstrdup("GrpSubmit");
 			field->len = 9;
 			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpWall", object,
+				       MAX(command_len, 4))) {
+			field->type = PRINT_GRPW;
+			field->name = xstrdup("GrpWall");
+			field->len = 11;
+			field->print_routine = print_fields_time;
+		} else if(!strncasecmp("GrpWallRaw", object,
+				       MAX(command_len, 8))) {
+			field->type = PRINT_GRPW;
+			field->name = xstrdup("GrpWall");
+			field->len = 11;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("ID", object, MAX(command_len, 1))) {
 			field->type = PRINT_ID;
 			field->name = xstrdup("ID");
 			field->len = 6;
 			field->print_routine = print_fields_uint;
-		} else if(!strncasecmp("JobFlags", object,
-				       MAX(command_len, 1))) {
-			field->type = PRINT_JOBF;
-			field->name = xstrdup("JobFlags");
-			field->len = 20;
-			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("MaxCPUMinsPerJob", object,
 				       MAX(command_len, 7))) {
 			field->type = PRINT_MAXCM;
@@ -598,7 +594,7 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 			field->name = xstrdup("MaxCPUs");
 			field->len = 8;
 			field->print_routine = print_fields_uint;
-		} else if(!strncasecmp("MaxJobs", object, 
+		} else if(!strncasecmp("MaxJobsPerUser", object, 
 				       MAX(command_len, 4))) {
 			field->type = PRINT_MAXJ;
 			field->name = xstrdup("MaxJobs");
@@ -610,7 +606,7 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 			field->name = xstrdup("MaxNodes");
 			field->len = 8;
 			field->print_routine = print_fields_uint;
-		} else if(!strncasecmp("MaxSubmitJobs", object,
+		} else if(!strncasecmp("MaxSubmitJobsPerUser", object,
 				       MAX(command_len, 4))) {
 			field->type = PRINT_MAXS;
 			field->name = xstrdup("MaxSubmit");
@@ -622,6 +618,12 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 			field->name = xstrdup("MaxWall");
 			field->len = 11;
 			field->print_routine = print_fields_time;
+		} else if(!strncasecmp("MaxWallRaw", object,
+				       MAX(command_len, 8))) {
+			field->type = PRINT_MAXW;
+			field->name = xstrdup("MaxWall");
+			field->len = 11;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("Name", object, MAX(command_len, 1))) {
 			field->type = PRINT_NAME;
 			field->name = xstrdup("Name");
@@ -725,20 +727,15 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 					field, qos->id,
 					(curr_inx == field_count));
 				break;
-			case PRINT_JOBF:
-				field->print_routine(
-					field, qos->job_flags,
-					(curr_inx == field_count));
-				break;
 			case PRINT_MAXCM:
 				field->print_routine(
 					field,
-					qos->max_cpu_mins_pu,
+					qos->max_cpu_mins_pj,
 					(curr_inx == field_count));
 				break;
 			case PRINT_MAXC:
 				field->print_routine(field,
-						     qos->max_cpus_pu,
+						     qos->max_cpus_pj,
 						     (curr_inx == field_count));
 				break;
 			case PRINT_MAXJ:
@@ -748,7 +745,7 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 				break;
 			case PRINT_MAXN:
 				field->print_routine(field,
-						     qos->max_nodes_pu,
+						     qos->max_nodes_pj,
 						     (curr_inx == field_count));
 				break;
 			case PRINT_MAXS:
@@ -759,7 +756,7 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 			case PRINT_MAXW:
 				field->print_routine(
 					field,
-					qos->max_wall_pu,
+					qos->max_wall_pj,
 					(curr_inx == field_count));
 				break;
 			case PRINT_NAME:
diff --git a/src/sacctmgr/sacctmgr.c b/src/sacctmgr/sacctmgr.c
index 58a0f1b5f2deafe4ac20e812ac709da88fa62a2e..057993eed78f3fb8441360b32d62b1f2a93d4a12 100644
--- a/src/sacctmgr/sacctmgr.c
+++ b/src/sacctmgr/sacctmgr.c
@@ -809,13 +809,13 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
        add account        - Clusters=, Description=, Fairshare=,           \n\
                             GrpCPUMins=, GrpCPUs=, GrpJobs=, GrpNodes=,    \n\
                             GrpSubmitJob=, GrpWall=, MaxCPUMins=, MaxJobs=,\n\
-                            MaxNodes=, MaxWall=, Names=, Organization=,    \n\
-                            Parent=, and QosLevel                          \n\
+                            MaxNodes=, MaxSubmitJobs=, MaxWall=, Names=,   \n\
+                            Organization=, Parent=, and QosLevel           \n\
        modify account     - (set options) Description=, Fairshare=,        \n\
                             GrpCPUMins=, GrpCPUs=, GrpJobs=, GrpNodes=,    \n\
                             GrpSubmitJob=, GrpWall=, MaxCPUMins=, MaxJobs=,\n\
-                            MaxNodes=, MaxWall=, Names=, Organization=,    \n\
-                            Parent=, and QosLevel=                         \n\
+                            MaxNodes=, MaxSubmitJobs=, MaxWall=, Names=,   \n\
+                            Organization=, Parent=, and QosLevel=          \n\
                             (where options) Clusters=, Descriptions=,      \n\
                             Names=, Organizations=, Parent=, and QosLevel= \n\
        delete account     - Clusters=, Descriptions=, Names=,              \n\
@@ -829,12 +829,12 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
        list cluster       - Format=, Names=, WOLimits                      \n\
        add cluster        - Fairshare=, GrpCPUs=, GrpJobs=,                \n\
                             GrpNodes=, GrpSubmitJob=, MaxCPUMins=          \n\
-                            MaxJobs=, MaxNodes=, MaxWall=, Name=,          \n\
-                            and QosLevel=                                  \n\
+                            MaxJobs=, MaxNodes=, MaxSubmitJobs=, MaxWall=, \n\
+                            Name=, and QosLevel=                           \n\
        modify cluster     - (set options) Fairshare=,                      \n\
                             GrpCPUs=, GrpJobs=, GrpNodes=, GrpSubmitJob=,  \n\
-                            MaxCPUMins=, MaxJobs=, MaxNodes=, MaxWall=,    \n\
-                            and QosLevel=                                  \n\
+                            MaxCPUMins=, MaxJobs=, MaxNodes=, MaxSubmitJobs=,\n\
+                            MaxWall=, and QosLevel=                        \n\
                             (where options) Names=                         \n\
        delete cluster     - Names=                                         \n\
                                                                            \n\
@@ -844,9 +844,9 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
        list qos           - Descriptions=, Format=, Ids=, Names=,          \n\
                             and WithDeleted                                \n\
        add qos            - Description=, GrpCPUMins=, GrpCPUs=, GrpJobs=, \n\
-                            GrpNodes=, GrpSubmitJob=, GrpWall=, JobFlags=, \n\
-                            MaxCPUMins=, MaxJobs=, MaxNodes=, MaxWall=,    \n\
-                            Preempt=, Priority=, and Names=                \n\
+                            GrpNodes=, GrpSubmitJob=, GrpWall=,            \n\
+                            MaxCPUMins=, MaxJobs=, MaxNodes=, MaxSubmitJobs=,\n\
+                            MaxWall=, Preempt=, Priority=, and Names=      \n\
        delete qos         - Descriptions=, IDs=, and Names=                \n\
                                                                            \n\
        list transactions  - Accounts=, Action=, Actor=, Clusters=, End=,   \n\
@@ -858,13 +858,13 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
                             WithRawQOS, and WOPLimits                      \n\
        add user           - Accounts=, AdminLevel=, Clusters=,             \n\
                             DefaultAccount=, DefaultWCKey=,                \n\
-                            Fairshare=, MaxCPUMins=                        \n\
-                            MaxCPUs=, MaxJobs=, MaxNodes=, MaxWall=,       \n\
+                            Fairshare=, MaxCPUMins=, MaxCPUs=,             \n\
+                            MaxJobs=, MaxNodes=, MaxSubmitJobs=, MaxWall=, \n\
                             Names=, Partitions=, and QosLevel=             \n\
        modify user        - (set options) AdminLevel=, DefaultAccount=,    \n\
                             DefaultWCKey=, Fairshare=, MaxCPUMins=,        \n\
-                            MaxCPUs= MaxJobs=,                             \n\
-                            MaxNodes=, MaxWall=, and QosLevel=             \n\
+                            MaxCPUs=, MaxJobs=, MaxNodes=,                 \n\
+                            MaxSubmitJobs=, MaxWall=, and QosLevel=        \n\
                             (where options) Accounts=, AdminLevel=,        \n\
                             Clusters=, DefaultAccounts=, Names=,           \n\
                             Partitions=, and QosLevel=                     \n\
@@ -890,18 +890,22 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
                                                                            \n\
        Association        - Account, Cluster, Fairshare, GrpCPUMins,       \n\
                             GrpCPUs, GrpJobs, GrpNodes, GrpSubmitJob,      \n\
-                            GrpWall, ID, LFT, MaxCPUs, MaxCPUMins,         \n\
-                            MaxJobs, MaxNodes, MaxSubmitJobs, MaxWall, QOS,\n\
-                            ParentID, ParentName, Partition, RawQOS, RGT,  \n\
-                            User                                           \n\
+                            GrpWall, ID, LFT, MaxCPUMins, MaxCPUs,         \n\
+                            MaxJobs, MaxNodes, MaxSubmitJobs,              \n\
+                            MaxWall, QOS, ParentID, ParentName,            \n\
+                            Partition, RawQOS, RGT, User                   \n\
                                                                            \n\
        Cluster            - Cluster, ControlHost, ControlPort, CpuCount,   \n\
-                            Fairshare, GrpCPUs, GrpJobs,                   \n\
-                            GrpNodes, GrpSubmitJob, MaxCPUs,               \n\
-                            MaxCPUMins, MaxJobs, MaxNodes, MaxSubmitJobs,  \n\
+                            Fairshare, GrpCPUMins, GrpCPUs, GrpJobs,       \n\
+                            GrpNodes, GrpSubmitJob, MaxCPUMins,            \n\
+                            MaxCPUs, MaxJobs, MaxNodes, MaxSubmitJobs,     \n\
                             MaxWall, NodeCount, NodeNames                  \n\
                                                                            \n\
-       QOS                - Description, ID, Name                          \n\
+       QOS                - Description, GrpCPUMins, GrpCPUs, GrpJobs,     \n\
+                            GrpNodes, GrpSubmitJob, GrpWall, ID,           \n\
+                            MaxCPUMins, MaxCPUs, MaxJobs, MaxNodes,        \n\
+                            MaxSubmitJobs, MaxWall, Name,                  \n\
+                            Preempt, Priority, UsageFactor                 \n\
                                                                            \n\
        Transactions       - Action, Actor, Info, TimeStamp, Where          \n\
                                                                            \n\