diff --git a/NEWS b/NEWS
index 7fbfa2f1a9aec83c887fab6286325c3202dd70f2..decc6ac58249f27fd223b7cc00dd78c5b8bd8a26 100644
--- a/NEWS
+++ b/NEWS
@@ -79,6 +79,13 @@ documents those changes that are of interest to users and administrators.
     diversion.
  -- Pending job array records will be combined into single line by default,
     even if started and requeued or modified.
+ -- Fix sacct --format=nnodes to print the requested node count for pending
+    jobs.
+ -- Make it so 'scontrol update job 1234 qos=' will set the qos back to
+    the default qos for the association.
+ -- Add [Alloc|Req]Nodes to sacct for symmetry with the existing cpu fields.
+ -- Fix sacct documentation about [Alloc|Req]TRES.
+ -- Put node count in TRES string for steps.
 
 * Changes in Slurm 15.08.0
 ==========================
@@ -450,6 +457,7 @@ documents those changes that are of interest to users and administrators.
     submitted.
  -- Correct counting for job array limits, job count limit underflow possible
     when master cancellation of master job record.
+ -- For pending jobs, have sacct print 0 for nnodes instead of the bogus 2.
 
 * Changes in Slurm 14.11.9
 ==========================
diff --git a/doc/man/man1/sacct.1 b/doc/man/man1/sacct.1
index 5dfa2502f40f4a0ea2fcc2b51eca1e9871362f85..041cc37205640fb7d41c3c3ee5b51630682ca117 100644
--- a/doc/man/man1/sacct.1
+++ b/doc/man/man1/sacct.1
@@ -126,25 +126,27 @@ Print a list of fields that can be specified with the \f3\-\-format\fP option.
 .ft 3
 Fields available:
 
-AllocCPUS        Account         AssocID        AveCPU
-AveCPUFreq       AveDiskRead     AveDiskWrite   AvePages
-AveRSS           AveVMSize       BlockID        Cluster
-Comment          ConsumedEnergy  CPUTime        CPUTimeRAW
-DerivedExitCode  Elapsed         Eligible       End
-ExitCode         GID             Group          JobID
-JobIDRaw         JobName         Layout         MaxDiskRead
-MaxDiskReadNode  MaxDiskReadTask MaxDiskWrite   MaxDiskWriteNode
-MaxDiskWriteTask MaxPages        MaxPagesNode   MaxPagesTask
-MaxRSS           MaxRSSNode      MaxRSSTask     MaxVMSize
-MaxVMSizeNode    MaxVMSizeTask   MinCPU         MinCPUNode
-MinCPUTask       NCPUS           NNodes         NodeList
-NTasks           Priority        Partition      QOSRAW
-ReqCPUFreqMin    ReqCPUFreqMax   ReqCPUFreqGov  ReqCPUs
-ReqMem           Reservation     ReservationId  Reserved
-ResvCPU          ResvCPURAW      Start          State
-Submit           Suspended       SystemCPU      Timelimit
-TotalCPU         Tres            UID            User
-UserCPU          WCKey           WCKeyID
+AllocCPUS         AllocGRES         AllocNodes        AllocTRES
+Account           AssocID           AveCPU            AveCPUFreq
+AveDiskRead       AveDiskWrite      AvePages          AveRSS
+AveVMSize         BlockID           Cluster           Comment
+ConsumedEnergy    ConsumedEnergyRaw CPUTime           CPUTimeRAW
+DerivedExitCode   Elapsed           Eligible          End
+ExitCode          GID               Group             JobID
+JobIDRaw          JobName           Layout            MaxDiskRead
+MaxDiskReadNode   MaxDiskReadTask   MaxDiskWrite      MaxDiskWriteNode
+MaxDiskWriteTask  MaxPages          MaxPagesNode      MaxPagesTask
+MaxRSS            MaxRSSNode        MaxRSSTask        MaxVMSize
+MaxVMSizeNode     MaxVMSizeTask     MinCPU            MinCPUNode
+MinCPUTask        NCPUS             NNodes            NodeList
+NTasks            Priority          Partition         QOS
+QOSRAW            ReqCPUFreq        ReqCPUFreqMin     ReqCPUFreqMax
+ReqCPUFreqGov     ReqCPUS           ReqGRES           ReqMem
+ReqNodes          ReqTRES           Reservation       ReservationId
+Reserved          ResvCPU           ResvCPURAW        Start
+State             Submit            Suspended         SystemCPU
+Timelimit         TotalCPU          UID               User
+UserCPU           WCKey             WCKeyID
 
 .ft 1
 .fi
@@ -441,6 +443,16 @@ Count of allocated CPUs. Equivalent to \f3NCPUS\fP.
 \f3AllocGRES\fP
 Names and counts of generic resources allocated.
 
+.TP
+\f3AllocNodes\fP
+Number of nodes allocated to the job/step.  Zero if the job is pending.
+
+.TP
+\f3AllocTRES\fP
+Trackable resources. These are the resources allocated to the job/step
+once the job starts running.  For pending jobs this field is blank.
+For more details see AccountingStorageTRES in slurm.conf.
+
 .TP
 \f3Account\fP
 Account the job ran under.
@@ -677,7 +689,8 @@ List of nodes in job/step.
 
 .TP
 \f3NNodes\fP
-Number of nodes in a job or step.
+Number of nodes in a job or step.  If the job is running or has run, this is
+the allocated node count; for a pending job it is the requested node count.
 
 .TP
 \f3NTasks\fP
@@ -718,6 +731,16 @@ Minimum required memory for the job, in MB. A 'c' at the end of
 number represents Memory Per CPU, a 'n' represents Memory Per Node.
 Note: This value is only from the job allocation, not the step.
 
+.TP
+\f3ReqNodes\fP
+Minimum number of nodes requested by the job/step.
+
+.TP
+\f3ReqTRES\fP
+Trackable resources. These are the minimum resource counts requested by the
+job/step at submission time.
+For more details see AccountingStorageTRES in slurm.conf.
+
 .TP
 \f3Reservation\fP
 Reservation Name.
@@ -791,11 +814,6 @@ identical to that of the \f3Elapsed\fP field.
 NOTE: TotalCPU provides a measure of the task's parent process and
 does not include CPU time of child processes.
 
-.TP
-\f3Tres\fP
-Trackable resources. These are the resources specified by the
-job at submission time. For more details see AccountingStorageTRES in slurm.conf.
-
 .TP
 \f3UID\fP
 The user identifier of the user who ran the job.
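
Note on the TRES strings the new AllocTRES/ReqTRES fields print: a TRES
string is a comma-separated list of "id=count" pairs, where the ids are
slurmdbd's static TRES ids (1=cpu, 2=mem, 3=energy, 4=node in a stock
setup; see tres_types_t in slurm/slurmdb.h). A minimal standalone sketch of
pulling one count back out of such a string -- tres_count_in_string() is a
hypothetical stand-in for the real slurmdb_find_tres_count_in_string(), not
the library routine:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define INFINITE64 0xffffffffffffffffULL	/* "not found" sentinel */

/* Scan a "1=8,2=4096,4=2" style string for the count of TRES id "id". */
static uint64_t tres_count_in_string(const char *tres_str, uint32_t id)
{
	const char *p = tres_str;

	while (p && *p) {
		char *end = NULL;
		uint32_t cur = (uint32_t)strtoul(p, &end, 10);

		if (end && *end == '=' && cur == id)
			return strtoull(end + 1, NULL, 10);
		p = strchr(p, ',');
		if (p)
			p++;	/* step past the comma to the next pair */
	}
	return INFINITE64;
}

int main(void)
{
	const char *alloc = "1=8,2=4096,4=2";	/* 8 cpus, 4096 MB, 2 nodes */

	printf("nodes = %" PRIu64 "\n", tres_count_in_string(alloc, 4));
	return 0;
}

This parse-with-fallback is also how NNodes now behaves for pending jobs:
no allocation string exists yet, so the count comes from the requested TRES
string instead.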
diff --git a/src/sacct/print.c b/src/sacct/print.c
index a359d541378a475ec886a8edaae9b6916ea5fc9c..35b187958992541a4d5d901a602cf28552e92510 100644
--- a/src/sacct/print.c
+++ b/src/sacct/print.c
@@ -236,9 +236,10 @@ void print_fields(type_t type, void *object)
 			got_stats = true;
 
 		job_comp = NULL;
-
 		cpu_tres_rec_count = slurmdb_find_tres_count_in_string(
-			job->tres_alloc_str, TRES_CPU);
+			(job->tres_alloc_str && job->tres_alloc_str[0]) ?
+			job->tres_alloc_str : job->tres_req_str,
+			TRES_CPU);
 		break;
 	case JOBSTEP:
 		job = step->job_ptr;
@@ -251,7 +252,10 @@ void print_fields(type_t type, void *object)
 			      step->tres_alloc_str, TRES_CPU)))
 			step_cpu_tres_rec_count =
 				slurmdb_find_tres_count_in_string(
-					job->tres_alloc_str, TRES_CPU);
+					(job->tres_alloc_str &&
+					 job->tres_alloc_str[0]) ?
+					job->tres_alloc_str : job->tres_req_str,
+					TRES_CPU);
 
 		job_comp = NULL;
 		break;
@@ -314,6 +318,37 @@ void print_fields(type_t type, void *object)
 					     tmp_char,
 					     (curr_inx == field_count));
 			break;
+		case PRINT_ALLOC_NODES:
+			switch(type) {
+			case JOB:
+				tmp_int = job->alloc_nodes;
+				tmp_char = job->tres_alloc_str;
+				break;
+			case JOBSTEP:
+				tmp_int = step->nnodes;
+				tmp_char = step->tres_alloc_str;
+				break;
+			case JOBCOMP:
+				tmp_int = job_comp->node_cnt;
+				break;
+			default:
+				break;
+			}
+
+			if (!tmp_int && tmp_char) {
+				if ((tmp_uint64 =
+				     slurmdb_find_tres_count_in_string(
+					     tmp_char, TRES_NODE))
+				    != INFINITE64)
+					tmp_int = tmp_uint64;
+			}
+			convert_num_unit((double)tmp_int, outbuf,
+					 sizeof(outbuf), UNIT_NONE,
+					 params.convert_flags);
+			field->print_routine(field,
+					     outbuf,
+					     (curr_inx == field_count));
+			break;
 		case PRINT_ACCOUNT:
 			switch(type) {
 			case JOB:
@@ -1412,24 +1447,28 @@ void print_fields(type_t type, void *object)
 			switch(type) {
 			case JOB:
 				tmp_int = job->alloc_nodes;
-				tmp_char = job->nodes;
+				tmp_char = (job->tres_alloc_str &&
+					    job->tres_alloc_str[0])
+					? job->tres_alloc_str :
+					job->tres_req_str;
 				break;
 			case JOBSTEP:
 				tmp_int = step->nnodes;
-				tmp_char = step->nodes;
+				tmp_char = step->tres_alloc_str;
 				break;
 			case JOBCOMP:
 				tmp_int = job_comp->node_cnt;
-				tmp_char = job_comp->nodelist;
 				break;
 			default:
 				break;
 			}
 
-			if (!tmp_int) {
-				hostlist_t hl = hostlist_create(tmp_char);
-				tmp_int = hostlist_count(hl);
-				hostlist_destroy(hl);
+			if (!tmp_int && tmp_char) {
+				if ((tmp_uint64 =
+				     slurmdb_find_tres_count_in_string(
+					     tmp_char, TRES_NODE))
+				    != INFINITE64)
+					tmp_int = tmp_uint64;
 			}
 			convert_num_unit((double)tmp_int, outbuf,
 					 sizeof(outbuf), UNIT_NONE,
@@ -1667,6 +1706,38 @@ void print_fields(type_t type, void *object)
 				else
 					sprintf(outbuf+strlen(outbuf), "n");
 			}
+			field->print_routine(field,
+					     outbuf,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_REQ_NODES:
+			switch(type) {
+			case JOB:
+				tmp_int = 0;
+				tmp_char = job->tres_req_str;
+				break;
+			case JOBSTEP:
+				tmp_int = step->nnodes;
+				tmp_char = step->tres_alloc_str;
+				break;
+			case JOBCOMP:
+				tmp_int = job_comp->node_cnt;
+				break;
+			default:
+				break;
+			}
+
+			if (!tmp_int && tmp_char) {
+				if ((tmp_uint64 =
+				     slurmdb_find_tres_count_in_string(
+					     tmp_char, TRES_NODE))
+				    != INFINITE64)
+					tmp_int = tmp_uint64;
+			}
+			convert_num_unit((double)tmp_int, outbuf,
+					 sizeof(outbuf), UNIT_NONE,
+					 params.convert_flags);
+
 			field->print_routine(field,
 					     outbuf,
 					     (curr_inx == field_count));
diff --git a/src/sacct/sacct.c b/src/sacct/sacct.c
index c7a70b4a6ad745ef8e7cd08c5a836e5159332ca6..d00b2ba7ed61a80a71508f960a05157f70634c2c 100644
--- a/src/sacct/sacct.c
+++ b/src/sacct/sacct.c
@@ -46,9 +46,9 @@ sacct_parameters_t params;
 print_field_t fields[] = {
 	{10, "AllocCPUS", print_fields_uint, PRINT_ALLOC_CPUS},
 	{12, "AllocGRES", print_fields_str, PRINT_ALLOC_GRES},
+	{10, "AllocNodes", print_fields_str, PRINT_ALLOC_NODES},
+	{10, "AllocTRES", print_fields_str, PRINT_TRESA},
 	{10, "Account", print_fields_str, PRINT_ACCOUNT},
-	{10, "TRESAlloc", print_fields_str, PRINT_TRESA},
-	{10, "TRESReq", print_fields_str, PRINT_TRESR},
 	{7,  "AssocID", print_fields_uint, PRINT_ASSOCID},
 	{10, "AveCPU", print_fields_str, PRINT_AVECPU},
 	{10, "AveCPUFreq", print_fields_str, PRINT_ACT_CPUFREQ},
@@ -109,6 +109,8 @@ print_field_t fields[] = {
 	{8,  "ReqCPUS", print_fields_uint, PRINT_REQ_CPUS},
 	{12, "ReqGRES", print_fields_str, PRINT_REQ_GRES},
 	{10, "ReqMem", print_fields_str, PRINT_REQ_MEM},
+	{8,  "ReqNodes", print_fields_str, PRINT_REQ_NODES},
+	{10, "ReqTRES", print_fields_str, PRINT_TRESR},
 	{20, "Reservation",  print_fields_str, PRINT_RESERVATION},
 	{8,  "ReservationId",  print_fields_uint, PRINT_RESERVATION_ID},
 	{10, "Reserved", print_fields_time_from_secs, PRINT_RESV},
diff --git a/src/sacct/sacct.h b/src/sacct/sacct.h
index 5bc804d5a18a6fd4274214b66dbdeb33fef7926a..3a56fedfbaf76a11f9682d53d0793fd3ccb65ba0 100644
--- a/src/sacct/sacct.h
+++ b/src/sacct/sacct.h
@@ -95,6 +95,7 @@ typedef enum {
 		PRINT_ACCOUNT,
 		PRINT_ALLOC_CPUS,
 		PRINT_ALLOC_GRES,
+		PRINT_ALLOC_NODES,
 		PRINT_TRESA,
 		PRINT_TRESR,
 		PRINT_ASSOCID,
@@ -154,6 +155,7 @@ typedef enum {
 		PRINT_REQ_CPUS,
 		PRINT_REQ_GRES,
 		PRINT_REQ_MEM,
+		PRINT_REQ_NODES,
 		PRINT_RESERVATION,
 		PRINT_RESERVATION_ID,
 		PRINT_RESV,
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index 093e0e888e59a25453fd75118c5cdf91c3dac114..bcaee1db9dff4c9f5112d2c87eadb7e60589d991 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -9686,14 +9686,18 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 				resv_name = job_ptr->resv_name;
 
 			memset(&qos_rec, 0, sizeof(slurmdb_qos_rec_t));
-			qos_rec.name = job_specs->qos;
+
+			/* If the qos is blank that means we want the default */
+			/* A blank qos means reset to the association's default */
+				qos_rec.name = job_specs->qos;
 
 			new_qos_ptr = _determine_and_validate_qos(
 				resv_name, job_ptr->assoc_ptr,
 				authorized, &qos_rec, &error_code, false);
 			if (error_code == SLURM_SUCCESS) {
 				info("%s: setting QOS to %s for job_id %u",
-				     __func__, job_specs->qos, job_ptr->job_id);
+				     __func__, new_qos_ptr->name,
+				     job_ptr->job_id);
 				if (job_ptr->qos_id != qos_rec.id) {
 					job_ptr->qos_id = qos_rec.id;
 					job_ptr->qos_ptr = new_qos_ptr;
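
Note: the job_mgr.c hunk makes a blank value ('scontrol update job 1234
qos=') leave qos_rec.name NULL, and the subsequent lookup in
_determine_and_validate_qos() -- not shown in this patch -- is what resolves
a NULL name to the association's default QOS. A standalone toy of the
convention; assoc_t and resolve_qos() are illustrative, not Slurm source:

#include <stdio.h>

typedef struct {
	const char *def_qos;	/* the association's default QOS */
} assoc_t;

static const char *resolve_qos(const char *requested, const assoc_t *assoc)
{
	/* a blank request means "reset to the default" */
	if (!requested || !requested[0])
		return assoc->def_qos;
	return requested;
}

int main(void)
{
	assoc_t assoc = { .def_qos = "normal" };

	printf("%s\n", resolve_qos("", &assoc));	/* -> normal */
	printf("%s\n", resolve_qos("high", &assoc));	/* -> high */
	return 0;
}

The info() change in the same hunk follows from this: job_specs->qos may
now be empty, so the log line reports the QOS actually resolved
(new_qos_ptr->name) rather than the raw request string.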
diff --git a/src/slurmctld/step_mgr.c b/src/slurmctld/step_mgr.c
index 46aa9e317662230d03f4f7279f0cedc7a04375e9..b7f7f73049bfae4bf90a1ce4ea2537a4ad2859d3 100644
--- a/src/slurmctld/step_mgr.c
+++ b/src/slurmctld/step_mgr.c
@@ -2548,10 +2548,6 @@ step_create(job_step_create_request_msg_t *step_specs,
 #endif
 	xfree(step_ptr->tres_alloc_str);
 
-	xstrfmtcat(step_ptr->tres_alloc_str, "%s%u=%"PRIu64,
-		   step_ptr->tres_alloc_str ? "," : "",
-		   TRES_CPU, cpu_count);
-
 	tres_count = (uint64_t)step_ptr->pn_min_memory;
 	if (tres_count & MEM_PER_CPU) {
 		tres_count &= (~MEM_PER_CPU);
@@ -2559,9 +2555,12 @@ step_create(job_step_create_request_msg_t *step_specs,
 	} else
 		tres_count *= node_count;
 
-	xstrfmtcat(step_ptr->tres_alloc_str, "%s%u=%"PRIu64,
+	xstrfmtcat(step_ptr->tres_alloc_str,
+		   "%s%u=%"PRIu64",%u=%"PRIu64",%u=%u",
 		   step_ptr->tres_alloc_str ? "," : "",
-		   TRES_MEM, tres_count);
+		   TRES_CPU, cpu_count,
+		   TRES_MEM, tres_count,
+		   TRES_NODE, node_count);
 
 	if ((tmp_tres_str = gres_2_tres_str(step_ptr->gres_list, 0, true))) {
 		xstrfmtcat(step_ptr->tres_alloc_str, "%s%s",
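
Note: the step_mgr.c hunk folds the node count into the same xstrfmtcat()
call that already recorded the step's cpu and memory counts, so a step's
tres_alloc_str now carries all three. (The leading %s conditional is
vestigial: tres_alloc_str was xfree()'d just above, so it always formats as
an empty string.) A standalone sketch of the resulting layout, with
snprintf() standing in for xstrfmtcat() and stock TRES ids assumed:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum { TRES_CPU = 1, TRES_MEM = 2, TRES_NODE = 4 };	/* stock ids */

int main(void)
{
	uint64_t cpu_count = 8, mem_mb = 4096;	/* per-step totals */
	unsigned int node_count = 2;
	char tres[64];

	/* same "id=count,id=count,id=count" layout the patched code builds */
	snprintf(tres, sizeof(tres), "%u=%" PRIu64 ",%u=%" PRIu64 ",%u=%u",
		 (unsigned int)TRES_CPU, cpu_count,
		 (unsigned int)TRES_MEM, mem_mb,
		 (unsigned int)TRES_NODE, node_count);
	printf("%s\n", tres);	/* prints: 1=8,2=4096,4=2 */
	return 0;
}

It is this "4=2" entry that the sacct changes above read back with
slurmdb_find_tres_count_in_string(..., TRES_NODE), and that "Put node count
in TRES string for steps" in NEWS refers to.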