diff --git a/NEWS b/NEWS
index 4577a9004bb63ecf0b9812edd46d526a2523a4b5..e2f663bf49e0962ed9200e22364c58a4ddc5f651 100644
--- a/NEWS
+++ b/NEWS
@@ -20,6 +20,7 @@ documents those changes that are of interest to users and admins.
  -- Restore squeue support for "%G" format (group id) accidentally removed in
     2.2.0.pre7.
  -- Added preempt_mode option to qos.
+ -- Added grouping=individual option for sreport size reports.
 
 * Changes in SLURM 2.2.0.pre8
 =============================
diff --git a/doc/man/man1/sreport.1 b/doc/man/man1/sreport.1
index 866fde6b0ee7ee790c90472633eb96157f91a813..4b48532f47f33caf788dff9ea890568f080e465d 100644
--- a/doc/man/man1/sreport.1
+++ b/doc/man/man1/sreport.1
@@ -286,7 +286,9 @@ separate line without any hierarchy.
 List of group ids to include in report.  Default is all.
 .TP
 .B Grouping=<OPT>
-Comma separated list of size groupings.   (i.e. 50,100,150 would group job cpu count 1-49, 50-99, 100-149, > 150).
+Comma separated list of size groupings.  (e.g. 50,100,150 would group
+job cpu counts into 1-49, 50-99, 100-149, and >= 150).
+grouping=individual will produce a separate column for each job size found.
 .TP
 .B Jobs=<OPT>
 List of jobs/steps to include in report.  Default is all.
diff --git a/src/db_api/job_report_functions.c b/src/db_api/job_report_functions.c
index 044f840fd505bdf40b68ca787bc281ad0d4504ff..647c5c0345c4c376144c8f711b06302aace34036 100644
--- a/src/db_api/job_report_functions.c
+++ b/src/db_api/job_report_functions.c
@@ -50,6 +50,18 @@
 #include "src/common/slurm_accounting_storage.h"
 #include "src/common/xstring.h"
 
+static int _sort_group_asc(char *group_a, char *group_b)
+{
+	int size_a = atoi(group_a);
+	int size_b = atoi(group_b);
+
+	if (size_a < size_b)
+		return -1;
+	else if (size_a > size_b)
+		return 1;
+	return 0;
+}
+
 static List _process_grouped_report(void *db_conn,
 	slurmdb_job_cond_t *job_cond, List grouping_list,
 	bool flat_view, bool wckey_type)
@@ -76,6 +88,7 @@ static List _process_grouped_report(void *db_conn,
 	List tmp_acct_list = NULL;
 	bool destroy_job_cond = 0;
 	bool destroy_grouping_list = 0;
+	bool individual = 0;
 
 	uid_t my_uid = getuid();
 
@@ -92,6 +105,7 @@ static List _process_grouped_report(void *db_conn,
 		grouping_list = list_create(slurm_destroy_char);
 		slurm_addto_char_list(grouping_list, "50,250,500,1000");
 	}
+
 	tmp_acct_list = job_cond->acct_list;
 	job_cond->acct_list = NULL;
 
@@ -105,10 +119,35 @@ static List _process_grouped_report(void *db_conn,
 		goto end_it;
 	}
 
+	group_itr = list_iterator_create(grouping_list);
+	/* make a group for each job size we find. */
+	if(!list_count(grouping_list)) {
+		char *group = NULL;
+		char *tmp = NULL;
+		individual = 1;
+		itr = list_iterator_create(job_list);
+		while((job = list_next(itr))) {
+			if(!job->elapsed || !job->alloc_cpus)
+				continue;
+			tmp = xstrdup_printf("%u", job->alloc_cpus);
+			while((group = list_next(group_itr))) {
+				if(!strcmp(group, tmp)) {
+					break;
+				}
+			}
+			if(!group)
+				list_append(grouping_list, tmp);
+			else
+				xfree(tmp);
+			list_iterator_reset(group_itr);
+		}
+		list_iterator_destroy(itr);
+		list_sort(grouping_list, (ListCmpF)_sort_group_asc);
+	}
+
 	cluster_list = list_create(slurmdb_destroy_report_cluster_grouping);
 
 	cluster_itr = list_iterator_create(cluster_list);
-	group_itr = list_iterator_create(grouping_list);
 
 	if(flat_view)
 		goto no_objects;
@@ -198,17 +237,26 @@ static List _process_grouped_report(void *db_conn,
 				job_group = xmalloc(
 					sizeof(slurmdb_report_job_grouping_t));
 				job_group->jobs = list_create(NULL);
-				job_group->min_size = last_size;
+				if(!individual)
+					job_group->min_size = last_size;
 				last_size = atoi(group);
-				job_group->max_size = last_size-1;
+				if(!individual)
+					job_group->max_size = last_size-1;
+				else
+					job_group->min_size =
+						job_group->max_size = last_size;
 				list_append(acct_group->groups, job_group);
 			}
-			if(last_size) {
+			if(last_size && !individual) {
 				job_group = xmalloc(
 					sizeof(slurmdb_report_job_grouping_t));
 				job_group->jobs = list_create(NULL);
 				job_group->min_size = last_size;
-				job_group->max_size = INFINITE;
+				if(individual)
+					job_group->max_size =
+						job_group->min_size;
+				else
+					job_group->max_size = INFINITE;
 				list_append(acct_group->groups, job_group);
 			}
 			list_iterator_reset(group_itr);
@@ -294,17 +342,26 @@ no_objects:
 				job_group = xmalloc(
 					sizeof(slurmdb_report_job_grouping_t));
 				job_group->jobs = list_create(NULL);
-				job_group->min_size = last_size;
+				if(!individual)
+					job_group->min_size = last_size;
 				last_size = atoi(group);
-				job_group->max_size = last_size-1;
+				if(!individual)
+					job_group->max_size = last_size-1;
+				else
+					job_group->min_size =
+						job_group->max_size = last_size;
 				list_append(acct_group->groups, job_group);
 			}
-			if(last_size) {
+			if(last_size && !individual) {
 				job_group = xmalloc(
 					sizeof(slurmdb_report_job_grouping_t));
 				job_group->jobs = list_create(NULL);
 				job_group->min_size = last_size;
-				job_group->max_size = INFINITE;
+				if(individual)
+					job_group->max_size =
+						job_group->min_size;
+				else
+					job_group->max_size = INFINITE;
 				list_append(acct_group->groups, job_group);
 			}
 			list_iterator_reset(group_itr);
diff --git a/src/sreport/job_reports.c b/src/sreport/job_reports.c
index 91c0b1ca0d0f5e86405e71c140678eea7b019148..3fafcc48675967a53700732d2e9b95fed8eedb0d 100644
--- a/src/sreport/job_reports.c
+++ b/src/sreport/job_reports.c
@@ -56,6 +56,7 @@ static List print_fields_list = NULL; /* types are of print_field_t */
 static List grouping_print_fields_list = NULL; /* types are of print_field_t */
 static int print_job_count = 0;
 static bool flat_view = false;
+static bool individual_grouping = false;
 
 /*
  * Comparator used for sorting clusters alphabetically
@@ -284,7 +285,9 @@ static int _set_cond(int *start, int argc, char *argv[],
 			set = 1;
 		} else if (!strncasecmp (argv[i], "grouping",
 					 MAX(command_len, 2))) {
-			if(grouping_list)
+			if(!strcasecmp(argv[i]+end, "individual")) {
+				individual_grouping = 1;
+			} else if(grouping_list)
 				slurm_addto_char_list(grouping_list,
 						      argv[i]+end);
 		} else if (!strncasecmp (argv[i], "Jobs",
@@ -509,7 +512,11 @@ static int _setup_grouping_print_fields_list(List grouping_list)
 			field->type = PRINT_JOB_COUNT;
 		else
 			field->type = PRINT_JOB_SIZE;
-		field->name = xstrdup_printf("%u-%u cpus", last_size, size-1);
+		if(individual_grouping)
+			field->name = xstrdup_printf("%u cpus", size);
+		else
+			field->name = xstrdup_printf("%u-%u cpus",
+						     last_size, size-1);
 		if(time_format == SLURMDB_REPORT_TIME_SECS_PER
 		   || time_format == SLURMDB_REPORT_TIME_MINS_PER
 		   || time_format == SLURMDB_REPORT_TIME_HOURS_PER)
@@ -532,12 +539,13 @@ static int _setup_grouping_print_fields_list(List grouping_list)
 	}
 	list_iterator_destroy(itr);
 
-	if(last_size) {
+	if(last_size && !individual_grouping) {
 		field = xmalloc(sizeof(print_field_t));
 		if(print_job_count)
 			field->type = PRINT_JOB_COUNT;
 		else
 			field->type = PRINT_JOB_SIZE;
+
 		field->name = xstrdup_printf(">= %u cpus", last_size);
 		if(time_format == SLURMDB_REPORT_TIME_SECS_PER
 		   || time_format == SLURMDB_REPORT_TIME_MINS_PER
@@ -599,14 +607,12 @@ extern int job_sizes_grouped_by_top_acct(int argc, char *argv[])
 	if(!list_count(format_list))
 		slurm_addto_char_list(format_list, "Cl,a");
 
-	if(!list_count(grouping_list))
+	if(!individual_grouping && !list_count(grouping_list))
 		slurm_addto_char_list(grouping_list, "50,250,500,1000");
 
 	_setup_print_fields_list(format_list);
 	list_destroy(format_list);
 
-	_setup_grouping_print_fields_list(grouping_list);
-
 	if(!(slurmdb_report_cluster_grouping_list =
 	     slurmdb_report_job_sizes_grouped_by_top_account(db_conn,
 		     job_cond, grouping_list, flat_view))) {
@@ -614,6 +620,8 @@ extern int job_sizes_grouped_by_top_acct(int argc, char *argv[])
 		goto end_it;
 	}
 
+	_setup_grouping_print_fields_list(grouping_list);
+
 	if(print_fields_have_header) {
 		char start_char[20];
 		char end_char[20];
@@ -729,6 +737,9 @@ end_it:
 	if(print_job_count)
 		print_job_count = 0;
 
+	if(individual_grouping)
+		individual_grouping = 0;
+
 	slurmdb_destroy_job_cond(job_cond);
 
 	if(grouping_list) {
@@ -797,14 +808,12 @@ extern int job_sizes_grouped_by_wckey(int argc, char *argv[])
 	if(!list_count(format_list))
 		slurm_addto_char_list(format_list, "Cl,wc");
 
-	if(!list_count(grouping_list))
+	if(!individual_grouping && !list_count(grouping_list))
 		slurm_addto_char_list(grouping_list, "50,250,500,1000");
 
 	_setup_print_fields_list(format_list);
 	list_destroy(format_list);
 
-	_setup_grouping_print_fields_list(grouping_list);
-
 	if(!(slurmdb_report_cluster_grouping_list =
 	     slurmdb_report_job_sizes_grouped_by_wckey(db_conn,
 		     job_cond, grouping_list))) {
@@ -812,6 +821,8 @@ extern int job_sizes_grouped_by_wckey(int argc, char *argv[])
 		goto end_it;
 	}
 
+	_setup_grouping_print_fields_list(grouping_list);
+
 	if(print_fields_have_header) {
 		char start_char[20];
 		char end_char[20];
@@ -927,6 +938,9 @@ end_it:
 	if(print_job_count)
 		print_job_count = 0;
 
+	if(individual_grouping)
+		individual_grouping = 0;
+
 	slurmdb_destroy_job_cond(job_cond);
 
 	if(grouping_list) {
diff --git a/src/sreport/sreport.c b/src/sreport/sreport.c
index f44bac8754ca94f1f1f5d6e78d368082d24c803d..463c206e3c59b334316393708e3d4c0dc51d2644 100644
--- a/src/sreport/sreport.c
+++ b/src/sreport/sreport.c
@@ -744,6 +744,8 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
              - Grouping=<OPT>   - Comma separated list of size groupings.  \n\
                                   (i.e. 50,100,150 would group job cpu count\n\
                                    1-49, 50-99, 100-149, > 150).           \n\
+                                  grouping=individual will result in a     \n\
+                                  single column for each job size found.   \n\
              - Jobs=<OPT>       - List of jobs/steps to include in report. \n\
                                   Default is all.                          \n\
              - Nodes=<OPT>      - Only show jobs that ran on these nodes.  \n\
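
Illustrative usage (a sketch, not part of the patch itself; it assumes the
standard "sreport job SizesByAccount" report and a populated accounting
database):

    # Ranged grouping: columns such as "1-49 cpus", "50-99 cpus",
    # "100-149 cpus" and ">= 150 cpus".
    sreport job sizesbyaccount grouping=50,100,150

    # New individual grouping: one column per distinct allocated CPU
    # count actually found, e.g. "16 cpus", "64 cpus", "128 cpus".
    sreport job sizesbyaccount grouping=individual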