diff --git a/src/common/slurm_errno.c b/src/common/slurm_errno.c
index 56533f1f4179d7aee6bc939ceaffee64a56ce7d1..4720b21895c488ad3cc9fd90f1d5a96065a08eee 100644
--- a/src/common/slurm_errno.c
+++ b/src/common/slurm_errno.c
@@ -122,7 +122,7 @@ static slurm_errtab_t slurm_errtab[] = {
 	{ ESLURM_TOO_MANY_REQUESTED_CPUS,
 	  "More processors requested than permitted"		},
 	{ ESLURM_TOO_MANY_REQUESTED_NODES,
-	  "More nodes requested than permitted"			},
+	  "Node count specification invalid"			},
 	{ ESLURM_ERROR_ON_DESC_TO_RECORD_COPY,
 	  "Unable to create job record, try again"		},
 	{ ESLURM_JOB_MISSING_SIZE_SPECIFICATION,
diff --git a/src/plugins/select/bluegene/plugin/select_bluegene.c b/src/plugins/select/bluegene/plugin/select_bluegene.c
index 51d17dd599d0e31341170c07db4c855c6290d109..9b6cc45508d9f4c7f6c87f1882de7b8cf388296d 100644
--- a/src/plugins/select/bluegene/plugin/select_bluegene.c
+++ b/src/plugins/select/bluegene/plugin/select_bluegene.c
@@ -224,6 +224,8 @@ extern int fini ( void )
 		struct part_record *part_ptr = NULL;
 		ListIterator itr = list_iterator_create(part_list);
 		while((part_ptr = list_next(itr))) {
+			part_ptr->max_nodes = part_ptr->max_nodes_orig;
+			part_ptr->min_nodes = part_ptr->min_nodes_orig;
 			select_p_alter_node_cnt(SELECT_SET_BP_CNT, 
 						&part_ptr->max_nodes);
 			select_p_alter_node_cnt(SELECT_SET_BP_CNT,
@@ -1164,8 +1166,8 @@ extern int select_p_update_node_state (int index, uint16_t state)
 extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data)
 {
 	job_desc_msg_t *job_desc = (job_desc_msg_t *)data;
-	uint32_t *nodes = (uint32_t *)data;
-	int tmp, i;
+	uint32_t *nodes = (uint32_t *)data, tmp;
+	int i;
 	uint16_t req_geometry[BA_SYSTEM_DIMENSIONS];
 	
 	if(!bluegene_bp_node_cnt) {
@@ -1179,7 +1181,9 @@ extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data)
 			(*nodes) = bluegene_bp_node_cnt;
 		break;
 	case SELECT_SET_BP_CNT:
-		if((*nodes) > bluegene_bp_node_cnt) {
+		if(((*nodes) == INFINITE) || ((*nodes) == NO_VAL))
+			tmp = (*nodes);
+		else if((*nodes) > bluegene_bp_node_cnt) {
 			tmp = (*nodes);
 			tmp /= bluegene_bp_node_cnt;
 			if(tmp < 1) 
diff --git a/src/sbatch/opt.c b/src/sbatch/opt.c
index b5b8d933c8ea666e29b1e95465cf98b168151d37..8429c056c62b4a210575b407ee28ea3a907a64c0 100644
--- a/src/sbatch/opt.c
+++ b/src/sbatch/opt.c
@@ -226,7 +226,7 @@ static void _opt_default()
 	opt.nprocs_set = false;
 	opt.cpus_per_task = 1; 
 	opt.cpus_set = false;
-	opt.min_nodes = 1;
+	opt.min_nodes = 0;
 	opt.max_nodes = 0;
 	opt.nodes_set = false;
 	opt.min_sockets_per_node = NO_VAL; /* requested min/maxsockets */
@@ -1742,7 +1742,7 @@ static bool _opt_verify(void)
 		verified = false;
 	}
 
-	if ((opt.min_nodes <= 0) || (opt.max_nodes < 0) || 
+	if ((opt.min_nodes < 0) || (opt.max_nodes < 0) || 
 	    (opt.max_nodes && (opt.min_nodes > opt.max_nodes))) {
 		error("%s: invalid number of nodes (-N %d-%d)\n",
 		      opt.progname, opt.min_nodes, opt.max_nodes);
@@ -1771,7 +1771,8 @@ static bool _opt_verify(void)
 	 * environment are more extensive and are documented in the
 	 * SLURM reference guide.  */
 	if (opt.distribution == SLURM_DIST_PLANE && opt.plane_size) {
-		if ((opt.nprocs/opt.plane_size) < opt.min_nodes) {
+		if ((opt.min_nodes <= 0) ||
+		    ((opt.nprocs/opt.plane_size) < opt.min_nodes)) {
 			if (((opt.min_nodes-1)*opt.plane_size) >= opt.nprocs) {
 #if(0)
 				info("Too few processes ((n/plane_size) %d < N %d) "
@@ -1815,7 +1816,7 @@ static bool _opt_verify(void)
 	/* massage the numbers */
 	if ((opt.nodes_set || opt.extra_set) && !opt.nprocs_set) {
 		/* 1 proc / node default */
-		opt.nprocs = opt.min_nodes;
+		opt.nprocs = MAX(opt.min_nodes, 1);
 
 		/* 1 proc / min_[socket * core * thread] default */
 		if (opt.min_sockets_per_node > 0) {
diff --git a/src/sbatch/sbatch.c b/src/sbatch/sbatch.c
index 6c8d7141e37ffb0390cfa6290928361c3ce49782..8f1556abdf74d3684587384842a0dc22558c7330 100644
--- a/src/sbatch/sbatch.c
+++ b/src/sbatch/sbatch.c
@@ -163,7 +163,8 @@ static int fill_job_desc_from_opts(job_desc_msg_t *desc)
 	desc->req_nodes = opt.nodelist;
 	desc->exc_nodes = opt.exc_nodes;
 	desc->partition = opt.partition;
-	desc->min_nodes = opt.min_nodes;
+	if (opt.min_nodes)
+		desc->min_nodes = opt.min_nodes;
 	if (opt.licenses)
 		desc->licenses = xstrdup(opt.licenses);
 	if (opt.max_nodes)
@@ -227,7 +228,7 @@ static int fill_job_desc_from_opts(job_desc_msg_t *desc)
 	if (opt.tmpdisk > -1)
 		desc->job_min_tmp_disk = opt.tmpdisk;
 	if (opt.overcommit) {
-		desc->num_procs = opt.min_nodes;
+		desc->num_procs = MAX(opt.min_nodes, 1);
 		desc->overcommit = opt.overcommit;
 	} else
 		desc->num_procs = opt.nprocs * opt.cpus_per_task;
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index d828566a6efebf4f28678ca716bd5b5f6a366f53..bff6565b667798fe1d670dbb7d5e962432643614 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -1808,7 +1808,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	bitstr_t *req_bitmap = NULL, *exc_bitmap = NULL;
 	bool super_user = false;
 	struct job_record *job_ptr;
-	uint32_t total_nodes, max_procs;
+	uint32_t total_nodes, max_procs, max_nodes_orig;
 	acct_association_rec_t assoc_rec, *assoc_ptr;
 	List license_list = NULL;
 	bool valid;
@@ -1820,14 +1820,15 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	uint16_t conn_type;
 #endif
 
-	debug2("before alteration asking for nodes %u-%u procs %u", 
+	max_nodes_orig = job_desc->max_nodes;
+	debug2("before alteration asking for nodes %u-%u procs %u",
 	       job_desc->min_nodes, job_desc->max_nodes,
 	       job_desc->num_procs);
 	select_g_alter_node_cnt(SELECT_SET_NODE_CNT, job_desc);
 	select_g_get_jobinfo(job_desc->select_jobinfo,
 			     SELECT_DATA_MAX_PROCS, &max_procs);
 	
-	debug2("after alteration asking for nodes %u-%u procs %u-%u", 
+	debug2("after alteration asking for nodes %u-%u procs %u-%u",
 	       job_desc->min_nodes, job_desc->max_nodes,
 	       job_desc->num_procs, max_procs);
 	
@@ -1853,6 +1854,17 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		}
 		part_ptr = default_part_loc;
 	}
+	if (job_desc->min_nodes == NO_VAL)
+		job_desc->min_nodes = part_ptr->min_nodes_orig;
+	if (job_desc->max_nodes == NO_VAL)
+		job_desc->max_nodes = part_ptr->max_nodes_orig;
+	else if (max_nodes_orig < part_ptr->min_nodes_orig) {
+		info("_job_create: job's max nodes less than partition's "
+		     "min nodes (%u < %u)", 
+		     max_nodes_orig, part_ptr->min_nodes_orig);
+		error_code = ESLURM_TOO_MANY_REQUESTED_NODES;
+		return error_code;
+	}
  
 	if ((job_desc->user_id == 0) && part_ptr->disable_root_jobs) {
 		error("Security violation, SUBMIT_JOB for user root disabled");
diff --git a/src/slurmctld/partition_mgr.c b/src/slurmctld/partition_mgr.c
index 958e01338b9a9b9ea4bf2340a783a4ec0468de11..b7d38e3f0d7b15f811dfc275f81d8e22ef5b48c7 100644
--- a/src/slurmctld/partition_mgr.c
+++ b/src/slurmctld/partition_mgr.c
@@ -217,17 +217,19 @@ struct part_record *create_part_record(void)
 	    (struct part_record *) xmalloc(sizeof(struct part_record));
 
 	xassert (part_ptr->magic = PART_MAGIC);  /* set value */
-	part_ptr->name         = xstrdup("DEFAULT");
+	part_ptr->name              = xstrdup("DEFAULT");
 	part_ptr->disable_root_jobs = default_part.disable_root_jobs;
-	part_ptr->hidden       = default_part.hidden;
-	part_ptr->max_time     = default_part.max_time;
-	part_ptr->max_nodes    = default_part.max_nodes;
-	part_ptr->min_nodes    = default_part.min_nodes;
-	part_ptr->root_only    = default_part.root_only;
-	part_ptr->state_up     = default_part.state_up;
-	part_ptr->max_share    = default_part.max_share;
-	part_ptr->priority     = default_part.priority;
-	part_ptr->node_bitmap  = NULL;
+	part_ptr->hidden            = default_part.hidden;
+	part_ptr->max_time          = default_part.max_time;
+	part_ptr->max_nodes         = default_part.max_nodes;
+	part_ptr->max_nodes_orig    = default_part.max_nodes;
+	part_ptr->min_nodes         = default_part.min_nodes;
+	part_ptr->min_nodes_orig    = default_part.min_nodes;
+	part_ptr->root_only         = default_part.root_only;
+	part_ptr->state_up          = default_part.state_up;
+	part_ptr->max_share         = default_part.max_share;
+	part_ptr->priority          = default_part.priority;
+	part_ptr->node_bitmap       = NULL;
 
 	if (default_part.allow_groups)
 		part_ptr->allow_groups = xstrdup(default_part.allow_groups);
@@ -367,20 +369,20 @@ static void _dump_part_state(struct part_record *part_ptr, Buf buffer)
 	else
 		default_part_flag = 0;
 
-	packstr(part_ptr->name,     buffer);
-	pack32(part_ptr->max_time,  buffer);
-	pack32(part_ptr->max_nodes, buffer);
-	pack32(part_ptr->min_nodes, buffer);
+	packstr(part_ptr->name,          buffer);
+	pack32(part_ptr->max_time,       buffer);
+	pack32(part_ptr->max_nodes_orig, buffer);
+	pack32(part_ptr->min_nodes_orig, buffer);
 
-	pack16(default_part_flag,   buffer);
-	pack16(part_ptr->hidden,    buffer);
-	pack16(part_ptr->root_only, buffer);
-	pack16(part_ptr->max_share, buffer);
-	pack16(part_ptr->priority,  buffer);
+	pack16(default_part_flag,        buffer);
+	pack16(part_ptr->hidden,         buffer);
+	pack16(part_ptr->root_only,      buffer);
+	pack16(part_ptr->max_share,      buffer);
+	pack16(part_ptr->priority,       buffer);
 
-	pack16(part_ptr->state_up,  buffer);
-	packstr(part_ptr->allow_groups, buffer);
-	packstr(part_ptr->nodes,    buffer);
+	pack16(part_ptr->state_up,       buffer);
+	packstr(part_ptr->allow_groups,  buffer);
+	packstr(part_ptr->nodes,         buffer);
 }
 
 /*
@@ -489,21 +491,23 @@ int load_all_part_state(void)
 
 		if (part_ptr) {
 			part_cnt++;
-			part_ptr->hidden = hidden;
-			part_ptr->max_time  = max_time;
-			part_ptr->max_nodes = max_nodes;
-			part_ptr->min_nodes = min_nodes;
+			part_ptr->hidden         = hidden;
+			part_ptr->max_time       = max_time;
+			part_ptr->max_nodes      = max_nodes;
+			part_ptr->max_nodes_orig = max_nodes;
+			part_ptr->min_nodes      = min_nodes;
+			part_ptr->min_nodes_orig = min_nodes;
 			if (def_part_flag) {
 				xfree(default_part_name);
 				default_part_name = xstrdup(part_name);
 				default_part_loc = part_ptr;
 			}
-			part_ptr->root_only = root_only;
-			part_ptr->max_share = max_share;
-			part_ptr->priority  = priority;
-			part_ptr->state_up  = state_up;
+			part_ptr->root_only      = root_only;
+			part_ptr->max_share      = max_share;
+			part_ptr->priority       = priority;
+			part_ptr->state_up       = state_up;
 			xfree(part_ptr->allow_groups);
-			part_ptr->allow_groups = allow_groups;
+			part_ptr->allow_groups   = allow_groups;
 			xfree(part_ptr->nodes);
 			part_ptr->nodes = nodes;
 		} else {
@@ -550,18 +554,20 @@ int init_part_conf(void)
 	last_part_update = time(NULL);
 
 	xfree(default_part.name);	/* needed for reconfig */
-	default_part.name        = xstrdup("DEFAULT");
+	default_part.name           = xstrdup("DEFAULT");
 	default_part.disable_root_jobs = slurmctld_conf.disable_root_jobs;
-	default_part.hidden      = 0;
-	default_part.max_time    = INFINITE;
-	default_part.max_nodes   = INFINITE;
-	default_part.min_nodes   = 1;
-	default_part.root_only   = 0;
-	default_part.state_up    = 1;
-	default_part.max_share   = 1;
-	default_part.priority    = 1;
-	default_part.total_nodes = 0;
-	default_part.total_cpus  = 0;
+	default_part.hidden         = 0;
+	default_part.max_time       = INFINITE;
+	default_part.max_nodes      = INFINITE;
+	default_part.max_nodes_orig = INFINITE;
+	default_part.min_nodes      = 1;
+	default_part.min_nodes_orig = 1;
+	default_part.root_only      = 0;
+	default_part.state_up       = 1;
+	default_part.max_share      = 1;
+	default_part.priority       = 1;
+	default_part.total_nodes    = 0;
+	default_part.total_cpus     = 0;
 	xfree(default_part.nodes);
 	xfree(default_part.allow_groups);
 	xfree(default_part.allow_uids);
@@ -745,14 +751,8 @@ void pack_part(struct part_record *part_ptr, Buf buffer)
 
 	packstr(part_ptr->name, buffer);
 	pack32(part_ptr->max_time, buffer);
-	altered = part_ptr->max_nodes;
-	select_g_alter_node_cnt(SELECT_APPLY_NODE_MAX_OFFSET, 
-				&altered);
-	pack32(altered, buffer);
-	altered = part_ptr->min_nodes;
-	select_g_alter_node_cnt(SELECT_APPLY_NODE_MIN_OFFSET,
- 				&altered); 
-	pack32(altered, buffer);
+	pack32(part_ptr->max_nodes_orig, buffer);
+	pack32(part_ptr->min_nodes_orig, buffer);
 	altered = part_ptr->total_nodes;
 	select_g_alter_node_cnt(SELECT_APPLY_NODE_MAX_OFFSET, 
 				&altered);
@@ -826,7 +826,8 @@ int update_part(update_part_msg_t * part_desc)
 	if (part_desc->max_nodes != NO_VAL) {
 		info("update_part: setting max_nodes to %u for partition %s", 
 		     part_desc->max_nodes, part_desc->name);
-		part_ptr->max_nodes = part_desc->max_nodes;
+		part_ptr->max_nodes      = part_desc->max_nodes;
+		part_ptr->max_nodes_orig = part_desc->max_nodes;
 		select_g_alter_node_cnt(SELECT_SET_BP_CNT,
 					&part_ptr->max_nodes);
 	}
@@ -834,7 +835,8 @@ int update_part(update_part_msg_t * part_desc)
 	if (part_desc->min_nodes != NO_VAL) {
 		info("update_part: setting min_nodes to %u for partition %s", 
 		     part_desc->min_nodes, part_desc->name);
-		part_ptr->min_nodes = part_desc->min_nodes;
+		part_ptr->min_nodes      = part_desc->min_nodes;
+		part_ptr->min_nodes_orig = part_desc->min_nodes;
 		select_g_alter_node_cnt(SELECT_SET_BP_CNT,
 					&part_ptr->min_nodes);
 	}
diff --git a/src/slurmctld/read_config.c b/src/slurmctld/read_config.c
index d35b144c1a8d41fb0afb1493ddc618d15a2908ab..c1bd2188f4da39cc3b429f875f94e624ed8ab936 100644
--- a/src/slurmctld/read_config.c
+++ b/src/slurmctld/read_config.c
@@ -628,14 +628,16 @@ static int _build_single_partitionline_info(slurm_conf_partition_t *part)
 	if(part_ptr->disable_root_jobs) 
 		debug2("partition %s does not allow root jobs", part_ptr->name);
 	
-	part_ptr->hidden    = part->hidden_flag ? 1 : 0;
-	part_ptr->max_time  = part->max_time;
-	part_ptr->max_share = part->max_share;
-	part_ptr->max_nodes = part->max_nodes;
-	part_ptr->min_nodes = part->min_nodes;
-	part_ptr->priority  = part->priority;
-	part_ptr->root_only = part->root_only_flag ? 1 : 0;
-	part_ptr->state_up  = part->state_up_flag ? 1 : 0;
+	part_ptr->hidden         = part->hidden_flag ? 1 : 0;
+	part_ptr->max_time       = part->max_time;
+	part_ptr->max_share      = part->max_share;
+	part_ptr->max_nodes      = part->max_nodes;
+	part_ptr->max_nodes_orig = part->max_nodes;
+	part_ptr->min_nodes      = part->min_nodes;
+	part_ptr->min_nodes_orig = part->min_nodes;
+	part_ptr->priority       = part->priority;
+	part_ptr->root_only      = part->root_only_flag ? 1 : 0;
+	part_ptr->state_up       = part->state_up_flag ? 1 : 0;
 	if (part->allow_groups) {
 		xfree(part_ptr->allow_groups);
 		part_ptr->allow_groups = xstrdup(part->allow_groups);
diff --git a/src/slurmctld/slurmctld.h b/src/slurmctld/slurmctld.h
index 6d0d8d9f85e62b732672561db48053357348645d..6ff3e1ee44dbebf9d50fec6e9a4a9076242cfa06 100644
--- a/src/slurmctld/slurmctld.h
+++ b/src/slurmctld/slurmctld.h
@@ -247,7 +247,9 @@ struct part_record {
 	uint16_t hidden;	/* 1 if hidden by default */
 	uint32_t max_time;	/* minutes or INFINITE */
 	uint32_t max_nodes;	/* per job or INFINITE */
+	uint32_t max_nodes_orig;/* unscaled value (c-nodes on BlueGene) */
 	uint32_t min_nodes;	/* per job */
+	uint32_t min_nodes_orig;/* unscaled value (c-nodes on BlueGene) */
 	uint32_t total_nodes;	/* total number of nodes in the partition */
 	uint32_t total_cpus;	/* total number of cpus in the partition */
 	uint32_t min_offset;	/* select plugin min offset */