diff --git a/NEWS b/NEWS
index 1c0119ac9a4c921be2bd02de1b8be6b4cf4ae046..0bc32ee872d2d7bb494b2d141b8365b2d93b384e 100644
--- a/NEWS
+++ b/NEWS
@@ -19,13 +19,16 @@ documents those changes that are of interest to users and admins.
     Based upon work by Kumar Krishna (HP, India).
  -- Add multi-core options to salloc and sbatch commands (sbatch.patch and
     cleanup.patch from Chris Holmes, HP).
- -- In select/cons_res properly release resources allocated to job being suspended
-    (rmbreak.patch, from Chris Holmes, HP).
+ -- In select/cons_res properly release resources allocated to job being 
+    suspended (rmbreak.patch, from Chris Holmes, HP).
  -- Removed database and jobacct plugin replaced with jobacct_storage 
     and jobacct_gather for easier hooks for further expansion of the
     jobacct plugin.
  -- Job step launch in srun is now done from the slurm api's all futher
     modifications to job launch should be done there.
+ -- Add new partition configuration parameter Priority and replace Shared 
+    with MaxShare.
+ -- Add new configuration parameter SchedulerTimeSlice.
 
 * Changes in SLURM 1.3.0-pre2
 =============================
diff --git a/RELEASE_NOTES b/RELEASE_NOTES
index 928f99b01fc915dcfbe92358894da9c2db28289e..a4093c8df66f9fb5f8806ff77b5b68f7798df848 100644
--- a/RELEASE_NOTES
+++ b/RELEASE_NOTES
@@ -53,6 +53,20 @@ CONFIGURATION FILE CHANGES
   specify how SLURM will talk to the database.
 * Added new paramter "CryptoType" to specify digital signature plugin to 
   be used. Currenly only supports crypto/openssl.
+* Added new parameter "SchedulerTimeSlice" to control the length of gang
+  scheduler time slices.
+* Added new partition parameter "Priority". A job's scheduling priority is based
+  upon two factors: the priority of its partition and the job's own priority.
+  Since nodes can be configured in multiple partitions, this can be used to
+  configure high priority partitions (queues).
+* The partition parameter "Shared" has been replaced by "MaxShare", which
+  controls the number of jobs which can be executed on a given set of
+  resources. Here are equivalent values of the parameters:
+  Shared=EXCLUSIVE MaxShare=0
+  Shared=NO        MaxShare=1
+  Shared=YES       MaxShare=4         (or some other small number, two or higher)
+  Shared=FORCE     MaxShare=65535
+ 
 * See "man slurm.conf" for more information.
 
 OTHER CHANGES
diff --git a/doc/html/configurator.html.in b/doc/html/configurator.html.in
index 2f1c04e180bea93f139c3ee19b181c75ad71661f..121a7df36408fe4dee8683f06a2e7bff5576ff68 100644
--- a/doc/html/configurator.html.in
+++ b/doc/html/configurator.html.in
@@ -189,6 +189,7 @@ function displayfile()
    "# SCHEDULING<br>" +
    "SchedulerType=sched/" + get_radio_value(document.config.sched_type) + "<br>" +
    get_field("SchedulerPort",document.config.scheduler_port) + "<br>" +
+   "#SchedulerTimeSlice= <br>" +
    "#SchedulerRootFilter= <br>" +
    "SelectType=select/" + get_radio_value(document.config.select_type) + "<br>" +
    get_select_type_params() + "<br>" +
@@ -507,7 +508,7 @@ individual processors and memory<BR>
 	when CR_CPU or CR_CPU_MEMORY is selected. 
 	They are considered to compute the total number of 
 	tasks when -n is not specified
-    <DD> Note: CR_MEMORY assumes Shared=Yes
+    <DD> Note: CR_MEMORY assumes MaxShare value of one or higher
 <DT> <input type="radio" name="cons_res_params" value="CR_CPU" checked
 	    onClick="javascript:set_select_type(this, 'cons_res')">
     <B>CR_CPU</B>: (default)
@@ -525,7 +526,7 @@ individual processors and memory<BR>
 <DT> <input type="radio" name="cons_res_params" value="CR_Memory"
 	    onClick="javascript:set_select_type(this)">
     <B>CR_Memory</B>: Memory as a consumable resource. 
-    <DD> Note: CR_Memory assumes Shared=Yes
+    <DD> Note: CR_Memory assumes MaxShare value of one or higher
 <DT> <input type="radio" name="cons_res_params" value="CR_CPU_Memory"
 	    onClick="javascript:set_select_type(this)">
     <B>CR_CPU_Memory</B>:
@@ -723,6 +724,6 @@ before terminating all remaining tasks. A value of zero indicates unlimited wait
 </FORM>
 <HR>
 <p class="footer">UCRL-WEB-225274<br>
-Last modified 11 Sep 2007</p>
+Last modified 21 September 2007</p>
 </BODY>
 
diff --git a/doc/man/man5/slurm.conf.5 b/doc/man/man5/slurm.conf.5
index be46f68245470c08c8cfd87d4f11383244032cb5..a5a619fbe78ff49d41485c4a622df47dfb3424d7 100644
--- a/doc/man/man5/slurm.conf.5
+++ b/doc/man/man5/slurm.conf.5
@@ -1,4 +1,4 @@
-.TH "slurm.conf" "5" "July 2007" "slurm.conf 1.3" "Slurm configuration file"
+.TH "slurm.conf" "5" "September 2007" "slurm.conf 1.3" "Slurm configuration file"
 .SH "NAME"
 slurm.conf \- Slurm configuration file 
 .SH "DESCRIPTION"
@@ -572,14 +572,6 @@ default value is 0, which means that a node will remain in the
 DOWN state until a system administrator explicitly changes its state
 (even if the slurmd daemon registers and resumes communications).
 
-.TP
-\fBSchedulerRootFilter\fR
-If set to '1' then scheduler will filter and avoid \fBRootOnly\fR 
-partitions (let root user or process schedule these partitions).
-Otherwise scheduler will treat \fBRootOnly\fR partitions as any 
-other standard partition.
-Currently only supported by sched/backfill schedululer plugin.
-
 .TP
 \fBSchedulerPort\fR
 The port number on which slurmctld should listen for connection requests.
@@ -595,12 +587,18 @@ partitions are exempt from any external scheduling activities. The
 default value is 1. Currently only used by the built\-in backfill
 scheduling module "sched/backfill" (see \fBSchedulerType\fR).
 
+.TP
+\fBSchedulerTimeSlice\fR
+Number of seconds in each time slice when \fBSchedulerType=sched/gang\fR.
+The default value is 30.
+
 .TP
 \fBSchedulerType\fR
 Identifies the type of scheduler to be used. Acceptable values include 
 "sched/builtin" for the built\-in FIFO scheduler, 
 "sched/backfill" for a backfill scheduling module to augment 
 the default FIFO scheduling, 
+"sched/gang" for gang scheduler (time\-slicing of parallel jobs),
 "sched/hold" to hold all newly arriving jobs if a file "/etc/slurm.hold" 
 exists otherwise use the built\-in FIFO scheduler, and 
 "sched/wiki" for the Wiki interface to the Maui Scheduler. 
@@ -636,8 +634,8 @@ This is the default value for non\-BlueGene systems.
 The resources within a node are individually allocated as
 consumable resources. 
 Note that whole nodes can be allocated to jobs for selected 
-partitions by using the \fIShared=EXCLUSIVE\fR option.
-See the partition \fBShared\fR parameter for more information.
+partitions by using the \fIMaxShare=0\fR option.
+See the partition \fBMaxShare\fR parameter for more information.
 .TP
 \fBselect/bluegene\fR
 for a three\-dimentional BlueGene system. 
@@ -674,7 +672,7 @@ Memory and CPUs are consumable resources.
 .TP
 \fBCR_Memory\fR
 Memory is a consumable resource.
-NOTE: This implies \fIShared=Yes\fR for all partitions.
+NOTE: This implies \fIMaxShare\fR is non\-zero for all partitions.
 .RE
 
 .TP
@@ -1311,6 +1309,25 @@ may be allocated to any single job.
 The default value is "UNLIMITED", which is represented internally as \-1.
 This limit does not apply to jobs executed by SlurmUser or user root.
 
+.TP
+\fBMaxShare\fR
+Ability of the partition to execute more than one job at a 
+time on each node. Shared nodes will offer unpredictable performance 
+for application programs, but can provide higher system utilization 
+and responsiveness than otherwise possible. 
+Accepts a numeric value indicating the number of jobs which can
+be allocated to each resource in the partition.
+For example, a value of two indicates that two jobs can simultaneously
+be allocated to each resource.
+If \fBSchedulerType=sched/gang\fR then those jobs can be time sliced.
+Other values of \fBSchedulerType\fR will rely upon the operating 
+system to perform timesharing of the resources among active jobs.
+The default value is 1 (resources are not shared).
+A value of 0 is treated as a special case to allocate entire
+nodes to jobs even with \fBSelectType=select/cons_res\fR.
+A value of 65535 is treated as a special case to make all nodes 
+available for sharing without any user means of disabling it.
+
 .TP
 \fBMaxTime\fR
 Maximum wall\-time limit for any job in minutes. The default 
@@ -1342,24 +1359,18 @@ with that record will apply to subsequent partition specifications
 unless explicitly set to other values in that partition record or
 replaced with a different set of default values.
 
+.TP
+\fBPriority\fR
+Jobs submitted to a higher priority partition will be dispatched 
+before pending jobs in lower priority partitions and if possible
+they will preempt running jobs from lower priority partitions.
+Note that a partition's priority takes precedence over a job's 
+priority.
+The value may not exceed 65533.
+
 .TP
 \fBShared\fR
-Ability of the partition to execute more than one job at a 
-time on each node. Shared nodes will offer unpredictable performance 
-for application programs, but can provide higher system utilization 
-and responsiveness than otherwise possible. 
-Possible values are "EXCLUSIVE", "FORCE", "YES", and "NO".
-"EXCLUSIVE" allocates entire nodes to jobs even with 
-select/cons_res configured.
-This can be used to allocate whole nodes in some partitions 
-and individual processors in other partitions. 
-"FORCE" makes all nodes in the partition available for sharing 
-without user means of disabling it.
-"YES" makes nodes in the partition available for sharing if and 
-only if the individual jobs permit sharing (see the srun 
-"\-\-share" option).
-"NO" makes nodes unavailable for sharing under all circumstances. 
-The default value is "NO".
+This parameter has been replaced by \fBMaxShare\fR.
 
 .TP
 \fBState\fR
diff --git a/slurm/slurm.h.in b/slurm/slurm.h.in
index a716b0f223df2f3cf2886520702548076a8df273..291eab2076ee1daec76dd1cf600256e30fdc0ab8 100644
--- a/slurm/slurm.h.in
+++ b/slurm/slurm.h.in
@@ -425,13 +425,6 @@ typedef enum select_type_plugin_info {
 #define TASK_PARAM_CPUSETS 0x0001
 #define TASK_PARAM_SCHED   0x0002
 
-enum part_shared {
-	SHARED_NO = 0,		/* Nodes never shared in partition */
-	SHARED_YES,		/* Nodes possible to share in partition */
-	SHARED_FORCE,		/* Nodes always shares in partition */
-	SHARED_EXCLUSIVE	/* Nodes never shared even with cons_res */
-};
-
 /*****************************************************************************\
  *	PROTOCOL DATA STRUCTURE DEFINITIONS
 \*****************************************************************************/
@@ -819,8 +812,9 @@ typedef struct partition_info {
 	uint16_t node_scaling;	/* select plugin node scaling factor */
 	uint16_t default_part;	/* 1 if this is default partition */
 	uint16_t hidden;	/* 1 if partition is hidden by default */
+	uint16_t priority;	/* scheduling priority for jobs */
 	uint16_t root_only;	/* 1 if allocate must come for user root */
-	uint16_t shared;	/* See part_shared above */
+	uint16_t max_share;	/* number of jobs to gang schedule */
 	uint16_t state_up;	/* 1 if state is up, 0 if down */
 	char *nodes;		/* list names of nodes in partition */
 	int *node_inx;		/* list index pairs into node_table:
@@ -920,6 +914,7 @@ typedef struct slurm_ctl_conf {
 				 * registration */
 	uint16_t resume_rate;	/* nodes to make full power, per minute */
 	char *resume_program;	/* program to make nodes full power */
+	uint16_t sched_time_slice;	/* gang scheduler slice time, secs */
 	char *schedtype;	/* type of scheduler to use */
 	uint16_t schedport;	/* port for scheduler connection */
 	uint16_t schedrootfltr;	/* 1 if rootOnly partitions should be
diff --git a/src/api/config_info.c b/src/api/config_info.c
index 4f47f608f4a30f9d4ddec14dacd2a1ae955ea219..cce832844c52faaf16aed93273528898e6c36003 100644
--- a/src/api/config_info.c
+++ b/src/api/config_info.c
@@ -221,6 +221,8 @@ void slurm_print_ctl_conf ( FILE* out,
 		slurm_ctl_conf_ptr->schedport);
 	fprintf(out, "SchedulerRootFilter     = %u\n",
 		slurm_ctl_conf_ptr->schedrootfltr);
+	fprintf(out, "SchedulerTimeSlice      = %u\n",
+		slurm_ctl_conf_ptr->sched_time_slice);
 	fprintf(out, "SchedulerType           = %s\n",
 		slurm_ctl_conf_ptr->schedtype);
 	fprintf(out, "SelectType              = %s\n",
diff --git a/src/api/init_msg.c b/src/api/init_msg.c
index f06d15829cdfaab22380311246d5acb2cbc2dbc0..7d593eff4175a7abd294f160ba77ceaf92da6afe 100644
--- a/src/api/init_msg.c
+++ b/src/api/init_msg.c
@@ -150,7 +150,8 @@ void slurm_init_part_desc_msg (update_part_msg_t * update_part_msg)
 	update_part_msg->hidden 	= (uint16_t) NO_VAL;
 	update_part_msg->default_part 	= (uint16_t) NO_VAL;
 	update_part_msg->root_only 	= (uint16_t) NO_VAL;
-	update_part_msg->shared 	= (uint16_t) NO_VAL;
+	update_part_msg->max_share 	= (uint16_t) NO_VAL;
+	update_part_msg->priority 	= (uint16_t) NO_VAL;
 	update_part_msg->state_up 	= (uint16_t) NO_VAL;
 }
 
diff --git a/src/api/partition_info.c b/src/api/partition_info.c
index 2eff5fa6c22658bfc106736945e75d679ccce25f..f6a0b4e80e3a5305ad944a9d22d323986f89f0f2 100644
--- a/src/api/partition_info.c
+++ b/src/api/partition_info.c
@@ -2,7 +2,7 @@
  *  partition_info.c - get/print the partition state information of slurm
  *  $Id$
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
  *  UCRL-CODE-226842.
@@ -137,14 +137,8 @@ char *slurm_sprint_partition_info ( partition_info_t * part_ptr,
 	else
 		sprintf(tmp_line, "Default=NO ");
 	xstrcat(out, tmp_line);
-	if (part_ptr->shared == SHARED_NO)
-		sprintf(tmp_line, "Shared=NO ");
-	else if (part_ptr->shared == SHARED_YES)
-		sprintf(tmp_line, "Shared=YES ");
-	else if (part_ptr->shared == SHARED_EXCLUSIVE)
-		sprintf(tmp_line, "Shared=EXCLUSIVE ");
-	else
-		sprintf(tmp_line, "Shared=FORCE ");
+	sprintf(tmp_line, "MaxShare=%u Priority=%u ",
+		part_ptr->max_share, part_ptr->priority);
 	xstrcat(out, tmp_line);
 	if (part_ptr->state_up)
 		sprintf(tmp_line, "State=UP ");
diff --git a/src/common/read_config.c b/src/common/read_config.c
index 750b8db3118c573b3ab30bb33484be73d023813d..217eed6c4a508649570ae86e8d61824a3cd68134 100644
--- a/src/common/read_config.c
+++ b/src/common/read_config.c
@@ -175,6 +175,7 @@ s_p_options_t slurm_conf_options[] = {
 	{"SchedulerAuth", S_P_STRING},
 	{"SchedulerPort", S_P_UINT16},
 	{"SchedulerRootFilter", S_P_UINT16},
+	{"SchedulerTimeSlice", S_P_UINT16},
 	{"SchedulerType", S_P_STRING},
 	{"SelectType", S_P_STRING},
 	{"SelectTypeParameters", S_P_STRING},
@@ -422,8 +423,10 @@ static int parse_partitionname(void **dest, slurm_parser_enum_t type,
 		{"Hidden", S_P_BOOLEAN}, /* YES or NO */
 		{"MaxTime", S_P_UINT32}, /* INFINITE or a number */
 		{"MaxNodes", S_P_UINT32}, /* INFINITE or a number */
+		{"MaxShare", S_P_UINT16},
 		{"MinNodes", S_P_UINT32},
 		{"Nodes", S_P_STRING},
+		{"Priority", S_P_UINT16},
 		{"RootOnly", S_P_BOOLEAN}, /* YES or NO */
 		{"Shared", S_P_STRING}, /* YES, NO, or FORCE */
 		{"State", S_P_BOOLEAN}, /* UP or DOWN */
@@ -482,20 +485,26 @@ static int parse_partitionname(void **dest, slurm_parser_enum_t type,
 		    && !s_p_get_boolean(&p->root_only_flag, "RootOnly", dflt))
 			p->root_only_flag = false;
 
-		if (!s_p_get_string(&tmp, "Shared", tbl)
-		    && !s_p_get_string(&tmp, "Shared", dflt)) {
-			p->shared = SHARED_NO;
-		} else {
+		if (!s_p_get_uint16(&p->priority, "Priority", tbl) &&
+		    !s_p_get_uint16(&p->priority, "Priority", dflt))
+			p->priority = 1;
+
+		if (s_p_get_uint16(&p->max_share, "MaxShare", tbl) ||
+		    s_p_get_uint16(&p->max_share, "MaxShare", dflt)) {
+			/* Use MaxShare and ignore Shared value */
+			;
+		} else if (s_p_get_string(&tmp, "Shared", tbl) ||
+		           s_p_get_string(&tmp, "Shared", dflt)) {
 			if (strcasecmp(tmp, "NO") == 0)
-				p->shared = SHARED_NO;
+				p->max_share = 1;
 #ifndef HAVE_XCPU
 			/* Only "Shared=NO" is valid on XCPU systems */
 			else if (strcasecmp(tmp, "YES") == 0)
-				p->shared = SHARED_YES;
+				p->max_share = (uint16_t) 64;
 			else if (strcasecmp(tmp, "EXCLUSIVE") == 0)
-				p->shared = SHARED_EXCLUSIVE;
+				p->max_share = 0;
 			else if (strcasecmp(tmp, "FORCE") == 0)
-				p->shared = SHARED_FORCE;
+				p->max_share = (uint16_t) INFINITE;
 #endif
 			else {
 				error("Bad value \"%s\" for Shared", tmp);
@@ -504,7 +513,11 @@ static int parse_partitionname(void **dest, slurm_parser_enum_t type,
 				xfree(tmp);
 				return -1;
 			}
+		} else {
+			/* No MaxShare or Shared specified */
+			p->max_share = 1;
 		}
+
 		xfree(tmp);
 
 		if (!s_p_get_boolean(&p->state_up_flag, "State", tbl)
@@ -1122,6 +1135,7 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
 	xfree (ctl_conf_ptr->resume_program);
 	ctl_conf_ptr->resume_rate		= (uint16_t) NO_VAL;
 	ctl_conf_ptr->ret2service		= (uint16_t) NO_VAL;
+	ctl_conf_ptr->sched_time_slice		= (uint16_t) NO_VAL;
 	ctl_conf_ptr->schedport			= (uint16_t) NO_VAL;
 	ctl_conf_ptr->schedrootfltr		= (uint16_t) NO_VAL;
 	xfree( ctl_conf_ptr->schedtype );
@@ -1640,6 +1654,10 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 		conf->schedport = DEFAULT_SCHEDULER_PORT;
 	}
 
+	if (!s_p_get_uint16(&conf->sched_time_slice, "SchedulerTimeSlice",
+	    hashtbl))
+		conf->sched_time_slice = DEFAULT_SCHED_TIME_SLICE;
+
 	if (!s_p_get_uint16(&conf->schedrootfltr,
 			    "SchedulerRootFilter", hashtbl))
 		conf->schedrootfltr = DEFAULT_SCHEDROOTFILTER;
diff --git a/src/common/read_config.h b/src/common/read_config.h
index 188774c2b36b5a2e119654c2d7135b2e3a48c038..ea9f91a6bfd1bfe1ad0a0b6bd687a9db4dbd65d2 100644
--- a/src/common/read_config.h
+++ b/src/common/read_config.h
@@ -91,6 +91,7 @@ extern char *default_plugstack;
 #define DEFAULT_SAVE_STATE_LOC      "/tmp"
 #define DEFAULT_SCHEDROOTFILTER     1
 #define DEFAULT_SCHEDULER_PORT      7321
+#define DEFAULT_SCHED_TIME_SLICE    30
 #define DEFAULT_SCHEDTYPE           "sched/builtin"
 #ifdef HAVE_BG		/* Blue Gene specific default configuration parameters */
 #  define DEFAULT_SELECT_TYPE       "select/bluegene"
@@ -137,10 +138,10 @@ typedef struct slurm_conf_partition {
 	uint32_t min_nodes;	/* per job */
 	uint32_t total_nodes;	/* total number of nodes in the partition */
 	uint32_t total_cpus;	/* total number of cpus in the partition */
+	uint16_t priority;	/* scheduling priority for jobs */
 	bool     root_only_flag;/* 1 if allocate/submit RPC can only be 
 				   issued by user root */
-	uint16_t shared;	/* 1 if job can share a node,
-				   2 if sharing required */
+	uint16_t max_share;	/* number of jobs to gang schedule */
 	bool     state_up_flag;	/* 1 if state is up, 0 if down */
 	char *nodes;		/* comma delimited list names of nodes */
 	char *allow_groups;	/* comma delimited list of groups, 
diff --git a/src/common/slurm_protocol_pack.c b/src/common/slurm_protocol_pack.c
index 61795df2d5496bfcc191996e7da497c25b661929..af4c3b7ed89bf477eef86478cbcd130619fdf2d0 100644
--- a/src/common/slurm_protocol_pack.c
+++ b/src/common/slurm_protocol_pack.c
@@ -1423,9 +1423,10 @@ _pack_update_partition_msg(update_part_msg_t * msg, Buf buffer)
 	packstr(msg->name,         buffer);
 	packstr(msg->nodes,        buffer);
 
-	pack16(msg-> hidden,      buffer);
+	pack16(msg-> hidden,       buffer);
+	pack16(msg-> max_share,    buffer);
+	pack16(msg-> priority,     buffer);
 	pack16(msg-> root_only,    buffer);
-	pack16(msg-> shared,       buffer);
 	pack16(msg-> state_up,     buffer);
 }
 
@@ -1449,10 +1450,11 @@ _unpack_update_partition_msg(update_part_msg_t ** msg, Buf buffer)
 	safe_unpackstr_xmalloc(&tmp_ptr->name, &uint16_tmp, buffer);
 	safe_unpackstr_xmalloc(&tmp_ptr->nodes, &uint16_tmp, buffer);
 
-	safe_unpack16(&tmp_ptr->hidden, buffer);
+	safe_unpack16(&tmp_ptr->hidden,    buffer);
+	safe_unpack16(&tmp_ptr->max_share, buffer);
+	safe_unpack16(&tmp_ptr->priority,  buffer);
 	safe_unpack16(&tmp_ptr->root_only, buffer);
-	safe_unpack16(&tmp_ptr->shared, buffer);
-	safe_unpack16(&tmp_ptr->state_up, buffer);
+	safe_unpack16(&tmp_ptr->state_up,  buffer);
 	return SLURM_SUCCESS;
 
 unpack_error:
@@ -1811,7 +1813,8 @@ _unpack_partition_info_members(partition_info_t * part, Buf buffer)
 	safe_unpack16(&part->default_part, buffer);
 	safe_unpack16(&part->hidden,       buffer);
 	safe_unpack16(&part->root_only,    buffer);
-	safe_unpack16(&part->shared,       buffer);
+	safe_unpack16(&part->max_share,    buffer);
+	safe_unpack16(&part->priority,     buffer);
 
 	safe_unpack16(&part->state_up, buffer);
 	safe_unpackstr_xmalloc(&part->allow_groups, &uint16_tmp, buffer);
@@ -2188,6 +2191,7 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer)
 
 	pack16(build_ptr->schedport, buffer);
 	pack16(build_ptr->schedrootfltr, buffer);
+	pack16(build_ptr->sched_time_slice, buffer);
 	packstr(build_ptr->schedtype, buffer);
 	packstr(build_ptr->select_type, buffer);
 	pack16(build_ptr->select_type_param, buffer);
@@ -2328,6 +2332,7 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **
 
 	safe_unpack16(&build_ptr->schedport, buffer);
 	safe_unpack16(&build_ptr->schedrootfltr, buffer);
+	safe_unpack16(&build_ptr->sched_time_slice, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->schedtype, &uint16_tmp, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->select_type, &uint16_tmp, buffer);
 	safe_unpack16(&build_ptr->select_type_param, buffer);
diff --git a/src/plugins/sched/backfill/backfill.c b/src/plugins/sched/backfill/backfill.c
index 804d289cdf7bcd0b57712ce24e5c05a826b9173e..235b3e9f974dfa825785d53751d9b52c9376b3ca 100644
--- a/src/plugins/sched/backfill/backfill.c
+++ b/src/plugins/sched/backfill/backfill.c
@@ -198,7 +198,7 @@ backfill_agent(void *args)
 			part_iterator = list_iterator_create(part_list);
 			while ((part_ptr = (struct part_record *) 
 						list_next(part_iterator))) {
-				if ( ((part_ptr->shared)       ||
+				if ( ((part_ptr->max_share > 1) ||
 				      (part_ptr->state_up == 0)) )
 				 	continue; /* not under our control */
 				if ((part_ptr->root_only) && filter_root)
diff --git a/src/plugins/sched/wiki2/get_jobs.c b/src/plugins/sched/wiki2/get_jobs.c
index c8bd554429bd8716bf8fcb17200bf03fba0a2c1b..703da587dd356b06a3a81abec6046adcce170729 100644
--- a/src/plugins/sched/wiki2/get_jobs.c
+++ b/src/plugins/sched/wiki2/get_jobs.c
@@ -346,16 +346,15 @@ static void	_get_job_comment(struct job_record *job_ptr,
 
 	/* SHARED NODES */
 	if (cr_enabled)	{			/* consumable resources */
-		if (job_ptr->part_ptr->shared == SHARED_EXCLUSIVE)
+		if (job_ptr->part_ptr->max_share == 0)	/* Exclusive use */
 			sharing = 0;
-		else if (job_ptr->details && (job_ptr->details->shared != 0))
+		else if (job_ptr->details && job_ptr->details->shared)
 			sharing = 1;
-	} else if (job_ptr->part_ptr) {			/* partition with */
-		if (job_ptr->part_ptr->shared == SHARED_FORCE)
-			sharing = 1;
-		else if ((job_ptr->part_ptr->shared == SHARED_YES)
-		&&  (job_ptr->details)			/* optional for partition */
-		&&  (job_ptr->details->shared))		/* with job to share */
+	} else if (job_ptr->part_ptr) {		/* partition level control */
+		if (job_ptr->part_ptr->max_share == (uint16_t) INFINITE)
+			sharing = 1;		/* Sharing forced */
+		else if ((job_ptr->part_ptr->max_share > 1) &&
+		         (job_ptr->details) && (job_ptr->details->shared))
 			sharing = 1;
 	}
 	if (sharing) {
diff --git a/src/plugins/select/bluegene/plugin/bg_job_place.c b/src/plugins/select/bluegene/plugin/bg_job_place.c
index 1651aff7cf0de8421c7da70610fa0be611472c6e..dc03a6250ea5e9a4b4ac02c34345b1b52f1ba2a3 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_place.c
+++ b/src/plugins/select/bluegene/plugin/bg_job_place.c
@@ -911,7 +911,7 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 			slurm_mutex_lock(&block_state_mutex);
 
 			if((record->ionodes)
-			   && (job_ptr->part_ptr->shared == 0))
+			   && (job_ptr->part_ptr->max_share <= 1))
 				error("Small block used in "
 				      "non-shared partition");
 
diff --git a/src/scontrol/update_part.c b/src/scontrol/update_part.c
index 4b21a854259338392f2fe18d72f275ee2ebabd68..d00f09c8fb8d3b5bd4d549ac219a1f251f5f1ce1 100644
--- a/src/scontrol/update_part.c
+++ b/src/scontrol/update_part.c
@@ -127,23 +127,14 @@ scontrol_update_part (int argc, char *argv[])
 			}
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "Shared=", 7) == 0) {
-			if (strcasecmp(&argv[i][7], "NO") == 0)
-				part_msg.shared = SHARED_NO;
-			else if (strcasecmp(&argv[i][7], "YES") == 0)
-				part_msg.shared = SHARED_YES;
-			else if (strcasecmp(&argv[i][7], "EXCLUSIVE") == 0)
-				part_msg.shared = SHARED_EXCLUSIVE;
-			else if (strcasecmp(&argv[i][7], "FORCE") == 0)
-				part_msg.shared = SHARED_FORCE;
-			else {
-				exit_code = 1;
-				fprintf (stderr, "Invalid input: %s\n", 
-					 argv[i]);
-				fprintf (stderr, "Acceptable Shared values "
-					"are YES, NO and FORCE\n");
-				return 0;
-			}
+		else if (strncasecmp(argv[i], "MaxShare=", 9) == 0) {
+			part_msg.max_share = (uint16_t) strtol(&argv[i][9], 
+					(char **) NULL, 10);
+			update_cnt++;
+		}
+		else if (strncasecmp(argv[i], "Priority=", 9) == 0) {
+			part_msg.priority = (uint16_t) strtol(&argv[i][9], 
+					(char **) NULL, 10);
 			update_cnt++;
 		}
 		else if (strncasecmp(argv[i], "State=", 6) == 0) {
diff --git a/src/sinfo/opts.c b/src/sinfo/opts.c
index a01a349d487deb0b3c0ea1fd22c4978904217024..c91d81df385bc8dfbe69cb04b25717f994d0afa3 100644
--- a/src/sinfo/opts.c
+++ b/src/sinfo/opts.c
@@ -517,6 +517,12 @@ _parse_format( char* format )
 					field_size, 
 					right_justify, 
 					suffix );
+		} else if (field[0] == 'p') {
+			params.match_flags.priority_flag = true;
+			format_add_priority( params.format_list, 
+					field_size, 
+					right_justify, 
+					suffix );
 		} else if (field[0] == 'P') {
 			params.match_flags.partition_flag = true;
 			format_add_partition( params.format_list, 
@@ -692,6 +698,8 @@ void _print_options( void )
 			"true" : "false");
 	printf("partition_flag  = %s\n", params.match_flags.partition_flag ?
 			"true" : "false");
+	printf("priority_flag   = %s\n", params.match_flags.priority_flag ?
+			"true" : "false");
 	printf("reason_flag     = %s\n", params.match_flags.reason_flag ?
 			"true" : "false");
 	printf("root_flag       = %s\n", params.match_flags.root_flag ?
diff --git a/src/sinfo/print.c b/src/sinfo/print.c
index 9a235a17627ba10e43023c9d567ea0dd83854ef0..3f16b822267535627345ccb10f31b5b025c78b10 100644
--- a/src/sinfo/print.c
+++ b/src/sinfo/print.c
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  print.c - sinfo print job functions
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Joey Ekstrom <ekstrom1@llnl.gov> and 
  *  Morris Jette <jette1@llnl.gov>
@@ -170,7 +170,8 @@ static int _print_secs(long time, int width, bool right, bool cut_output)
 }
 
 static int 
-_build_min_max_16_string(char *buffer, int buf_size, uint16_t min, uint16_t max, bool range)
+_build_min_max_16_string(char *buffer, int buf_size, uint16_t min, uint16_t max, 
+			 bool range)
 {
 	char tmp_min[7];
 	char tmp_max[7];
@@ -191,7 +192,8 @@ _build_min_max_16_string(char *buffer, int buf_size, uint16_t min, uint16_t max,
 }
 
 static int 
-_build_min_max_32_string(char *buffer, int buf_size, uint32_t min, uint32_t max, bool range)
+_build_min_max_32_string(char *buffer, int buf_size, uint32_t min, uint32_t max, 
+			 bool range)
 {
 	char tmp_min[7];
 	char tmp_max[7];
@@ -631,6 +633,24 @@ int _print_prefix(sinfo_data_t * job, int width, bool right_justify,
 	return SLURM_SUCCESS;
 }
 
+int _print_priority(sinfo_data_t * sinfo_data, int width,
+			bool right_justify, char *suffix)
+{
+	char id[FORMAT_STRING_SIZE];
+
+	if (sinfo_data) {
+		_build_min_max_16_string(id, FORMAT_STRING_SIZE, 
+		                      sinfo_data->part_info->priority, 
+		                      sinfo_data->part_info->priority, true);
+		_print_str(id, width, right_justify, true);
+	} else
+		_print_str("PRIORITY", width, right_justify, true);
+
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
 int _print_reason(sinfo_data_t * sinfo_data, int width,
 			bool right_justify, char *suffix)
 {
@@ -668,17 +688,15 @@ int _print_root(sinfo_data_t * sinfo_data, int width,
 int _print_share(sinfo_data_t * sinfo_data, int width,
 			bool right_justify, char *suffix)
 {
+	char id[FORMAT_STRING_SIZE];
+
 	if (sinfo_data) {
-		if (sinfo_data->part_info == NULL)
-			_print_str("n/a", width, right_justify, true);
-		else if (sinfo_data->part_info->shared > 1)
-			_print_str("force", width, right_justify, true);
-		else if (sinfo_data->part_info->shared)
-			_print_str("yes", width, right_justify, true);
-		else
-			_print_str("no", width, right_justify, true);
+		_build_min_max_16_string(id, FORMAT_STRING_SIZE, 
+		                      sinfo_data->part_info->max_share, 
+		                      sinfo_data->part_info->max_share, true);
+		_print_str(id, width, right_justify, true);
 	} else
-		_print_str("SHARE", width, right_justify, true);
+		_print_str("MAX_SHARE", width, right_justify, true);
 
 	if (suffix)
 		printf("%s", suffix);
diff --git a/src/sinfo/print.h b/src/sinfo/print.h
index 989f25b7913d1b44f3a891493f36c8ae6fd0a355..c2c2645717ee5bbc7cd3f5552ce63ac6697a398b 100644
--- a/src/sinfo/print.h
+++ b/src/sinfo/print.h
@@ -100,6 +100,8 @@ int  print_sinfo_list(List sinfo_list);
 	format_add_function(list,wid,right,suffix,_print_partition)
 #define format_add_prefix(list,wid,right,suffix) \
 	format_add_function(list,wid,right,suffix,_print_prefix)
+#define format_add_priority(list,wid,right,suffix) \
+	format_add_function(list,wid,right,suffix,_print_priority)
 #define format_add_reason(list,wid,right,suffix) \
 	format_add_function(list,wid,right,suffix,_print_reason)
 #define format_add_root(list,wid,right,prefix) \
@@ -155,6 +157,8 @@ int _print_partition(sinfo_data_t * sinfo_data, int width,
 			bool right_justify, char *suffix);
 int _print_prefix(sinfo_data_t * sinfo_data, int width,
 			bool right_justify, char *suffix);
+int _print_priority(sinfo_data_t * sinfo_data, int width,
+			bool right_justify, char *suffix);
 int _print_reason(sinfo_data_t * sinfo_data, int width,
 			bool right_justify, char *suffix);
 int _print_root(sinfo_data_t * sinfo_data, int width, 
diff --git a/src/sinfo/sinfo.c b/src/sinfo/sinfo.c
index 599c98f4245e3d8bc61ed510425acdf8ea6b7a1b..701de69021a7ab11deac513f824f5d7511965850 100644
--- a/src/sinfo/sinfo.c
+++ b/src/sinfo/sinfo.c
@@ -3,7 +3,7 @@
  *
  *  $Id$
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Joey Ekstrom <ekstrom1@llnl.gov>, Morris Jette <jette1@llnl.gov>
  *  UCRL-CODE-226842.
@@ -629,7 +629,11 @@ static bool _match_part_data(sinfo_data_t *sinfo_ptr,
 		return false;
 
 	if (params.match_flags.share_flag &&
-	    (part_ptr->shared != sinfo_ptr->part_info->shared))
+	    (part_ptr->max_share != sinfo_ptr->part_info->max_share))
+		return false;
+
+	if (params.match_flags.priority_flag &&
+	    (part_ptr->priority != sinfo_ptr->part_info->priority))
 		return false;
 
 	return true;
diff --git a/src/sinfo/sinfo.h b/src/sinfo/sinfo.h
index b1e6d3d1064fcb99380be5c5d54f882d9bdb2069..1c9212cfc9998c9de2feaffd9f1498deb8f7316a 100644
--- a/src/sinfo/sinfo.h
+++ b/src/sinfo/sinfo.h
@@ -105,7 +105,7 @@ typedef struct {
 	hostlist_t ionodes;
 #endif
 	/* part_info contains partition, avail, max_time, job_size, 
-	 * root, share, groups */
+	 * root, max_share, groups, priority */
 	partition_info_t* part_info;
 	uint16_t part_inx;
 } sinfo_data_t;
@@ -126,6 +126,7 @@ struct sinfo_match_flags {
 	bool max_time_flag;
 	bool memory_flag;
 	bool partition_flag;
+	bool priority_flag;
 	bool reason_flag;
 	bool root_flag;
 	bool share_flag;
diff --git a/src/sinfo/sort.c b/src/sinfo/sort.c
index 08303757f509a1cf1890b53e00d7a53681e3d82d..a98141dc05f3ee723f55279d181daec9be1cee07 100644
--- a/src/sinfo/sort.c
+++ b/src/sinfo/sort.c
@@ -63,6 +63,7 @@ static int _sort_by_node_list(void *void1, void *void2);
 static int _sort_by_nodes_ai(void *void1, void *void2);
 static int _sort_by_nodes(void *void1, void *void2);
 static int _sort_by_partition(void *void1, void *void2);
+static int _sort_by_priority(void *void1, void *void2);
 static int _sort_by_reason(void *void1, void *void2);
 static int _sort_by_reason_time(void *void1, void *void2);
 static int _sort_by_root(void *void1, void *void2);
@@ -122,6 +123,8 @@ void sort_sinfo_list(List sinfo_list)
 				list_sort(sinfo_list, _sort_by_memory);
 		else if (params.sort[i] == 'N')
 				list_sort(sinfo_list, _sort_by_node_list);
+		else if (params.sort[i] == 'p')
+				list_sort(sinfo_list, _sort_by_priority);
 		else if (params.sort[i] == 'P')
 				list_sort(sinfo_list, _sort_by_partition);
 		else if (params.sort[i] == 'r')
@@ -527,9 +530,27 @@ static int _sort_by_share(void *void1, void *void2)
 	int val1 = 0, val2 = 0;
 
 	if (sinfo1->part_info)
-		val1 = sinfo1->part_info->shared;
+		val1 = sinfo1->part_info->max_share;
 	if (sinfo2->part_info)
-		val2 = sinfo2->part_info->shared;
+		val2 = sinfo2->part_info->max_share;
+	diff = val1 - val2;
+
+	if (reverse_order)
+		diff = -diff;
+	return diff;
+}
+
+static int _sort_by_priority(void *void1, void *void2)
+{
+	int diff;
+	sinfo_data_t *sinfo1 = (sinfo_data_t *) void1;
+	sinfo_data_t *sinfo2 = (sinfo_data_t *) void2;
+	int val1 = 0, val2 = 0;
+
+	if (sinfo1->part_info)
+		val1 = sinfo1->part_info->priority;
+	if (sinfo2->part_info)
+		val2 = sinfo2->part_info->priority;
 	diff = val1 - val2;
 
 	if (reverse_order)
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index 70bee45dc9882f91d5366096a0d493f358612739..d53ee1dcb3048df824d81ea2a6d5f4660f89d6de 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -3315,9 +3315,9 @@ void reset_job_priority(void)
 }
 
 /* 
- * _top_priority - determine if any other job for this partition has a 
- *	higher priority than specified job
- * IN job_ptr - pointer to selected partition
+ * _top_priority - determine if any other job has a higher priority than the
+ *	specified job
+ * IN job_ptr - pointer to selected job
  * RET true if selected job has highest priority
  */
 static bool _top_priority(struct job_record *job_ptr)
@@ -3347,8 +3347,11 @@ static bool _top_priority(struct job_record *job_ptr)
 				continue;
 			if (!job_independent(job_ptr2))
 				continue;
-			if ((job_ptr2->priority >  job_ptr->priority) &&
-			    (job_ptr2->part_ptr == job_ptr->part_ptr)) {
+			if ((job_ptr2->part_ptr->priority > 
+			     job_ptr ->part_ptr->priority) ||
+			    ((job_ptr2->part_ptr->priority ==
+			      job_ptr ->part_ptr->priority) &&
+			     (job_ptr2->priority >  job_ptr->priority))) {
 				top = false;
 				break;
 			}
diff --git a/src/slurmctld/job_scheduler.c b/src/slurmctld/job_scheduler.c
index a56f8f99a90705ae12f68cfe068b30df53c7ba30..7dec58858fb18c67b2d1d2ba98acd88d0cb9e3cb 100644
--- a/src/slurmctld/job_scheduler.c
+++ b/src/slurmctld/job_scheduler.c
@@ -2,7 +2,7 @@
  * job_scheduler.c - manage the scheduling of pending jobs in priority order
  *	Note there is a global job list (job_list)
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
  *  UCRL-CODE-226842.
@@ -61,8 +61,9 @@
 #define MAX_RETRIES 10
 
 struct job_queue {
-	int priority;
 	struct job_record *job_ptr;
+	uint32_t job_priority;
+	uint16_t part_priority;
 };
 
 static int  _build_job_queue(struct job_queue **job_queue);
@@ -104,7 +105,9 @@ static int _build_job_queue(struct job_queue **job_queue)
 				 sizeof(struct job_queue));
 		}
 		my_job_queue[job_queue_size].job_ptr  = job_ptr;
-		my_job_queue[job_queue_size].priority = job_ptr->priority;
+		my_job_queue[job_queue_size].job_priority = job_ptr->priority;
+		my_job_queue[job_queue_size].part_priority = 
+						job_ptr->part_ptr->priority;
 		job_queue_size++;
 	}
 	list_iterator_destroy(job_iterator);
@@ -283,26 +286,42 @@ int schedule(void)
 static void _sort_job_queue(struct job_queue *job_queue, int job_queue_size)
 {
 	int i, j, top_prio_inx;
-	int tmp_prio, top_prio;
 	struct job_record *tmp_job_ptr;
+	uint32_t top_job_prio,  tmp_job_prio;
+	uint16_t top_part_prio, tmp_part_prio;
 
 	for (i = 0; i < job_queue_size; i++) {
-		top_prio = job_queue[i].priority;
-		top_prio_inx = i;
+		top_prio_inx  = i;
+		top_job_prio  = job_queue[i].job_priority;
+		top_part_prio = job_queue[i].part_priority;
+
 		for (j = (i + 1); j < job_queue_size; j++) {
-			if (top_prio >= job_queue[j].priority)
+			if (top_part_prio > job_queue[j].part_priority)
+				continue;
+			if ((top_part_prio == job_queue[j].part_priority) &&
+			    (top_job_prio  >= job_queue[j].job_priority))
 				continue;
-			top_prio = job_queue[j].priority;
-			top_prio_inx = j;
+
+			top_prio_inx  = j;
+			top_job_prio  = job_queue[j].job_priority;
+			top_part_prio = job_queue[j].part_priority;
 		}
 		if (top_prio_inx == i)
-			continue;
-		tmp_prio = job_queue[i].priority;
-		tmp_job_ptr = job_queue[i].job_ptr;
-		job_queue[i].priority = job_queue[top_prio_inx].priority;
-		job_queue[i].job_ptr = job_queue[top_prio_inx].job_ptr;
-		job_queue[top_prio_inx].priority = tmp_prio;
-		job_queue[top_prio_inx].job_ptr = tmp_job_ptr;
+			continue;	/* already in correct order */
+
+		/* swap records at top_prio_inx and i */
+		tmp_job_ptr   = job_queue[i].job_ptr;
+		tmp_job_prio  = job_queue[i].job_priority;
+		tmp_part_prio = job_queue[i].part_priority;
+
+		job_queue[i].job_ptr       = job_queue[top_prio_inx].job_ptr;
+		job_queue[i].job_priority  = job_queue[top_prio_inx].job_priority;
+		job_queue[i].part_priority = job_queue[top_prio_inx].part_priority;
+
+		job_queue[top_prio_inx].job_ptr       = tmp_job_ptr;
+		job_queue[top_prio_inx].job_priority  = tmp_job_prio;
+		job_queue[top_prio_inx].part_priority = tmp_part_prio;
+
 	}
 }
 
diff --git a/src/slurmctld/node_scheduler.c b/src/slurmctld/node_scheduler.c
index 53353434cbea44f85fc3e28d60bfd7d5f81126fe..690660987a42bcaf89fe8f8da784c2dd5969e54e 100644
--- a/src/slurmctld/node_scheduler.c
+++ b/src/slurmctld/node_scheduler.c
@@ -351,13 +351,13 @@ _job_count_bitmap(bitstr_t * bitmap, bitstr_t * jobmap, int job_cnt)
  *
  * IN user_flag - may be 0 (do not share nodes), 1 (node sharing allowed),
  *                or any other number means "don't care"
- * IN part_enum - current partition's node sharing policy
+ * IN part_max_share - current partition's node sharing policy
  * IN cons_res_flag - 1 if the consumable resources flag is enable, 0 otherwise
  *
  * RET - 1 if nodes can be shared, 0 if nodes cannot be shared
  */
 static int
-_resolve_shared_status(uint16_t user_flag, uint16_t part_enum,
+_resolve_shared_status(uint16_t user_flag, uint16_t part_max_share,
 		       int cons_res_flag)
 {
 	int shared;
@@ -368,7 +368,7 @@ _resolve_shared_status(uint16_t user_flag, uint16_t part_enum,
 		 * the partition or user has to explicitly disable sharing to
 		 * get exclusive nodes.
 		 */
-		if ((part_enum == SHARED_EXCLUSIVE) || (user_flag == 0))
+		if ((part_max_share == 0) || (user_flag == 0))
 			shared = 0;
 		else
 			shared = 1;
@@ -376,9 +376,9 @@ _resolve_shared_status(uint16_t user_flag, uint16_t part_enum,
 		/* The partition sharing option is only used if
 		 * the consumable resources plugin is NOT in use.
 		 */
-		if (part_enum == SHARED_FORCE)   /* shared=force */
+		if (part_max_share == (uint16_t) INFINITE)  /* shared=force */
 			shared = 1;
-		else if (part_enum == SHARED_NO) /* can't share */
+		else if (part_max_share <= 1)	/* can't share */
 			shared = 0;
 		else
 			shared = (user_flag == 1) ? 1 : 0;
@@ -592,7 +592,7 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
                 return error_code;
 
 	shared = _resolve_shared_status(job_ptr->details->shared,
-					part_ptr->shared, cr_enabled);
+					part_ptr->max_share, cr_enabled);
 	job_ptr->details->shared = shared;
 
         if (cr_enabled) {
diff --git a/src/slurmctld/partition_mgr.c b/src/slurmctld/partition_mgr.c
index 86035697615490e15f532b063430c837ca2d3284..78e6718ca373c884eb87a7cf039488bcddf18068 100644
--- a/src/slurmctld/partition_mgr.c
+++ b/src/slurmctld/partition_mgr.c
@@ -215,13 +215,14 @@ struct part_record *create_part_record(void)
 
 	xassert (part_ptr->magic = PART_MAGIC);  /* set value */
 	strcpy(part_ptr->name, "DEFAULT");
-	part_ptr->hidden    = default_part.hidden;
+	part_ptr->hidden       = default_part.hidden;
 	part_ptr->max_time     = default_part.max_time;
 	part_ptr->max_nodes    = default_part.max_nodes;
 	part_ptr->min_nodes    = default_part.min_nodes;
 	part_ptr->root_only    = default_part.root_only;
 	part_ptr->state_up     = default_part.state_up;
-	part_ptr->shared       = default_part.shared;
+	part_ptr->max_share    = default_part.max_share;
+	part_ptr->priority     = default_part.priority;
 	part_ptr->node_bitmap  = NULL;
 
 	if (default_part.allow_groups)
@@ -362,19 +363,20 @@ static void _dump_part_state(struct part_record *part_ptr, Buf buffer)
 	else
 		default_part_flag = 0;
 
-	packstr(part_ptr->name, buffer);
-	pack32(part_ptr->max_time, buffer);
+	packstr(part_ptr->name,     buffer);
+	pack32(part_ptr->max_time,  buffer);
 	pack32(part_ptr->max_nodes, buffer);
 	pack32(part_ptr->min_nodes, buffer);
 
-	pack16(default_part_flag, buffer);
-	pack16(part_ptr->hidden, buffer);
+	pack16(default_part_flag,   buffer);
+	pack16(part_ptr->hidden,    buffer);
 	pack16(part_ptr->root_only, buffer);
-	pack16(part_ptr->shared, buffer);
+	pack16(part_ptr->max_share, buffer);
+	pack16(part_ptr->priority,  buffer);
 
-	pack16(part_ptr->state_up, buffer);
+	pack16(part_ptr->state_up,  buffer);
 	packstr(part_ptr->allow_groups, buffer);
-	packstr(part_ptr->nodes, buffer);
+	packstr(part_ptr->nodes,    buffer);
 }
 
 /*
@@ -388,7 +390,8 @@ int load_all_part_state(void)
 	char *part_name, *allow_groups, *nodes, *state_file, *data = NULL;
 	uint32_t max_time, max_nodes, min_nodes;
 	time_t time;
-	uint16_t name_len, def_part_flag, hidden, root_only, shared, state_up;
+	uint16_t name_len, def_part_flag, hidden, root_only;
+	uint16_t max_share, priority, state_up;
 	struct part_record *part_ptr;
 	uint32_t data_size = 0;
 	int data_allocated, data_read = 0, error_code = 0, part_cnt = 0;
@@ -439,9 +442,10 @@ int load_all_part_state(void)
 		safe_unpack32(&min_nodes, buffer);
 
 		safe_unpack16(&def_part_flag, buffer);
-		safe_unpack16(&hidden, buffer);
+		safe_unpack16(&hidden,    buffer);
 		safe_unpack16(&root_only, buffer);
-		safe_unpack16(&shared, buffer);
+		safe_unpack16(&max_share, buffer);
+		safe_unpack16(&priority,  buffer);
 
 		safe_unpack16(&state_up, buffer);
 		safe_unpackstr_xmalloc(&allow_groups, &name_len, buffer);
@@ -450,10 +454,10 @@ int load_all_part_state(void)
 		/* validity test as possible */
 		if ((def_part_flag > 1) ||
 		    (root_only > 1) || (hidden > 1) ||
-		    (shared > SHARED_EXCLUSIVE) || (state_up > 1)) {
+		    (state_up > 1)) {
 			error("Invalid data for partition %s: def_part_flag=%u, "
-				"hidden=%u root_only=%u, shared=%u, state_up=%u",
-				part_name, def_part_flag, hidden, root_only, shared,
+				"hidden=%u root_only=%u, state_up=%u",
+				part_name, def_part_flag, hidden, root_only,
 				state_up);
 			error("No more partition data will be processed from "
 				"the checkpoint file");
@@ -477,8 +481,9 @@ int load_all_part_state(void)
 				default_part_loc = part_ptr;
 			}
 			part_ptr->root_only = root_only;
-			part_ptr->shared = shared;
-			part_ptr->state_up = state_up;
+			part_ptr->max_share = max_share;
+			part_ptr->priority  = priority;
+			part_ptr->state_up  = state_up;
 			xfree(part_ptr->allow_groups);
 			part_ptr->allow_groups = allow_groups;
 			xfree(part_ptr->nodes);
@@ -533,7 +538,8 @@ int init_part_conf(void)
 	default_part.min_nodes   = 1;
 	default_part.root_only   = 0;
 	default_part.state_up    = 1;
-	default_part.shared      = SHARED_NO;
+	default_part.max_share   = 1;
+	default_part.priority    = 1;
 	default_part.total_nodes = 0;
 	default_part.total_cpus  = 0;
 	xfree(default_part.nodes);
@@ -732,10 +738,11 @@ void pack_part(struct part_record *part_ptr, Buf buffer)
 				&node_scaling);
 	pack16(node_scaling, buffer);
 	pack32(part_ptr->total_cpus, buffer);
-	pack16(default_part_flag, buffer);
-	pack16(part_ptr->hidden, buffer);
-	pack16(part_ptr->root_only, buffer);
-	pack16(part_ptr->shared, buffer);
+	pack16(default_part_flag,    buffer);
+	pack16(part_ptr->hidden,     buffer);
+	pack16(part_ptr->root_only,  buffer);
+	pack16(part_ptr->max_share,  buffer);
+	pack16(part_ptr->priority,   buffer);
 
 	pack16(part_ptr->state_up, buffer);
 	packstr(part_ptr->allow_groups, buffer);
@@ -782,8 +789,8 @@ int update_part(update_part_msg_t * part_desc)
 	last_part_update = time(NULL);
 
 	if (part_desc->hidden != (uint16_t) NO_VAL) {
-		info("update_part: setting hidden to %u for partition %s", 
-		     part_desc->hidden, part_desc->name);
+		info("update_part: setting hidden to %u for partition %s",
+			part_desc->hidden, part_desc->name);
 		part_ptr->hidden = part_desc->hidden;
 	}
 
@@ -817,10 +824,16 @@ int update_part(update_part_msg_t * part_desc)
 		part_ptr->state_up = part_desc->state_up;
 	}
 
-	if (part_desc->shared != (uint16_t) NO_VAL) {
-		info("update_part: setting shared to %u for partition %s",
-		     part_desc->shared, part_desc->name);
-		part_ptr->shared = part_desc->shared;
+	if (part_desc->max_share != (uint16_t) NO_VAL) {
+		info("update_part: setting max_share to %u for partition %s",
+		     part_desc->max_share, part_desc->name);
+		part_ptr->max_share = part_desc->max_share;
+	}
+
+	if (part_desc->priority != (uint16_t) NO_VAL) {
+		info("update_part: setting priority to %u for partition %s",
+		     part_desc->priority, part_desc->name);
+		part_ptr->priority = part_desc->priority;
 	}
 
 	if ((part_desc->default_part == 1) &&
diff --git a/src/slurmctld/proc_req.c b/src/slurmctld/proc_req.c
index e56c452a8b9e975b40b60639a87b15611c3e57b3..3244da8f3c6bcf60aa63074285a237fc61d61a39 100644
--- a/src/slurmctld/proc_req.c
+++ b/src/slurmctld/proc_req.c
@@ -356,6 +356,7 @@ void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 	conf_ptr->ret2service         = conf->ret2service;
 	conf_ptr->schedport           = conf->schedport;
 	conf_ptr->schedrootfltr       = conf->schedrootfltr;
+	conf_ptr->sched_time_slice    = conf->sched_time_slice;
 	conf_ptr->schedtype           = xstrdup(conf->schedtype);
 	conf_ptr->select_type         = xstrdup(conf->select_type);
 	conf_ptr->select_type_param   = conf->select_type_param;
diff --git a/src/slurmctld/read_config.c b/src/slurmctld/read_config.c
index f626e87cf12ba05326db2b1e2090439ee11b37a2..394e90ff0095b3294da6e875c68e2b320f06fc83 100644
--- a/src/slurmctld/read_config.c
+++ b/src/slurmctld/read_config.c
@@ -607,11 +607,12 @@ static int _build_single_partitionline_info(slurm_conf_partition_t *part)
 	}
 	part_ptr->hidden    = part->hidden_flag ? 1 : 0;
 	part_ptr->max_time  = part->max_time;
+	part_ptr->max_share = part->max_share;
 	part_ptr->max_nodes = part->max_nodes;
 	part_ptr->min_nodes = part->min_nodes;
+	part_ptr->priority  = part->priority;
 	part_ptr->root_only = part->root_only_flag ? 1 : 0;
 	part_ptr->state_up  = part->state_up_flag ? 1 : 0;
-	part_ptr->shared    = part->shared;
 	if (part->allow_groups) {
 		xfree(part_ptr->allow_groups);
 		part_ptr->allow_groups = xstrdup(part->allow_groups);
diff --git a/src/slurmctld/slurmctld.h b/src/slurmctld/slurmctld.h
index 756593eaa28f9d29dd65d8d2d453d19cba040cfa..d96eb033c4647179706da161600fa6ad6cc298e8 100644
--- a/src/slurmctld/slurmctld.h
+++ b/src/slurmctld/slurmctld.h
@@ -242,7 +242,8 @@ struct part_record {
 	uint32_t max_offset;	/* select plugin max offset */
 	uint16_t root_only;	/* 1 if allocate/submit RPC can only be 
 				   issued by user root */
-	uint16_t shared;	/* See part_shared in slurm.h */
+	uint16_t max_share;	/* number of jobs to gang schedule */
+	uint16_t priority;	/* scheduling priority for jobs */
 	uint16_t state_up;	/* 1 if state is up, 0 if down */
 	char *nodes;		/* comma delimited list names of nodes */
 	char *allow_groups;	/* comma delimited list of groups,