diff --git a/NEWS b/NEWS
index 95f5daa7ab6065398293c33578af691a5967a3ad..1ca2ab4a3ecd4711c7fdad2e67717aace55156bb 100644
--- a/NEWS
+++ b/NEWS
@@ -31,7 +31,9 @@ documents those changes that are of interest to users and admins.
  -- Added contribs/cray/libalps_test_programs.tar.gz with tools to validate
     SLURM's logic used to support Cray systems.
  -- Create RPM for srun command that is a wrapper for the Cray/ALPS aprun
-    command. Dependent upon .rpmmacros parameter of "%_with_srun2aprun"
+    command. Dependent upon .rpmmacros parameter of "%_with_srun2aprun".
+ -- Add configuration parameter MaxStepCount to limit the effect of bad
+    batch scripts.
  -- NOTE: THERE HAS BEEN A NEW FIELD ADDED TO THE JOB STATE FILE. UPGRADES FROM
     VERSION 2.3.0-PRE4 WILL RESULT IN LOST JOBS UNLESS THE "orig_dependency"
     FIELD IS REMOVED FROM JOB STATE SAVE/RESTORE LOGIC.
diff --git a/RELEASE_NOTES b/RELEASE_NOTES
index 61b6623552ec9b15d3852dfb92cb6ad509c6526a..e97b836ebfdf1e2f4758d67245d2550967eeae5e 100644
--- a/RELEASE_NOTES
+++ b/RELEASE_NOTES
@@ -41,6 +41,8 @@ CONFIGURATION FILE CHANGES (see "man slurm.conf" for details)
 * DebugFlags of Frontend added
 * Added new configuration parameter MaxJobId. Use with FirstJobId to limit
   range of job ID values.
+* Added new configuration parameter MaxStepCount to limit the effect of
+  bad batch scripts. The default value is 40,000 steps per job.
 * Change node configuration parameter from "Procs" to "CPUs". Both parameters
   will be supported for now.
 
@@ -100,6 +102,8 @@ partition_info_t
 slurm_ctl_conf
 	max_job_id		maximum supported job id before starting over
 				with first_job_id
+	max_step_cnt		maximum number of job steps permitted per job
+
 slurm_step_layout
 	front_end		name of front end host running the step
 
diff --git a/doc/html/configurator.html.in b/doc/html/configurator.html.in
index 6b5992c3bfd8046ceb4d6f67bfae8a16944fdf28..ba50df05a033d61e41381cb271323a31e068de86 100644
--- a/doc/html/configurator.html.in
+++ b/doc/html/configurator.html.in
@@ -145,6 +145,7 @@ function displayfile()
    "#Licenses=foo*4,bar <br>" +
    "#MailProg=/bin/mail <br>" +
    "#MaxJobCount=5000 <br>" +
+   "#MaxStepCount=40000 <br>" +
    "#MaxTasksPerNode=128 <br>" +
    "MpiDefault=" + get_radio_value(document.config.mpi_default) + "<br>" +
    "#MpiParams=ports=#-# <br>" +
diff --git a/doc/man/man5/slurm.conf.5 b/doc/man/man5/slurm.conf.5
index 3b9419ba4b6b22e3092df8d9b23b21292ec6cc3f..591c2547f7d0686fef49d77ba0d4cdd12d38798a 100644
--- a/doc/man/man5/slurm.conf.5
+++ b/doc/man/man5/slurm.conf.5
@@ -773,6 +773,12 @@ NOTE: Enforcement of memory limits currently requires enabling of
 accounting, which samples memory use on a periodic basis (data need
 not be stored, just collected).
 
+.TP
+\fBMaxStepCount\fR
+The maximum number of steps that any job can initiate. This parameter
+is intended to limit the effect of bad batch scripts.
+The default value is 40000 steps.
+
 .TP
 \fBMaxTasksPerNode\fR
 Maximum number of tasks SLURM will allow a job step to spawn
diff --git a/slurm/slurm.h.in b/slurm/slurm.h.in
index e4268bb69d11ca2cadf3abd1f4f06876ada19aa2..94526a45ef9e85f142a4156973514eab3e1d9239 100644
--- a/slurm/slurm.h.in
+++ b/slurm/slurm.h.in
@@ -1788,6 +1788,7 @@ typedef struct slurm_ctl_conf {
 	uint32_t max_job_cnt;	/* maximum number of active jobs */
 	uint32_t max_job_id;	/* maximum job id before using first_job_id */
 	uint32_t max_mem_per_cpu; /* maximum MB memory per allocated CPU */
+	uint32_t max_step_cnt;	/* maximum number of steps per job */
 	uint16_t max_tasks_per_node; /* maximum tasks per node */
 	uint16_t min_job_age;	/* COMPLETED jobs over this age (secs)
 				 * purged from in memory records */
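
The new max_step_cnt member rides along in slurm_ctl_conf_t, so any client of
the configuration API sees it (it also surfaces in "scontrol show config" via
the key-pair list built in src/api/config_info.c below). A minimal sketch of
reading it, assuming this patch is applied; error handling is trimmed:

    #include <stdio.h>
    #include <time.h>
    #include <slurm/slurm.h>
    #include <slurm/slurm_errno.h>

    int main(void)
    {
        slurm_ctl_conf_t *conf = NULL;

        /* Fetch the running controller's current configuration. */
        if (slurm_load_ctl_conf((time_t) 0, &conf) != SLURM_SUCCESS) {
            slurm_perror("slurm_load_ctl_conf");
            return 1;
        }
        printf("MaxStepCount=%u\n", conf->max_step_cnt);
        slurm_free_ctl_conf(conf);
        return 0;
    }
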
diff --git a/slurm/slurm_errno.h b/slurm/slurm_errno.h
index 26f8dd56f1609dd824b088d67dd8a732f40e68b4..838b9cf3f65ff17dc5c8cdfb617d70483b4a76f4 100644
--- a/slurm/slurm_errno.h
+++ b/slurm/slurm_errno.h
@@ -188,6 +188,7 @@ enum {
 	ESLURM_QOS_THRES,
 	ESLURM_PARTITION_IN_USE,
 	ESLURM_EXPAND_GRES,
+	ESLURM_STEP_LIMIT,
 
 	/* switch specific error codes, specific values defined in plugin module */
 	ESLURM_SWITCH_MIN = 3000,
diff --git a/src/api/config_info.c b/src/api/config_info.c
index c878ae944a0116f36dff037785d43672c9726a56..4a224f1522b618568febb687d7fefa1bc439d255 100644
--- a/src/api/config_info.c
+++ b/src/api/config_info.c
@@ -519,6 +519,13 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 		key_pair->value = xstrdup("UNLIMITED");
 	}
 
+	snprintf(tmp_str, sizeof(tmp_str), "%u",
+		 slurm_ctl_conf_ptr->max_step_cnt);
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("MaxStepCount");
+	key_pair->value = xstrdup(tmp_str);
+	list_append(ret_list, key_pair);
+
 	snprintf(tmp_str, sizeof(tmp_str), "%u",
 		 slurm_ctl_conf_ptr->max_tasks_per_node);
 	key_pair = xmalloc(sizeof(config_key_pair_t));
diff --git a/src/common/read_config.c b/src/common/read_config.c
index 5af8db38aead9a06c011429338cb57f6b79bd89e..70bbf493350287652d708a2c03b904080f8c0ad2 100644
--- a/src/common/read_config.c
+++ b/src/common/read_config.c
@@ -208,6 +208,7 @@ s_p_options_t slurm_conf_options[] = {
 	{"MaxJobId", S_P_UINT32},
 	{"MaxMemPerCPU", S_P_UINT32},
 	{"MaxMemPerNode", S_P_UINT32},
+	{"MaxStepCount", S_P_UINT32},
 	{"MaxTasksPerNode", S_P_UINT16},
 	{"MessageTimeout", S_P_UINT16},
 	{"MinJobAge", S_P_UINT16},
@@ -1859,9 +1860,10 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
 	ctl_conf_ptr->kill_wait			= (uint16_t) NO_VAL;
 	xfree (ctl_conf_ptr->licenses);
 	xfree (ctl_conf_ptr->mail_prog);
-	ctl_conf_ptr->max_job_cnt		= (uint16_t) NO_VAL;
+	ctl_conf_ptr->max_job_cnt		= (uint32_t) NO_VAL;
 	ctl_conf_ptr->max_job_id		= NO_VAL;
 	ctl_conf_ptr->max_mem_per_cpu           = 0;
+	ctl_conf_ptr->max_step_cnt		= (uint32_t) NO_VAL;
 	ctl_conf_ptr->min_job_age		= (uint16_t) NO_VAL;
 	xfree (ctl_conf_ptr->mpi_default);
 	xfree (ctl_conf_ptr->mpi_params);
@@ -2223,6 +2225,11 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	if (s_p_get_uint32(&conf->max_job_cnt, "MaxJobCount", hashtbl) &&
 	    (conf->max_job_cnt < 1))
 		fatal("MaxJobCount=%u, No jobs permitted", conf->max_job_cnt);
+	if (s_p_get_uint32(&conf->max_step_cnt, "MaxStepCount", hashtbl) &&
+	    (conf->max_step_cnt < 1)) {
+		fatal("MaxStepCount=%u, No steps permitted",
+		      conf->max_step_cnt);
+	}
 
 	if (!s_p_get_string(&conf->authtype, "AuthType", hashtbl))
 		conf->authtype = xstrdup(DEFAULT_AUTH_TYPE);
@@ -2424,6 +2431,9 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 		conf->max_mem_per_cpu = DEFAULT_MAX_MEM_PER_CPU;
 	}
 
+	if (!s_p_get_uint32(&conf->max_step_cnt, "MaxStepCount", hashtbl))
+		conf->max_step_cnt = DEFAULT_MAX_STEP_COUNT;
+
 	if (!s_p_get_uint16(&conf->max_tasks_per_node, "MaxTasksPerNode",
 			    hashtbl)) {
 		conf->max_tasks_per_node = DEFAULT_MAX_TASKS_PER_NODE;
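
Note the two s_p_get_uint32() calls for MaxStepCount: the function returns
true only when the key appears in slurm.conf, so an explicit MaxStepCount=0
dies with fatal() in the earlier hunk, while an absent key quietly falls back
to DEFAULT_MAX_STEP_COUNT here. The same semantics as a standalone sketch
(lookup() and fatal() are hypothetical stand-ins for s_p_get_uint32() and
SLURM's logger, not SLURM code):

    #include <stdbool.h>
    #include <stdint.h>

    #define DEFAULT_MAX_STEP_COUNT 40000

    /* Hypothetical stand-ins for SLURM's parser and logger. */
    extern bool lookup(uint32_t *val, const char *key);
    extern void fatal(const char *fmt, ...);

    static uint32_t get_max_step_cnt(void)
    {
        uint32_t cnt;

        if (lookup(&cnt, "MaxStepCount") && (cnt < 1))
            fatal("MaxStepCount=%u, No steps permitted", cnt);
        if (!lookup(&cnt, "MaxStepCount"))
            cnt = DEFAULT_MAX_STEP_COUNT; /* key absent: take default */
        return cnt;
    }
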
diff --git a/src/common/read_config.h b/src/common/read_config.h
index 80612b6a029b19e3696a5333a71a3cd0ba4cdf2c..7d9b53b35cc6d4ccec4d7d36ed6f9d281e131df3 100644
--- a/src/common/read_config.h
+++ b/src/common/read_config.h
@@ -88,6 +88,7 @@ extern char *default_plugstack;
 #define DEFAULT_MAIL_PROG           "/bin/mail"
 #define DEFAULT_MAX_JOB_COUNT       10000
 #define DEFAULT_MAX_JOB_ID          0xffff0000
+#define DEFAULT_MAX_STEP_COUNT      40000
 #define DEFAULT_MEM_PER_CPU         0
 #define DEFAULT_MAX_MEM_PER_CPU     0
 #define DEFAULT_MIN_JOB_AGE         300
diff --git a/src/common/slurm_errno.c b/src/common/slurm_errno.c
index 6d7d9c67de903b8c5346ed51d65ca865047f36d5..4c948acbb117c6f6bbdf2a3c3552252cd867cc66 100644
--- a/src/common/slurm_errno.c
+++ b/src/common/slurm_errno.c
@@ -278,6 +278,8 @@ static slurm_errtab_t slurm_errtab[] = {
 	  "Partition is in use"					},
 	{ ESLURM_EXPAND_GRES,
 	  "Job expansion with generic resource (gres) not supported"	},
+	{ ESLURM_STEP_LIMIT,
+	  "Step limit reached for this job"			},
 
 	/* slurmd error codes */
 
diff --git a/src/common/slurm_protocol_pack.c b/src/common/slurm_protocol_pack.c
index 24a2b8cf2cf2cd4ae2f33ab60f5aacada53b7125..1d9a8a344fba5f1a59acdf2f69cedb25d0fa8eeb 100644
--- a/src/common/slurm_protocol_pack.c
+++ b/src/common/slurm_protocol_pack.c
@@ -4312,6 +4312,7 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		pack32(build_ptr->max_job_cnt, buffer);
 		pack32(build_ptr->max_job_id, buffer);
 		pack32(build_ptr->max_mem_per_cpu, buffer);
+		pack32(build_ptr->max_step_cnt, buffer);
 		pack16(build_ptr->max_tasks_per_node, buffer);
 		pack16(build_ptr->min_job_age, buffer);
 		packstr(build_ptr->mpi_default, buffer);
@@ -4945,6 +4946,7 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpack32(&build_ptr->max_job_cnt, buffer);
 		safe_unpack32(&build_ptr->max_job_id, buffer);
 		safe_unpack32(&build_ptr->max_mem_per_cpu, buffer);
+		safe_unpack32(&build_ptr->max_step_cnt, buffer);
 		safe_unpack16(&build_ptr->max_tasks_per_node, buffer);
 		safe_unpack16(&build_ptr->min_job_age, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->mpi_default,
@@ -5213,6 +5215,7 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpack32(&build_ptr->max_job_cnt, buffer);
 		build_ptr->max_job_id = DEFAULT_MAX_JOB_ID;
 		safe_unpack32(&build_ptr->max_mem_per_cpu, buffer);
+		build_ptr->max_step_cnt = DEFAULT_MAX_STEP_COUNT;
 		safe_unpack16(&build_ptr->max_tasks_per_node, buffer);
 		safe_unpack16(&build_ptr->min_job_age, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->mpi_default,
@@ -5485,6 +5488,7 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		build_ptr->max_job_id = DEFAULT_MAX_JOB_ID;
 		safe_unpack16(&max_job_cnt, buffer);
 		safe_unpack32(&build_ptr->max_mem_per_cpu, buffer);
+		build_ptr->max_step_cnt = DEFAULT_MAX_STEP_COUNT;
 		safe_unpack16(&build_ptr->max_tasks_per_node, buffer);
 		safe_unpack16(&build_ptr->min_job_age, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->mpi_default,
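
The three unpack hunks above correspond to three protocol versions. Only
peers speaking the current version pack max_step_cnt on the wire; messages
from older daemons never contain the field, so the two older unpack paths
substitute DEFAULT_MAX_STEP_COUNT instead of reading the buffer. The pattern,
schematically (NEW_PROTOCOL_VERSION is illustrative, not the real symbol):

    if (protocol_version >= NEW_PROTOCOL_VERSION) {
        /* New peers send the field; read it off the wire. */
        safe_unpack32(&build_ptr->max_step_cnt, buffer);
    } else {
        /* Old peers never packed it; fill in the default so the
         * rest of slurmctld can rely on the field being set. */
        build_ptr->max_step_cnt = DEFAULT_MAX_STEP_COUNT;
    }
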
diff --git a/src/slurmctld/proc_req.c b/src/slurmctld/proc_req.c
index 388c8840c8b435ec53e7a45132aca9e6fe123f8e..80722779576527201c8a8f37f9ab3ed840e594cf 100644
--- a/src/slurmctld/proc_req.c
+++ b/src/slurmctld/proc_req.c
@@ -2,7 +2,7 @@
  *  proc_req.c - process incoming messages to slurmctld
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov>, et. al.
  *  CODE-OCEC-09-009. All rights reserved.
@@ -519,6 +519,7 @@ void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 	conf_ptr->max_job_cnt         = conf->max_job_cnt;
 	conf_ptr->max_job_id          = conf->max_job_id;
 	conf_ptr->max_mem_per_cpu     = conf->max_mem_per_cpu;
+	conf_ptr->max_step_cnt        = conf->max_step_cnt;
 	conf_ptr->max_tasks_per_node  = conf->max_tasks_per_node;
 	conf_ptr->min_job_age         = conf->min_job_age;
 	conf_ptr->mpi_default         = xstrdup(conf->mpi_default);
@@ -1657,8 +1658,8 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg)
 	/* return result */
 	if (error_code) {
 		unlock_slurmctld(job_write_lock);
-		info("_slurm_rpc_job_step_create: %s",
-		     slurm_strerror(error_code));
+		info("_slurm_rpc_job_step_create for job %u: %s",
+		     req_step_msg->job_id, slurm_strerror(error_code));
 		slurm_send_rc_msg(msg, error_code);
 	} else {
 		slurm_step_layout_t *layout = step_rec->step_layout;
diff --git a/src/slurmctld/step_mgr.c b/src/slurmctld/step_mgr.c
index 3d2d1384d0e52f796ca2cae2310c7bc4bacc6af1..0b084905f395bb4a7c22127bafe132354ede981e 100644
--- a/src/slurmctld/step_mgr.c
+++ b/src/slurmctld/step_mgr.c
@@ -1625,6 +1625,9 @@ step_create(job_step_create_request_msg_t *step_specs,
 	    _test_strlen(step_specs->node_list, "node_list", 1024*64))
 		return ESLURM_PATHNAME_TOO_LONG;
 
+	if (job_ptr->next_step_id >= slurmctld_conf.max_step_cnt)
+		return ESLURM_STEP_LIMIT;
+
 #if defined HAVE_BGQ
 //#if defined HAVE_BGQ && defined HAVE_BG_FILES
 	select_g_select_jobinfo_get(job_ptr->select_jobinfo,
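
Since job_ptr->next_step_id only ever increases, this check caps the total
number of steps a job may create over its lifetime, not the number running
concurrently, matching the man page's "can initiate" wording. Callers see the
failure as ESLURM_STEP_LIMIT; a sketch of client-side handling follows
(submit_step() is a hypothetical wrapper around a step-creation request):

    #include <stdio.h>
    #include <stdint.h>
    #include <slurm/slurm_errno.h>

    /* Hypothetical wrapper that asks slurmctld to create one step
     * and returns a SLURM error code. */
    extern int submit_step(uint32_t job_id);

    static void run_steps(uint32_t job_id)
    {
        int rc;

        while ((rc = submit_step(job_id)) == SLURM_SUCCESS)
            ; /* keep launching steps */

        if (rc == ESLURM_STEP_LIMIT) /* MaxStepCount reached */
            fprintf(stderr, "job %u: %s\n",
                    job_id, slurm_strerror(rc));
    }
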