From f6b12b214a7a80f3cd0ade99f79f7b1b6955db2c Mon Sep 17 00:00:00 2001
From: Tim Wickberg <tim@schedmd.com>
Date: Fri, 17 Aug 2018 01:25:10 -0600
Subject: [PATCH] Remove HAVE_ALPS_CRAY and related code.
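
Drop the HAVE_ALPS_CRAY build conditionals and everything they guarded,
as reflected in the diff below: the select/alps and launch/aprun plugin
packaging, the BASIL_RESERVATION_ID environment setup, the
SELECT_JOBDATA_RESV_ID and SELECT_JOBDATA_CONFIRMED job data types,
is_alps_cray_system(), the ALPS inventory calls in the backfill and
main schedulers, and the salloc/sbatch/srun restrictions that only
applied on ALPS.  The CLUSTER_FLAG_CRAYXT and CLUSTER_FLAG_CRAY_A bits
remain defined but are now documented as removed in v19.05.  Native
Cray (select/cray) support is unchanged.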

---
 configure.ac                           |  1 -
 contribs/README                        | 19 ------
 contribs/cray/Makefile.am              |  1 -
 contribs/cray/Makefile.in              |  1 -
 contribs/pam/README                    |  3 +-
 contribs/slurm.spec-legacy             | 21 +------
 slurm/slurm.h.in                       |  6 +-
 slurm/slurmdb.h                        |  5 +-
 src/common/env.c                       | 35 -----------
 src/common/node_conf.h                 |  5 --
 src/common/node_select.c               | 15 -----
 src/common/read_config.c               | 18 +-----
 src/common/read_config.h               | 14 +----
 src/common/working_cluster.c           | 17 +----
 src/common/working_cluster.h           |  5 +-
 src/plugins/sched/backfill/backfill.c  | 21 -------
 src/salloc/Makefile.am                 |  6 --
 src/salloc/Makefile.in                 |  5 +-
 src/salloc/opt.c                       | 35 -----------
 src/salloc/salloc.c                    | 36 -----------
 src/sbatch/opt.c                       |  4 +-
 src/sbatch/sbatch.c                    | 36 -----------
 src/sbcast/sbcast.c                    |  4 --
 src/slurmctld/controller.c             |  4 --
 src/slurmctld/job_mgr.c                |  5 --
 src/slurmctld/job_scheduler.c          | 25 --------
 src/slurmctld/node_mgr.c               |  8 ---
 src/slurmctld/proc_req.c               | 42 +------------
 src/slurmctld/step_mgr.c               | 60 +-----------------
 src/slurmd/slurmd/req.c                | 22 -------
 src/slurmd/slurmstepd/mgr.c            | 87 --------------------------
 src/slurmd/slurmstepd/slurmstepd_job.c | 18 ------
 src/srun/libsrun/allocate.c            | 11 +---
 src/srun/libsrun/opt.c                 |  1 -
 src/srun/libsrun/opt.h                 |  1 -
 src/srun/libsrun/srun_job.c            |  6 +-
 src/sstat/sstat.c                      |  5 --
 src/sview/job_info.c                   | 47 +-------------
 38 files changed, 29 insertions(+), 626 deletions(-)

diff --git a/configure.ac b/configure.ac
index cc201e2020e..99eca89ece4 100644
--- a/configure.ac
+++ b/configure.ac
@@ -247,7 +247,6 @@ fi
 
 AM_CONDITIONAL(BUILD_SVIEW, [test "x$ac_glib_test" = "xyes"] && [test "x$ac_gtk_test" = "xyes"])
 
-dnl Cray ALPS/Basil support depends on mySQL
 X_AC_CRAY
 
 dnl checks for system services.
diff --git a/contribs/README b/contribs/README
index 69e16d9feeb..2c497c9fc09 100644
--- a/contribs/README
+++ b/contribs/README
@@ -16,25 +16,6 @@ of the Slurm contribs distribution follows:
      cnselect              - Script used to emulate some cnselect functionality
                              for testing without access to a Cray system.
      etc_sysconfig_slurm   - /etc/sysconfig/slurm for Cray XT/XE systems
-     libalps_test_programs.tar.gz - set of tools to verify ALPS/BASIL support
-			     logic. Note that this currently requires:
-			     * hardcoding in libsdb/basil_mysql_routines.c:
-			       mysql_real_connect(handle, "localhost", NULL, NULL, "XT5istanbul"
-			     * suitable /etc/my.cnf, containing at least the lines
-			       [client]
-			       user=basic
-			       password=basic
-			     * setting the APBASIL in the libalps/Makefile, e.g.
-			       APBASIL := slurm/alps_simulator/apbasil.sh
-			     To use, extract the files then:
-			     > cd libasil/
-			     > make -C alps_tests all   # runs basil parser tests
-			     > make -C sdb_tests  all   # checks if database routines work
-			     A tool named tuxadmin is also also included. When
-			     executed with the -s or --slurm.conf option, this
-			     contact the SDB to generate system-specific information
-			     needed in slurm.conf (e.g. "NodeName=nid..." and
-			     "PartitionName= Nodes=nid... MaxNodes=...".
      opt_modulefiles_slurm - enables use of Munge as soon as built
      pam_job.c             - Less verbose version of the default Cray job service.
 
diff --git a/contribs/cray/Makefile.am b/contribs/cray/Makefile.am
index dd5161690a4..b700948a3ae 100644
--- a/contribs/cray/Makefile.am
+++ b/contribs/cray/Makefile.am
@@ -32,7 +32,6 @@ endif
 EXTRA_DIST = \
 	$(NATIVE_EXTRA_DIST)		\
 	etc_sysconfig_slurm		\
-	libalps_test_programs.tar.gz	\
 	opt_modulefiles_slurm.in	\
 	pam_job.c 			\
 	plugstack.conf.template		\
diff --git a/contribs/cray/Makefile.in b/contribs/cray/Makefile.in
index 5790ae4d950..604e0ab3719 100644
--- a/contribs/cray/Makefile.in
+++ b/contribs/cray/Makefile.in
@@ -587,7 +587,6 @@ AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common $(JSON_CPPFLAGS)
 EXTRA_DIST = \
 	$(NATIVE_EXTRA_DIST)		\
 	etc_sysconfig_slurm		\
-	libalps_test_programs.tar.gz	\
 	opt_modulefiles_slurm.in	\
 	pam_job.c 			\
 	plugstack.conf.template		\
diff --git a/contribs/pam/README b/contribs/pam/README
index 93f6209276e..c6b34e84c60 100644
--- a/contribs/pam/README
+++ b/contribs/pam/README
@@ -36,8 +36,7 @@ Description:
 
 Notes:
   This module will not work on systems where the hostname returned by the
-     gethostname() differs from SLURM node name. This includes front-end
-     configurations (IBM BlueGene or Cray/ALPS systems).
+     gethostname() differs from the Slurm node name.
   rsh_kludge - The rsh service under RH71 (rsh-0.17-2.5) truncates the first
      character of this message.  The rsh client sends 3 NUL-terminated ASCII
      strings: client-user-name, server-user-name, and command string.  The
diff --git a/contribs/slurm.spec-legacy b/contribs/slurm.spec-legacy
index a2dcf7a3acc..15a73220e02 100644
--- a/contribs/slurm.spec-legacy
+++ b/contribs/slurm.spec-legacy
@@ -35,7 +35,6 @@ Source:		%{slurm_source_dir}.tar.bz2
 # --with blcr        %_with_blcr          1     require blcr support
 # --with bluegene    %_with_bluegene      1     build bluegene RPM
 # --with cray        %_with_cray          1     build for a Cray system without ALPS
-# --with cray_alps   %_with_cray_alps     1     build for a Cray system with ALPS
 # --with cray_network %_with_cray_network 1     build for a non-Cray system with a Cray network
 # --without debug    %_without_debug      1     don't compile with debugging symbols
 # --with lua         %_with_lua           1     build Slurm lua bindings (proctrack only for now)
@@ -63,7 +62,6 @@ Source:		%{slurm_source_dir}.tar.bz2
 %slurm_without_opt auth_none
 %slurm_without_opt bluegene
 %slurm_without_opt cray
-%slurm_without_opt cray_alps
 %slurm_without_opt cray_network
 %slurm_without_opt salloc_background
 %slurm_without_opt multiple_slurmd
@@ -95,10 +93,6 @@ Source:		%{slurm_source_dir}.tar.bz2
 %slurm_without_opt lua
 %slurm_without_opt partial-attach
 
-%if %{slurm_with cray_alps}
-%slurm_with_opt sgijob
-%endif
-
 Requires: slurm-plugins
 
 %ifos linux
@@ -128,14 +122,6 @@ BuildRequires: mariadb-devel >= 5.0.0
 %endif
 %endif
 
-%if %{slurm_with cray_alps}
-%if %{use_mysql_devel}
-BuildRequires: mysql-devel
-%else
-BuildRequires: mariadb-devel
-%endif
-%endif
-
 %if %{slurm_with cray}
 BuildRequires: cray-libalpscomm_cn-devel
 BuildRequires: cray-libalpscomm_sn-devel
@@ -430,7 +416,7 @@ fi
 
 # Do not package Slurm's version of libpmi on Cray systems.
 # Cray's version of libpmi should be used.
-%if %{slurm_with cray} || %{slurm_with cray_alps}
+%if %{slurm_with cray}
    rm -f $RPM_BUILD_ROOT/%{_libdir}/libpmi*
    %if %{slurm_with cray}
       install -D -m644 contribs/cray/plugstack.conf.template ${RPM_BUILD_ROOT}%{_sysconfdir}/plugstack.conf.template
@@ -656,8 +642,6 @@ test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/jobcomp_elasticsearch.so    &&
    echo %{_libdir}/slurm/jobcomp_elasticsearch.so    >> $LIST
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/launch_slurm.so             &&
    echo %{_libdir}/slurm/launch_slurm.so             >> $LIST
-test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/launch_aprun.so             &&
-   echo %{_libdir}/slurm/launch_aprun.so             >> $LIST
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/mpi_pmix.so                 &&
    echo %{_libdir}/slurm/mpi_pmix.so                 >> $LIST
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/mpi_pmix_v1.so              &&
@@ -749,7 +733,7 @@ rm -rf $RPM_BUILD_ROOT
 %dir %{_libdir}/slurm/src
 %dir /etc/ld.so.conf.d
 /etc/ld.so.conf.d/slurm.conf
-%if %{slurm_with cray} || %{slurm_with cray_alps}
+%if %{slurm_with cray}
 %dir /opt/modulefiles/slurm
 %endif
 %if %{slurm_with cray}
@@ -889,7 +873,6 @@ rm -rf $RPM_BUILD_ROOT
 %{_libdir}/slurm/sched_backfill.so
 %{_libdir}/slurm/sched_builtin.so
 %{_libdir}/slurm/sched_hold.so
-%{_libdir}/slurm/select_alps.so
 %{_libdir}/slurm/select_cray.so
 %{_libdir}/slurm/select_cons_res.so
 %{_libdir}/slurm/select_linear.so
diff --git a/slurm/slurm.h.in b/slurm/slurm.h.in
index 5ad093609ff..8ecbba5c7de 100644
--- a/slurm/slurm.h.in
+++ b/slurm/slurm.h.in
@@ -573,8 +573,8 @@ enum select_plugin_type {
 	SELECT_PLUGIN_CONS_RES       = 101, /* Cons Res on a normal system */
 	SELECT_PLUGIN_LINEAR         = 102, /* Linear on a normal system */
 	/* 103 unused (originally used for BGQ) */
-	SELECT_PLUGIN_ALPS           = 104, /* Alps using Linear (only option) */
-	/* 105 unused (originally was for ALPS cons_res which won't happen) */
+	/* 104 unused (originally used for Cray/ALPS with select/linear) */
+	/* 105 unused (originally used for Cray/ALPS with select/cons_res) */
 	SELECT_PLUGIN_SERIAL         = 106, /* Serial */
 	SELECT_PLUGIN_CRAY_LINEAR    = 107, /* Linear on a Native Cray */
 	SELECT_PLUGIN_CRAY_CONS_RES  = 108, /* Cons Res on a Native Cray */
@@ -591,12 +591,10 @@ enum switch_plugin_type {
 };
 
 enum select_jobdata_type {
-	SELECT_JOBDATA_RESV_ID	= 13,	/* data-> uint32_t reservation_id */
 	SELECT_JOBDATA_PAGG_ID	= 14,	/* data-> uint64_t job container ID */
 	SELECT_JOBDATA_PTR	= 15,	/* data-> select_jobinfo_t *jobinfo */
 	SELECT_JOBDATA_BLOCK_PTR = 16,	/* data-> bg_record_t *bg_record */
 	SELECT_JOBDATA_DIM_CNT	= 17,	/* data-> uint16_t dim_cnt */
-	SELECT_JOBDATA_CONFIRMED = 21,	/* data-> uint8_t ALPS reservation confirmed */
 	SELECT_JOBDATA_CLEANING	= 22,	/* data-> uint16_t if the job is in
 					 * cleaning state or not. */
 	SELECT_JOBDATA_NETWORK	= 23,	/* data-> char * network info */
diff --git a/slurm/slurmdb.h b/slurm/slurmdb.h
index f547e8320ee..1fdfb144bba 100644
--- a/slurm/slurmdb.h
+++ b/slurm/slurmdb.h
@@ -214,8 +214,9 @@ enum cluster_fed_states {
 				       /* Removed v17.02 */
 #define CLUSTER_FLAG_MULTSD 0x00000080 /* This cluster is multiple slurmd */
 #define CLUSTER_FLAG_CRAYXT 0x00000100 /* This cluster is a ALPS cray
-					* (deprecated) Same as CRAY_A */
-#define CLUSTER_FLAG_CRAY_A 0x00000100 /* This cluster is a ALPS cray */
+					* Removed v19.05 */
+#define CLUSTER_FLAG_CRAY_A 0x00000100 /* This cluster is a ALPS cray
+					* Removed v19.05 */
 #define CLUSTER_FLAG_FE     0x00000200 /* This cluster is a front end system */
 #define CLUSTER_FLAG_CRAY_N 0x00000400 /* This cluster is a Native cray */
 #define CLUSTER_FLAG_FED    0x00000800 /* This cluster is in a federation. */
diff --git a/src/common/env.c b/src/common/env.c
index c814dc7b63a..da92909e218 100644
--- a/src/common/env.c
+++ b/src/common/env.c
@@ -92,30 +92,6 @@ strong_alias(env_unset_environment,	slurm_env_unset_environment);
 #define MAX_ENV_STRLEN (32 * 4096)	/* Needed for CPU_BIND and MEM_BIND on
 					 * SGI systems with huge CPU counts */
 
-static int _setup_particulars(uint32_t cluster_flags,
-			       char ***dest,
-			       dynamic_plugin_data_t *select_jobinfo)
-{
-	int rc = SLURM_SUCCESS;
-	if (cluster_flags & CLUSTER_FLAG_CRAY_A) {
-		uint32_t resv_id = 0;
-
-		select_g_select_jobinfo_get(select_jobinfo,
-					    SELECT_JOBDATA_RESV_ID,
-					    &resv_id);
-		if (resv_id) {
-			setenvf(dest, "BASIL_RESERVATION_ID", "%u", resv_id);
-		} else {
-			/* This is not an error for a Slurm job allocation with
-			 * no compute nodes and no BASIL reservation */
-			verbose("Can't set BASIL_RESERVATION_ID "
-			        "environment variable");
-		}
-	}
-
-	return rc;
-}
-
 /*
  *  Return pointer to `name' entry in environment if found, or
  *   pointer to the last entry (i.e. NULL) if `name' is not
@@ -624,11 +600,6 @@ int setup_env(env_t *env, bool preserve_env)
 		rc = SLURM_FAILURE;
 	}
 
-	if (env->select_jobinfo) {
-		_setup_particulars(cluster_flags, &env->env,
-				   env->select_jobinfo);
-	}
-
 	if (env->jobid >= 0) {
 		if (setenvf(&env->env, "SLURM_JOB_ID", "%d", env->jobid)) {
 			error("Unable to set SLURM_JOB_ID environment");
@@ -952,7 +923,6 @@ extern int env_array_for_job(char ***dest,
 	char *key, *value;
 	slurm_step_layout_t *step_layout = NULL;
 	int i, rc = SLURM_SUCCESS;
-	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
 	slurm_step_layout_req_t step_layout_req;
 	uint16_t cpus_per_task_array[1];
 	uint32_t cpus_task_reps[1];
@@ -966,8 +936,6 @@ extern int env_array_for_job(char ***dest,
 	cpus_per_task_array[0] = desc->cpus_per_task;
 	cpus_task_reps[0] = alloc->node_cnt;
 
-	_setup_particulars(cluster_flags, dest, alloc->select_jobinfo);
-
 	if (pack_offset < 1) {
 		env_array_overwrite_fmt(dest, "SLURM_JOB_ID", "%u",
 					alloc->job_id);
@@ -1165,7 +1133,6 @@ env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch,
 	slurm_step_layout_t *step_layout = NULL;
 	uint16_t cpus_per_task;
 	uint32_t task_dist;
-	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
 	slurm_step_layout_req_t step_layout_req;
 	uint16_t cpus_per_task_array[1];
 	uint32_t cpus_task_reps[1];
@@ -1176,8 +1143,6 @@ env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch,
 	memset(&step_layout_req, 0, sizeof(slurm_step_layout_req_t));
 	step_layout_req.num_tasks = batch->ntasks;
 
-	_setup_particulars(cluster_flags, dest, batch->select_jobinfo);
-
 	/*
 	 * There is no explicit node count in the batch structure,
 	 * so we need to calculate the node count.
diff --git a/src/common/node_conf.h b/src/common/node_conf.h
index ff8e514ccfd..109acfabdc0 100644
--- a/src/common/node_conf.h
+++ b/src/common/node_conf.h
@@ -151,11 +151,6 @@ struct node_record {
 					 * or other sequence number used to
 					 * order nodes by location,
 					 * no need to save/restore */
-#ifdef HAVE_ALPS_CRAY
-	uint32_t basil_node_id;		/* Cray-XT BASIL node ID,
-					 * no need to save/restore */
-	time_t down_time;		/* When first set to DOWN state */
-#endif	/* HAVE_ALPS_CRAY */
 	acct_gather_energy_t *energy;	/* power consumption data */
 	ext_sensors_data_t *ext_sensors; /* external sensor data */
 	power_mgmt_data_t *power;	/* power management data */
diff --git a/src/common/node_select.c b/src/common/node_select.c
index 30966198cfd..932b4c8ced8 100644
--- a/src/common/node_select.c
+++ b/src/common/node_select.c
@@ -175,21 +175,6 @@ extern int slurm_select_init(bool only_default)
 	if (working_cluster_rec) {
 		/* just ignore warnings here */
 	} else {
-#ifdef HAVE_ALPS_CRAY
-		if (xstrcasecmp(select_type, "select/alps")) {
-			error("%s is incompatible with Cray system "
-			      "running alps", select_type);
-			fatal("Use SelectType=select/alps");
-		}
-#else
-		if (!xstrcasecmp(select_type, "select/alps")) {
-			fatal("Requested SelectType=select/alps "
-			      "in slurm.conf, but not running on a ALPS Cray "
-			      "system.  If looking to emulate a Alps Cray "
-			      "system use --enable-alps-cray-emulation.");
-		}
-#endif
-
 #ifdef HAVE_NATIVE_CRAY
 		if (xstrcasecmp(select_type, "select/cray")) {
 			error("%s is incompatible with a native Cray system.",
diff --git a/src/common/read_config.c b/src/common/read_config.c
index c69c660ee0a..a730dd61664 100644
--- a/src/common/read_config.c
+++ b/src/common/read_config.c
@@ -4336,12 +4336,6 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 		}
 		if (conf->prolog_flags & PROLOG_FLAG_NOHOLD) {
 			conf->prolog_flags |= PROLOG_FLAG_ALLOC;
-#ifdef HAVE_ALPS_CRAY
-			error("PrologFlags=NoHold is not compatible when "
-			      "running on ALPS/Cray systems");
-			conf->prolog_flags &= (~PROLOG_FLAG_NOHOLD);
-			return SLURM_ERROR;
-#endif
 		}
 		xfree(temp_str);
 	} else { /* Default: no Prolog Flags are set */
@@ -4389,12 +4383,6 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 
 	if (!s_p_get_uint16(&conf->ret2service, "ReturnToService", hashtbl))
 		conf->ret2service = DEFAULT_RETURN_TO_SERVICE;
-#ifdef HAVE_ALPS_CRAY
-	if (conf->ret2service > 1) {
-		error("ReturnToService > 1 is not supported on ALPS Cray");
-		return SLURM_ERROR;
-	}
-#endif
 
 	(void) s_p_get_string(&conf->resv_epilog, "ResvEpilog", hashtbl);
 	(void) s_p_get_uint16(&conf->resv_over_run, "ResvOverRun", hashtbl);
@@ -4472,11 +4460,7 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	}
 #ifdef HAVE_REAL_CRAY
 	/*
-	 * This requirement derives from Cray ALPS:
-	 * - ALPS reservations can only be created by the job owner or root
-	 *   (confirmation may be done by other non-privileged users);
-	 * - freeing a reservation always requires root privileges.
-	 * Even when running on Native Cray the SlurmUser must be root
+	 * When running on Native Cray the SlurmUser must be root
 	 * to access the needed libraries.
 	 */
 	if (conf->slurm_user_id != 0) {
diff --git a/src/common/read_config.h b/src/common/read_config.h
index 9a5c6ab4132..e30c235335e 100644
--- a/src/common/read_config.h
+++ b/src/common/read_config.h
@@ -116,8 +116,6 @@ extern uint16_t drop_priv_flag;
 
 #if defined HAVE_LIBNRT
 #  define DEFAULT_LAUNCH_TYPE         "launch/poe"
-#elif defined HAVE_ALPS_CRAY && defined HAVE_REAL_CRAY
-#  define DEFAULT_LAUNCH_TYPE         "launch/aprun"
 #else
 #  define DEFAULT_LAUNCH_TYPE         "launch/slurm"
 #endif
@@ -138,11 +136,7 @@ extern uint16_t drop_priv_flag;
 #define DEFAULT_MSG_TIMEOUT         10
 #define DEFAULT_POWER_PLUGIN        ""
 #define DEFAULT_CHECKPOINT_TYPE     "checkpoint/none"
-#if defined HAVE_REAL_CRAY/* ALPS requires cluster-unique job container IDs */
-#  define DEFAULT_PROCTRACK_TYPE    "proctrack/sgi_job"
-#else
-#  define DEFAULT_PROCTRACK_TYPE    "proctrack/cgroup"
-#endif
+#define DEFAULT_PROCTRACK_TYPE      "proctrack/cgroup"
 #define DEFAULT_PREEMPT_TYPE        "preempt/none"
 #define DEFAULT_PRIORITY_DECAY      604800 /* 7 days */
 #define DEFAULT_PRIORITY_CALC_PERIOD 300 /* in seconds */
@@ -156,9 +150,7 @@ extern uint16_t drop_priv_flag;
 #define DEFAULT_SCHED_LOG_LEVEL     0
 #define DEFAULT_SCHED_TIME_SLICE    30
 #define DEFAULT_SCHEDTYPE           "sched/backfill"
-#if defined HAVE_ALPS_CRAY
-#  define DEFAULT_SELECT_TYPE       "select/alps"
-#elif defined HAVE_NATIVE_CRAY
+#if defined HAVE_NATIVE_CRAY
 #  define DEFAULT_SELECT_TYPE       "select/cray"
 #else
 #  define DEFAULT_SELECT_TYPE       "select/linear"
@@ -184,7 +176,7 @@ extern uint16_t drop_priv_flag;
 #define DEFAULT_TASK_PLUGIN         "task/none"
 #define DEFAULT_TCP_TIMEOUT         2
 #define DEFAULT_TMP_FS              "/tmp"
-#if defined HAVE_3D && !defined HAVE_ALPS_CRAY
+#if defined HAVE_3D
 #  define DEFAULT_TOPOLOGY_PLUGIN     "topology/3d_torus"
 #else
 #  define DEFAULT_TOPOLOGY_PLUGIN     "topology/none"
diff --git a/src/common/working_cluster.c b/src/common/working_cluster.c
index aaac9d93d50..b31a9fd60d4 100644
--- a/src/common/working_cluster.c
+++ b/src/common/working_cluster.c
@@ -72,20 +72,12 @@ extern bool is_cray_system(void)
 {
 	if (working_cluster_rec)
 		return working_cluster_rec->flags & CLUSTER_FLAG_CRAY;
-#if defined HAVE_ALPS_CRAY || defined HAVE_NATIVE_CRAY
-	return true;
-#endif
-	return false;
-}
 
-extern bool is_alps_cray_system(void)
-{
-	if (working_cluster_rec)
-		return working_cluster_rec->flags & CLUSTER_FLAG_CRAY_A;
-#ifdef HAVE_ALPS_CRAY
+#ifdef HAVE_NATIVE_CRAY
 	return true;
-#endif
+#else
 	return false;
+#endif
 }
 
 extern uint16_t slurmdb_setup_cluster_name_dims(void)
@@ -109,9 +101,6 @@ extern uint32_t slurmdb_setup_cluster_flags(void)
 #ifdef MULTIPLE_SLURMD
 	cluster_flags |= CLUSTER_FLAG_MULTSD;
 #endif
-#ifdef HAVE_ALPS_CRAY
-	cluster_flags |= CLUSTER_FLAG_CRAY_A;
-#endif
 #ifdef HAVE_FRONT_END
 	cluster_flags |= CLUSTER_FLAG_FE;
 #endif
diff --git a/src/common/working_cluster.h b/src/common/working_cluster.h
index aad5e796493..4b9d67c70c8 100644
--- a/src/common/working_cluster.h
+++ b/src/common/working_cluster.h
@@ -49,12 +49,9 @@ extern int * slurmdb_setup_cluster_dim_size(void);
  * in the current working cluster */
 extern uint16_t slurmdb_setup_cluster_name_dims(void);
 
-/* Return true if the working cluster is a Cray system (ALPS or Native) */
+/* Return true if the working cluster is a Cray system */
 extern bool is_cray_system(void);
 
-/* Return true if the working cluster is a ALPS Cray system */
-extern bool is_alps_cray_system(void);
-
 /* Return the architecture flags in the current working cluster */
 extern uint32_t slurmdb_setup_cluster_flags(void);
 
diff --git a/src/plugins/sched/backfill/backfill.c b/src/plugins/sched/backfill/backfill.c
index 71c483a4ac9..57f4a66a11d 100644
--- a/src/plugins/sched/backfill/backfill.c
+++ b/src/plugins/sched/backfill/backfill.c
@@ -1142,27 +1142,6 @@ static int _attempt_backfill(void)
 		return SLURM_SUCCESS;
 	}
 
-#ifdef HAVE_ALPS_CRAY
-	/*
-	 * Run a Basil Inventory immediately before setting up the schedule
-	 * plan, to avoid race conditions caused by ALPS node state change.
-	 * Needs to be done with the node-state lock taken.
-	 */
-	START_TIMER;
-	if (select_g_update_basil()) {
-		debug4("backfill: not scheduling due to ALPS");
-		return SLURM_SUCCESS;
-	}
-	END_TIMER;
-	if (debug_flags & DEBUG_FLAG_BACKFILL)
-		info("backfill: ALPS inventory completed, %s", TIME_STR);
-
-	/* The Basil inventory can take a long time to complete. Process
-	 * pending RPCs before starting the backfill scheduling logic */
-	_yield_locks(1000000);
-	if (stop_backfill)
-		return SLURM_SUCCESS;
-#endif
 	(void) bb_g_load_state(false);
 
 	START_TIMER;
diff --git a/src/salloc/Makefile.am b/src/salloc/Makefile.am
index ec4f2fc9bb5..0a779fbdaac 100644
--- a/src/salloc/Makefile.am
+++ b/src/salloc/Makefile.am
@@ -15,12 +15,6 @@ salloc_DEPENDENCIES = $(LIB_SLURM_BUILD)
 salloc_LDADD = \
 	$(convenience_libs)
 
-if HAVE_ALPS_CRAY
-if HAVE_REAL_CRAY
-  salloc_LDADD += -ljob
-endif
-endif
-
 salloc_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
 
 force:
diff --git a/src/salloc/Makefile.in b/src/salloc/Makefile.in
index f21ba96316e..a5682e6bff4 100644
--- a/src/salloc/Makefile.in
+++ b/src/salloc/Makefile.in
@@ -92,7 +92,6 @@ build_triplet = @build@
 host_triplet = @host@
 target_triplet = @target@
 bin_PROGRAMS = salloc$(EXEEXT)
-@HAVE_ALPS_CRAY_TRUE@@HAVE_REAL_CRAY_TRUE@am__append_1 = -ljob
 subdir = src/salloc
 ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
 am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_check_compile_flag.m4 \
@@ -486,7 +485,9 @@ AM_CPPFLAGS = -I$(top_srcdir)
 salloc_SOURCES = salloc.c salloc.h opt.c opt.h
 convenience_libs = $(LIB_SLURM) $(DL_LIBS)
 salloc_DEPENDENCIES = $(LIB_SLURM_BUILD)
-salloc_LDADD = $(convenience_libs) $(am__append_1)
+salloc_LDADD = \
+	$(convenience_libs)
+
 salloc_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
 all: all-am
 
diff --git a/src/salloc/opt.c b/src/salloc/opt.c
index 51e82a3673a..d408139c77a 100644
--- a/src/salloc/opt.c
+++ b/src/salloc/opt.c
@@ -1650,41 +1650,6 @@ static bool _opt_verify(void)
 		verified = false;
 	}
 
-#if defined(HAVE_ALPS_CRAY)
-	if (getenv("BASIL_RESERVATION_ID") != NULL) {
-		error("BASIL_RESERVATION_ID already set - running salloc "
-		      "within salloc?");
-		return false;
-	}
-	if (saopt.no_shell) {
-		/*
-		 * As long as we are not using srun instead of aprun, this flag
-		 * makes no difference for the operational behaviour of aprun.
-		 */
-		error("--no-shell mode is not supported on Cray (due to srun)");
-		return false;
-	}
-	if (opt.shared && opt.shared != NO_VAL16) {
-		info("Oversubscribing resources is not supported on Cray/ALPS systems");
-		opt.shared = false;
-	}
-	if (opt.overcommit) {
-		info("Oversubscribing is not supported on Cray.");
-		opt.overcommit = false;
-	}
-	if (!saopt.wait_all_nodes)
-		info("Cray needs --wait-all-nodes to wait on ALPS reservation");
-	saopt.wait_all_nodes = 1;
-	if (saopt.kill_command_signal_set) {
-		/*
-		 * Disabled to avoid that the user supplies a weaker signal that
-		 * could cause the child processes not to terminate.
-		 */
-		info("The --kill-command is not supported on Cray.");
-		saopt.kill_command_signal_set = false;
-	}
-#endif
-
 	if ((opt.pn_min_memory > -1) && (opt.mem_per_cpu > -1)) {
 		if (opt.pn_min_memory < opt.mem_per_cpu) {
 			info("mem < mem-per-cpu - resizing mem to be equal "
diff --git a/src/salloc/salloc.c b/src/salloc/salloc.c
index e3c5ecaece9..d34114572d0 100644
--- a/src/salloc/salloc.c
+++ b/src/salloc/salloc.c
@@ -74,19 +74,6 @@
 #include "src/salloc/salloc.h"
 #include "src/salloc/opt.h"
 
-#if defined(HAVE_ALPS_CRAY)
-#include "src/common/node_select.h"
-
-#ifdef HAVE_REAL_CRAY
-/*
- * On Cray installations, the libjob headers are not automatically installed
- * by default, while libjob.so always is, and kernels are > 2.6. Hence it is
- * simpler to just duplicate the single declaration here.
- */
-extern uint64_t job_getjid(pid_t pid);
-#endif
-#endif
-
 #ifndef __USE_XOPEN_EXTENDED
 extern pid_t getpgid(pid_t pid);
 #endif
@@ -311,14 +298,10 @@ int main(int argc, char **argv)
 		 * after first making sure stdin is not redirected.
 		 */
 	} else if ((tpgid = tcgetpgrp(STDIN_FILENO)) < 0) {
-#ifdef HAVE_ALPS_CRAY
-		verbose("no controlling terminal");
-#else
 		if (!saopt.no_shell) {
 			error("no controlling terminal: please set --no-shell");
 			exit(error_exit);
 		}
-#endif
 #ifdef SALLOC_RUN_FOREGROUND
 	} else if ((!saopt.no_shell) && (pid == getpgrp())) {
 		if (tpgid == pid)
@@ -758,25 +741,6 @@ static void _set_submit_dir_env(void)
 /* Returns 0 on success, -1 on failure */
 static int _fill_job_desc_from_opts(job_desc_msg_t *desc)
 {
-#if defined HAVE_ALPS_CRAY && defined HAVE_REAL_CRAY
-	uint64_t pagg_id = job_getjid(getpid());
-	/*
-	 * Interactive sessions require pam_job.so in /etc/pam.d/common-session
-	 * since creating sgi_job containers requires root permissions. This is
-	 * the only exception where we allow the fallback of using the SID to
-	 * confirm the reservation (caught later, in do_basil_confirm).
-	 */
-	if (pagg_id == (uint64_t)-1) {
-		error("No SGI job container ID detected - please enable the "
-		      "Cray job service via /etc/init.d/job");
-	} else {
-		if (!desc->select_jobinfo)
-			desc->select_jobinfo = select_g_select_jobinfo_alloc();
-
-		select_g_select_jobinfo_set(desc->select_jobinfo,
-					    SELECT_JOBDATA_PAGG_ID, &pagg_id);
-	}
-#endif
 	desc->contiguous = opt.contiguous ? 1 : 0;
 	if (opt.core_spec != NO_VAL16)
 		desc->core_spec = opt.core_spec;
diff --git a/src/sbatch/opt.c b/src/sbatch/opt.c
index a0243a9081d..5dfc9e070d0 100644
--- a/src/sbatch/opt.c
+++ b/src/sbatch/opt.c
@@ -2634,7 +2634,7 @@ static void _parse_pbs_resource_list(char *rl)
 								temp, true);
 				xfree(temp);
 			}
-#if defined(HAVE_ALPS_CRAY) || defined(HAVE_NATIVE_CRAY)
+#ifdef HAVE_NATIVE_CRAY
 			/*
 			 * NB: no "mppmem" here since it specifies per-PE memory units,
 			 *     whereas Slurm uses per-node and per-CPU memory units.
@@ -2676,7 +2676,7 @@ static void _parse_pbs_resource_list(char *rl)
 				opt.ntasks_set = true;
 			}
 			xfree(temp);
-#endif	/* HAVE_ALPS_CRAY || HAVE_NATIVE_CRAY */
+#endif /* HAVE_NATIVE_CRAY */
 		} else if (!xstrncasecmp(rl+i, "naccelerators=", 14)) {
 			i += 14;
 			temp = _get_pbs_option_value(rl, &i, ',');
diff --git a/src/sbatch/sbatch.c b/src/sbatch/sbatch.c
index b1948314f60..ae5f72b4055 100644
--- a/src/sbatch/sbatch.c
+++ b/src/sbatch/sbatch.c
@@ -66,7 +66,6 @@
 #define MAX_RETRIES 15
 
 static void  _add_bb_to_script(char **script_body, char *burst_buffer_file);
-static int   _check_cluster_specific_settings(job_desc_msg_t *desc);
 static void  _env_merge_filter(job_desc_msg_t *desc);
 static int   _fill_job_desc_from_opts(job_desc_msg_t *desc);
 static void *_get_script_buffer(const char *filename, int *size);
@@ -247,13 +246,6 @@ int main(int argc, char **argv)
 		}
 	}
 
-	if (job_req_list && is_alps_cray_system()) {
-		info("Heterogeneous jobs not supported on Cray/ALPS systems");
-		exit(1);
-	}
-	if (_check_cluster_specific_settings(desc) != SLURM_SUCCESS)
-		exit(error_exit);
-
 	if (sbopt.test_only) {
 		if (job_req_list)
 			rc = slurm_pack_job_will_run(job_req_list);
@@ -508,34 +500,6 @@ static void _env_merge_filter(job_desc_msg_t *desc)
 	}
 }
 
-/* Returns SLURM_ERROR if settings are invalid for chosen cluster */
-static int _check_cluster_specific_settings(job_desc_msg_t *req)
-{
-	int rc = SLURM_SUCCESS;
-
-	if (is_alps_cray_system()) {
-		/*
-		 * Fix options and inform user, but do not abort submission.
-		 */
-		if (req->shared && (req->shared != NO_VAL16)) {
-			info("--share is not supported on Cray/ALPS systems.");
-			req->shared = NO_VAL16;
-		}
-		if (req->overcommit && (req->overcommit != NO_VAL8)) {
-			info("--overcommit is not supported on Cray/ALPS "
-			     "systems.");
-			req->overcommit = false;
-		}
-		if (req->wait_all_nodes &&
-		    (req->wait_all_nodes != NO_VAL16)) {
-			info("--wait-all-nodes is handled automatically on "
-			     "Cray/ALPS systems.");
-			req->wait_all_nodes = NO_VAL16;
-		}
-	}
-	return rc;
-}
-
 /* Returns 0 on success, -1 on failure */
 static int _fill_job_desc_from_opts(job_desc_msg_t *desc)
 {
diff --git a/src/sbcast/sbcast.c b/src/sbcast/sbcast.c
index bad0591a6e2..0bb1e7ce963 100644
--- a/src/sbcast/sbcast.c
+++ b/src/sbcast/sbcast.c
@@ -74,10 +74,6 @@ int main(int argc, char **argv)
 	log_options_t opts = LOG_OPTS_STDERR_ONLY;
 	log_init("sbcast", opts, SYSLOG_FACILITY_DAEMON, NULL);
 
-#ifdef HAVE_ALPS_CRAY
-	error("The sbcast command is not supported on Cray systems");
-	return 1;
-#endif
 	slurm_conf_init(NULL);
 	route_init(NULL);
 	parse_command_line(argc, argv);
diff --git a/src/slurmctld/controller.c b/src/slurmctld/controller.c
index 3c1218af7a3..0d0073ab44d 100644
--- a/src/slurmctld/controller.c
+++ b/src/slurmctld/controller.c
@@ -489,13 +489,9 @@ int main(int argc, char **argv)
 	} else if (backup_inx > 0) {
 		slurmctld_primary = 0;
 
-#ifdef HAVE_ALPS_CRAY
-		slurmctld_config.scheduling_disabled = true;
-#else
 		if (xstrcasestr(slurmctld_conf.sched_params,
 				"no_backup_scheduling"))
 			slurmctld_config.scheduling_disabled = true;
-#endif
 	}
 
 	/*
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index db73bccdb7a..90f4b074ddf 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -11321,15 +11321,10 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 		/* Used by scontrol just to get current configuration info */
 		job_specs->min_nodes = NO_VAL;
 	}
-#if defined(HAVE_ALPS_CRAY)
-	if ((job_specs->min_nodes != NO_VAL) &&
-	    (IS_JOB_RUNNING(job_ptr) || IS_JOB_SUSPENDED(job_ptr))) {
-#else
 	if ((job_specs->min_nodes != NO_VAL) &&
 	    (job_specs->min_nodes > job_ptr->node_cnt) &&
 	    !select_g_job_expand_allow() &&
 	    (IS_JOB_RUNNING(job_ptr) || IS_JOB_SUSPENDED(job_ptr))) {
-#endif
 		info("Change of size for %pJ not supported", job_ptr);
 		error_code = ESLURM_NOT_SUPPORTED;
 		goto fini;
diff --git a/src/slurmctld/job_scheduler.c b/src/slurmctld/job_scheduler.c
index a9569e0d37c..2f047942a39 100644
--- a/src/slurmctld/job_scheduler.c
+++ b/src/slurmctld/job_scheduler.c
@@ -143,11 +143,7 @@ static int sched_pend_thread = 0;
 static bool sched_running = false;
 static struct timeval sched_last = {0, 0};
 static uint32_t max_array_size = NO_VAL;
-#ifdef HAVE_ALPS_CRAY
-static int sched_min_interval = 1000000;
-#else
 static int sched_min_interval = 2;
-#endif
 
 static int bb_array_stage_cnt = 10;
 extern diag_stats_t slurmctld_diag_stats;
@@ -1433,21 +1429,6 @@ static int _schedule(uint32_t job_limit)
 		goto out;
 	}
 
-
-#ifdef HAVE_ALPS_CRAY
-	/*
-	 * Run a Basil Inventory immediately before scheduling, to avoid
-	 * race conditions caused by ALPS node state change (caused e.g.
-	 * by the node health checker).
-	 * This relies on the above write lock for the node state.
-	 */
-	if (select_g_update_basil()) {
-		unlock_slurmctld(job_write_lock);
-		sched_debug3("not scheduling due to ALPS");
-		goto out;
-	}
-#endif
-
 	part_cnt = list_count(part_list);
 	failed_parts = xmalloc(sizeof(struct part_record *) * part_cnt);
 	failed_resv = xmalloc(sizeof(struct slurmctld_resv*) * MAX_FAILED_RESV);
@@ -3815,12 +3796,6 @@ static char **_build_env(struct job_record *job_ptr, bool is_epilog)
 				(const char **) job_ptr->spank_job_env);
 	}
 
-#if defined HAVE_ALPS_CRAY
-	name = select_g_select_jobinfo_xstrdup(job_ptr->select_jobinfo,
-						SELECT_PRINT_RESV_ID);
-	setenvf(&my_env, "BASIL_RESERVATION_ID", "%s", name);
-	xfree(name);
-#endif
 	setenvf(&my_env, "SLURM_JOB_ACCOUNT", "%s", job_ptr->account);
 	if (job_ptr->details) {
 		setenvf(&my_env, "SLURM_JOB_CONSTRAINTS",
diff --git a/src/slurmctld/node_mgr.c b/src/slurmctld/node_mgr.c
index a4fbaa15f75..9852b189843 100644
--- a/src/slurmctld/node_mgr.c
+++ b/src/slurmctld/node_mgr.c
@@ -2154,12 +2154,6 @@ extern int drain_nodes(char *nodes, char *reason, uint32_t reason_uid)
 		return ESLURM_INVALID_NODE_NAME;
 	}
 
-#ifdef HAVE_ALPS_CRAY
-	error("We cannot drain nodes on a Cray/ALPS system, "
-	      "use native Cray tools such as xtprocadmin(8).");
-	return SLURM_SUCCESS;
-#endif
-
 	if ( (host_list = hostlist_create (nodes)) == NULL) {
 		error ("hostlist_create error on %s: %m", nodes);
 		return ESLURM_INVALID_NODE_NAME;
@@ -3130,10 +3124,8 @@ extern int validate_nodes_via_front_end(
 
 		if (IS_NODE_NO_RESPOND(node_ptr)) {
 			update_node_state = true;
-#ifndef HAVE_ALPS_CRAY
 			/* This is handled by the select/cray plugin */
 			node_ptr->node_state &= (~NODE_STATE_NO_RESPOND);
-#endif
 			node_ptr->node_state &= (~NODE_STATE_POWER_UP);
 		}
 
diff --git a/src/slurmctld/proc_req.c b/src/slurmctld/proc_req.c
index 4e7b2b0bd35..7b95261c55d 100644
--- a/src/slurmctld/proc_req.c
+++ b/src/slurmctld/proc_req.c
@@ -1394,20 +1394,6 @@ static void _slurm_rpc_allocate_pack(slurm_msg_t * msg)
 		if (error_code)
 			break;
 
-#if HAVE_ALPS_CRAY
-		/*
-		 * Catch attempts to nest salloc sessions. It is not possible to
-		 * use an ALPS session which has the same alloc_sid, it fails
-		 * even if PAGG container IDs are used.
-		 */
-		if (allocated_session_in_use(job_desc_msg)) {
-			error_code = ESLURM_RESERVATION_BUSY;
-			error("attempt to nest ALPS allocation on %s:%d by uid=%d",
-			      job_desc_msg->alloc_node, job_desc_msg->alloc_sid,
-			      uid);
-			break;
-		}
-#endif
 		dump_job_desc(job_desc_msg);
 
 		job_ptr = NULL;
@@ -1605,18 +1591,6 @@ static void _slurm_rpc_allocate_resources(slurm_msg_t * msg)
 	if (err_msg)
 		job_submit_user_msg = xstrdup(err_msg);
 
-#if HAVE_ALPS_CRAY
-	/*
-	 * Catch attempts to nest salloc sessions. It is not possible to use an
-	 * ALPS session which has the same alloc_sid, it fails even if PAGG
-	 * container IDs are used.
-	 */
-	if (allocated_session_in_use(job_desc_msg)) {
-		error_code = ESLURM_RESERVATION_BUSY;
-		error("attempt to nest ALPS allocation on %s:%d by uid=%d",
-			job_desc_msg->alloc_node, job_desc_msg->alloc_sid, uid);
-	}
-#endif
 	if (error_code) {
 		reject_job = true;
 	} else if (!slurm_get_peer_addr(msg->conn_fd, &resp_addr)) {
@@ -2581,24 +2555,12 @@ static void _slurm_rpc_complete_batch_script(slurm_msg_t *msg,
 		     msg_title, nodes,
 		     slurm_strerror(comp_msg->slurm_rc));
 		comp_msg->slurm_rc = SLURM_SUCCESS;
-#ifdef HAVE_ALPS_CRAY
-	} else if (comp_msg->slurm_rc == ESLURM_RESERVATION_NOT_USABLE) {
-		/*
-		 * Confirmation of ALPS reservation failed.
-		 *
-		 * This is non-fatal, it may be a transient error (e.g. ALPS
-		 * temporary unavailable). Give job one more chance to run.
-		 */
-		error("ALPS reservation for JobId=%u failed: %s",
-			comp_msg->job_id, slurm_strerror(comp_msg->slurm_rc));
-		dump_job = job_requeue = true;
-#endif
-	/* Handle non-fatal errors here. All others drain the node. */
 	} else if ((comp_msg->slurm_rc == SLURM_COMMUNICATIONS_SEND_ERROR) ||
 		   (comp_msg->slurm_rc == ESLURM_USER_ID_MISSING) ||
 		   (comp_msg->slurm_rc == ESLURMD_UID_NOT_FOUND)  ||
 		   (comp_msg->slurm_rc == ESLURMD_GID_NOT_FOUND)  ||
 		   (comp_msg->slurm_rc == ESLURMD_INVALID_ACCT_FREQ)) {
+		/* Handle non-fatal errors here. All others drain the node. */
 		error("Slurmd error running JobId=%u on %s=%s: %s",
 		      comp_msg->job_id, msg_title, nodes,
 		      slurm_strerror(comp_msg->slurm_rc));
@@ -2762,7 +2724,7 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg)
 		return;
 	}
 
-#if defined HAVE_FRONT_END && !defined HAVE_ALPS_CRAY
+#if defined HAVE_FRONT_END
 	/* Limited job step support */
 	/* Non-super users not permitted to run job steps on front-end.
 	 * A single slurmd can not handle a heavy load. */
diff --git a/src/slurmctld/step_mgr.c b/src/slurmctld/step_mgr.c
index 54f38837c24..ecaede86e52 100644
--- a/src/slurmctld/step_mgr.c
+++ b/src/slurmctld/step_mgr.c
@@ -1917,20 +1917,6 @@ static void _pick_step_cores(struct step_record *step_ptr,
 	}
 }
 
-#ifdef HAVE_ALPS_CRAY
-/* Return the total cpu count on a given node index */
-static int _get_node_cpus(int node_inx)
-{
-	struct node_record *node_ptr;
-
-	node_ptr = node_record_table_ptr + node_inx;
-	if (slurmctld_conf.fast_schedule)
-		return node_ptr->config_ptr->cpus;
-
-	return node_ptr->cpus;
-}
-#endif
-
 /* Update a job's record of allocated CPUs when a job step gets scheduled */
 extern void step_alloc_lps(struct step_record *step_ptr)
 {
@@ -1984,17 +1970,11 @@ extern void step_alloc_lps(struct step_record *step_ptr)
 		step_node_inx++;
 		if (job_node_inx >= job_resrcs_ptr->nhosts)
 			fatal("step_alloc_lps: node index bad");
-#ifdef HAVE_ALPS_CRAY
-		/* Since with alps cray you can only run 1 job per node
-		   return all CPUs as being allocated.
-		*/
-		cpus_alloc = _get_node_cpus(step_node_inx);
-#else
+
 		/* NOTE: The --overcommit option can result in
 		 * cpus_used[] having a higher value than cpus[] */
 		cpus_alloc = step_ptr->step_layout->tasks[step_node_inx] *
 			     step_ptr->cpus_per_task;
-#endif
 		job_resrcs_ptr->cpus_used[job_node_inx] += cpus_alloc;
 		gres_plugin_step_alloc(step_ptr->gres_list, job_ptr->gres_list,
 				       job_node_inx, job_ptr->job_id,
@@ -2111,15 +2091,8 @@ static void _step_dealloc_lps(struct step_record *step_ptr)
 		step_node_inx++;
 		if (job_node_inx >= job_resrcs_ptr->nhosts)
 			fatal("_step_dealloc_lps: node index bad");
-#ifdef HAVE_ALPS_CRAY
-		/* Since with alps cray you can only run 1 job per node
-		   return all CPUs as being allocated.
-		*/
-		cpus_alloc = _get_node_cpus(step_node_inx);
-#else
 		cpus_alloc = step_ptr->step_layout->tasks[step_node_inx] *
 			     step_ptr->cpus_per_task;
-#endif
 		if (job_resrcs_ptr->cpus_used[job_node_inx] >= cpus_alloc) {
 			job_resrcs_ptr->cpus_used[job_node_inx] -= cpus_alloc;
 		} else {
@@ -2339,12 +2312,7 @@ step_create(job_step_create_request_msg_t *step_specs,
 	List step_gres_list = (List) NULL;
 	dynamic_plugin_data_t *select_jobinfo = NULL;
 	uint32_t task_dist;
-
-#ifdef HAVE_ALPS_CRAY
-	uint32_t resv_id = 0;
-#else
 	uint32_t max_tasks;
-#endif
 	*new_step_record = NULL;
 	job_ptr = find_job_record (step_specs->job_id);
 	if (job_ptr == NULL)
@@ -2444,11 +2412,6 @@ step_create(job_step_create_request_msg_t *step_specs,
 	if (job_ptr->next_step_id >= slurmctld_conf.max_step_cnt)
 		return ESLURM_STEP_LIMIT;
 
-#ifdef HAVE_ALPS_CRAY
-	select_g_select_jobinfo_get(job_ptr->select_jobinfo,
-				    SELECT_JOBDATA_RESV_ID, &resv_id);
-#endif
-
 	/* if the overcommit flag is checked, we 0 set cpu_count=0
 	 * which makes it so we don't check to see the available cpus
 	 */
@@ -2508,10 +2471,6 @@ step_create(job_step_create_request_msg_t *step_specs,
 	}
 	_set_def_cpu_bind(job_ptr);
 
-#ifdef HAVE_ALPS_CRAY
-	select_g_select_jobinfo_set(select_jobinfo,
-				    SELECT_JOBDATA_RESV_ID, &resv_id);
-#endif
 	node_count = bit_set_count(nodeset);
 	if (step_specs->num_tasks == NO_VAL) {
 		if (step_specs->cpu_count != NO_VAL)
@@ -2520,7 +2479,6 @@ step_create(job_step_create_request_msg_t *step_specs,
 			step_specs->num_tasks = node_count;
 	}
 
-#if (!defined HAVE_ALPS_CRAY)
 	max_tasks = node_count * slurmctld_conf.max_tasks_per_node;
 	if (step_specs->num_tasks > max_tasks) {
 		error("step has invalid task count: %u max is %u",
@@ -2530,7 +2488,7 @@ step_create(job_step_create_request_msg_t *step_specs,
 		select_g_select_jobinfo_free(select_jobinfo);
 		return ESLURM_BAD_TASK_COUNT;
 	}
-#endif
+
 	step_ptr = _create_step_record(job_ptr, protocol_version);
 	if (step_ptr == NULL) {
 		FREE_NULL_LIST(step_gres_list);
@@ -2998,7 +2956,7 @@ static void _pack_ctld_job_step_info(struct step_record *step_ptr, Buf buffer,
 	time_t begin_time, run_time;
 	bitstr_t *pack_bitstr;
 
-#if defined HAVE_FRONT_END && (!defined HAVE_ALPS_CRAY)
+#if defined HAVE_FRONT_END
 	/* On front-end systems, the steps only execute on one node.
 	 * We need to make them appear like they are running on the job's
 	 * entire allocation (which they really are). */
@@ -3519,22 +3477,10 @@ extern int step_partial_comp(step_complete_msg_t *req, uid_t uid,
 	if (!step_ptr->exit_node_bitmap) {
 		/* initialize the node bitmap for exited nodes */
 		nodes = bit_set_count(step_ptr->step_node_bitmap);
-#if defined HAVE_ALPS_CRAY
-		/* For BGQ we only have 1 real task, so if it exits,
-		   the whole step is ending as well.
-		*/
-		req->range_last = nodes - 1;
-#endif
 		step_ptr->exit_node_bitmap = bit_alloc(nodes);
 		step_ptr->exit_code = req->step_rc;
 	} else {
 		nodes = bit_size(step_ptr->exit_node_bitmap);
-#if defined HAVE_ALPS_CRAY
-		/* For BGQ we only have 1 real task, so if it exits,
-		   the whole step is ending as well.
-		*/
-		req->range_last = nodes - 1;
-#endif
 		if ((req->step_rc == SIG_OOM) ||
 		    (req->step_rc > step_ptr->exit_code))
 			step_ptr->exit_code = req->step_rc;
diff --git a/src/slurmd/slurmd/req.c b/src/slurmd/slurmd/req.c
index bc13c902563..3ec887dedbf 100644
--- a/src/slurmd/slurmd/req.c
+++ b/src/slurmd/slurmd/req.c
@@ -2205,10 +2205,6 @@ static void _rpc_prolog(slurm_msg_t *msg)
 		job_env.spank_job_env_size = req->spank_job_env_size;
 		job_env.uid = req->uid;
 		job_env.user_name = req->user_name;
-#if defined(HAVE_ALPS_CRAY)
-		job_env.resv_id = select_g_select_jobinfo_xstrdup(
-			req->select_jobinfo, SELECT_PRINT_RESV_ID);
-#endif
 		if ((rc = container_g_create(req->job_id)))
 			error("container_g_create(%u): %m", req->job_id);
 		else
@@ -2387,10 +2383,6 @@ _rpc_batch_job(slurm_msg_t *msg, bool new_msg)
 		/*
 	 	 * Run job prolog on this node
 	 	 */
-#if defined(HAVE_ALPS_CRAY)
-		job_env.resv_id = select_g_select_jobinfo_xstrdup(
-			req->select_jobinfo, SELECT_PRINT_RESV_ID);
-#endif
 		if ((rc = container_g_create(req->job_id)))
 			error("container_g_create(%u): %m", req->job_id);
 		else
@@ -5126,11 +5118,6 @@ _rpc_abort_job(slurm_msg_t *msg)
 	job_env.spank_job_env_size = req->spank_job_env_size;
 	job_env.uid = req->job_uid;
 
-#if defined(HAVE_ALPS_CRAY)
-	job_env.resv_id = select_g_select_jobinfo_xstrdup(req->select_jobinfo,
-							  SELECT_PRINT_RESV_ID);
-#endif
-
 	_run_epilog(&job_env);
 
 	if (container_g_delete(req->job_id))
@@ -5569,10 +5556,6 @@ _rpc_terminate_job(slurm_msg_t *msg)
 	job_env.spank_job_env_size = req->spank_job_env_size;
 	job_env.uid = req->job_uid;
 
-#if defined(HAVE_ALPS_CRAY)
-	job_env.resv_id = select_g_select_jobinfo_xstrdup(
-		req->select_jobinfo, SELECT_PRINT_RESV_ID);
-#endif
 	rc = _run_epilog(&job_env);
 	xfree(job_env.resv_id);
 
@@ -5867,11 +5850,6 @@ _build_env(job_env_t *job_env)
 	if (job_env->partition)
 		setenvf(&env, "SLURM_JOB_PARTITION", "%s", job_env->partition);
 
-	if (job_env->resv_id) {
-#if defined(HAVE_ALPS_CRAY)
-		setenvf(&env, "BASIL_RESERVATION_ID", "%s", job_env->resv_id);
-#endif
-	}
 	return env;
 }
 
diff --git a/src/slurmd/slurmstepd/mgr.c b/src/slurmd/slurmstepd/mgr.c
index 160b5fa7f75..cfe6351ce27 100644
--- a/src/slurmd/slurmstepd/mgr.c
+++ b/src/slurmd/slurmstepd/mgr.c
@@ -338,56 +338,6 @@ static uint32_t _get_exit_code(stepd_step_rec_t *job)
 	return step_rc;
 }
 
-#ifdef HAVE_ALPS_CRAY
-/*
- * Kludge to better inter-operate with ALPS layer:
- * - CONFIRM method requires the SID of the shell executing the job script,
- * - RELEASE method is more robustly called from stepdmgr.
- *
- * To avoid calling the same select/cray plugin function also in slurmctld,
- * we use the following convention:
- * - only job_id, job_state, alloc_sid, and select_jobinfo set to non-NULL,
- * - batch_flag is 0 (corresponding call in slurmctld uses batch_flag = 1),
- * - job_state set to the unlikely value of 'NO_VAL'.
- */
-static int _call_select_plugin_from_stepd(stepd_step_rec_t *job,
-					  uint64_t pagg_id,
-					  int (*select_fn)(struct job_record *))
-{
-	struct job_record fake_job_record = {0};
-	int rc;
-
-	fake_job_record.job_id		= job->jobid;
-	fake_job_record.job_state	= NO_VAL;
-	fake_job_record.select_jobinfo	= select_g_select_jobinfo_alloc();
-	select_g_select_jobinfo_set(fake_job_record.select_jobinfo,
-				    SELECT_JOBDATA_RESV_ID, &job->resv_id);
-	if (pagg_id)
-		select_g_select_jobinfo_set(fake_job_record.select_jobinfo,
-					    SELECT_JOBDATA_PAGG_ID, &pagg_id);
-	rc = (*select_fn)(&fake_job_record);
-	select_g_select_jobinfo_free(fake_job_record.select_jobinfo);
-	return rc;
-}
-
-static int _select_cray_plugin_job_ready(stepd_step_rec_t *job)
-{
-	uint64_t pagg_id = proctrack_g_find(job->jmgr_pid);
-
-	if (pagg_id == 0) {
-		error("no PAGG ID: job service disabled on this host?");
-		/*
-		 * If this process is not attached to a container, there is no
-		 * sense in trying to use the SID as fallback, since the call to
-		 * proctrack_g_add() in _fork_all_tasks() will fail later.
-		 * Hence drain the node until sgi_job returns proper PAGG IDs.
-		 */
-		return READY_JOB_FATAL;
-	}
-	return _call_select_plugin_from_stepd(job, pagg_id, select_g_job_ready);
-}
-#endif
-
 /*
  * Send batch exit code to slurmctld. Non-zero rc will DRAIN the node.
  */
@@ -399,10 +349,6 @@ batch_finish(stepd_step_rec_t *job, int rc)
 	if (job->argv[0] && (unlink(job->argv[0]) < 0))
 		error("unlink(%s): %m", job->argv[0]);
 
-#ifdef HAVE_ALPS_CRAY
-	_call_select_plugin_from_stepd(job, 0, select_g_job_fini);
-#endif
-
 	if (job->aborted) {
 		if ((job->stepid == NO_VAL) ||
 		    (job->stepid == SLURM_BATCH_SCRIPT)) {
@@ -1289,39 +1235,6 @@ job_manager(stepd_step_rec_t *job)
 						    (void *)&xcpuinfo_abs_to_mac);
 	}
 
-#ifdef HAVE_ALPS_CRAY
-	/*
-	 * Note that the previously called proctrack_g_create function is
-	 * mandatory since the select/cray plugin needs the job container
-	 * ID in order to CONFIRM the ALPS reservation.
-	 * It is not a good idea to perform this setup in _fork_all_tasks(),
-	 * since any transient failure of ALPS (which can happen in practice)
-	 * will then set the frontend node to DRAIN.
-	 *
-	 * ALso note that we do not check the reservation for batch jobs with
-	 * a reservation ID of zero and no CPUs. These are Slurm job
-	 * allocations containing no compute nodes and thus have no ALPS
-	 * reservation.
-	 */
-	if (!job->batch || job->resv_id || job->cpus) {
-		rc = _select_cray_plugin_job_ready(job);
-		if (rc != SLURM_SUCCESS) {
-			/*
-			 * Transient error: slurmctld knows this condition to
-			 * mean that the ALPS (not the Slurm) reservation
-			 * failed and tries again.
-			 */
-			if (rc == READY_JOB_ERROR)
-				rc = ESLURM_RESERVATION_NOT_USABLE;
-			else
-				rc = ESLURMD_SETUP_ENVIRONMENT_ERROR;
-			error("could not confirm ALPS reservation #%u",
-			      job->resv_id);
-			goto fail1;
-		}
-	}
-#endif
-
 	debug2("Before call to spank_init()");
 	if (spank_init (job) < 0) {
 		error ("Plugin stack initialization failed.");
diff --git a/src/slurmd/slurmstepd/slurmstepd_job.c b/src/slurmd/slurmstepd/slurmstepd_job.c
index 222823b7312..b022188b555 100644
--- a/src/slurmd/slurmstepd/slurmstepd_job.c
+++ b/src/slurmd/slurmstepd/slurmstepd_job.c
@@ -423,19 +423,6 @@ extern stepd_step_rec_t *stepd_step_rec_create(launch_tasks_request_msg_t *msg,
 					     job->job_mem);
 	}
 
-#ifdef HAVE_ALPS_CRAY
-	/*
-	 * This is only used for Cray emulation mode where slurmd is used to
-	 * launch job steps. On a real Cray system, ALPS is used to launch
-	 * the tasks instead of SLURM. Slurm's task launch RPC does NOT
-	 * contain the reservation ID, so just use some non-zero value here
-	 * for testing purposes.
-	 */
-	job->resv_id = 1;
-	select_g_select_jobinfo_set(msg->select_jobinfo, SELECT_JOBDATA_RESV_ID,
-				    &job->resv_id);
-#endif
-
 	/* only need these values on the extern step, don't copy otherwise */
 	if ((msg->job_step_id == SLURM_EXTERN_CONT) && msg->x11) {
 		job->x11 = msg->x11;
@@ -582,11 +569,6 @@ batch_stepd_step_rec_create(batch_job_launch_msg_t *msg)
 	job->task[0]->argc = job->argc;
 	job->task[0]->argv = job->argv;
 
-#ifdef HAVE_ALPS_CRAY
-	select_g_select_jobinfo_get(msg->select_jobinfo, SELECT_JOBDATA_RESV_ID,
-				    &job->resv_id);
-#endif
-
 	return job;
 }
 
diff --git a/src/srun/libsrun/allocate.c b/src/srun/libsrun/allocate.c
index 28aad9c80f1..fc61f16076a 100644
--- a/src/srun/libsrun/allocate.c
+++ b/src/srun/libsrun/allocate.c
@@ -65,15 +65,6 @@
 #include "opt.h"
 #include "launch.h"
 
-#if defined HAVE_ALPS_CRAY && defined HAVE_REAL_CRAY
-/*
- * On Cray installations, the libjob headers are not automatically installed
- * by default, while libjob.so always is, and kernels are > 2.6. Hence it is
- * simpler to just duplicate the single declaration here.
- */
-extern uint64_t job_getjid(pid_t pid);
-#endif
-
 #define MAX_ALLOC_WAIT	60	/* seconds */
 #define MIN_ALLOC_WAIT	5	/* seconds */
 #define MAX_RETRIES	10
@@ -753,7 +744,7 @@ static job_desc_msg_t *_job_desc_msg_create_from_opts(slurm_opt_t *opt_local)
 	xassert(srun_opt);
 
 	slurm_init_job_desc_msg(j);
-#if defined HAVE_ALPS_CRAY && defined HAVE_REAL_CRAY
+#ifdef HAVE_REAL_CRAY
 	static bool sgi_err_logged = false;
 	uint64_t pagg_id = job_getjid(getpid());
 	/*
diff --git a/src/srun/libsrun/opt.c b/src/srun/libsrun/opt.c
index 98fb54c71cc..1aeb0d292b3 100644
--- a/src/srun/libsrun/opt.c
+++ b/src/srun/libsrun/opt.c
@@ -214,7 +214,6 @@
 extern char **environ;
 
 /*---- global variables, defined in opt.h ----*/
-resource_allocation_response_msg_t *global_resp = NULL;
 int	error_exit = 1;
 int	immediate_exit = 1;
 char *	mpi_type = NULL;
diff --git a/src/srun/libsrun/opt.h b/src/srun/libsrun/opt.h
index 04cd0a84162..78689594ee5 100644
--- a/src/srun/libsrun/opt.h
+++ b/src/srun/libsrun/opt.h
@@ -62,7 +62,6 @@ extern int _verbose;
 extern enum modes mode;
 
 extern int	error_exit;	/* exit code for slurm errors */
-extern resource_allocation_response_msg_t *global_resp;
 extern int	immediate_exit;	/* exit code for --imediate option & busy */
 extern char *	mpi_type;
 extern slurm_opt_t opt;
diff --git a/src/srun/libsrun/srun_job.c b/src/srun/libsrun/srun_job.c
index 9dccf177e8a..fd9615ad607 100644
--- a/src/srun/libsrun/srun_job.c
+++ b/src/srun/libsrun/srun_job.c
@@ -1244,8 +1244,6 @@ extern void create_srun_job(void **p_job, bool *got_alloc,
 				opt_local = list_next(opt_iter);
 				if (!opt_local)
 					break;
-				if (!global_resp)	/* Used by Cray/ALPS */
-					global_resp = resp;
 				_print_job_information(resp);
 				_set_env_vars(resp, ++pack_offset);
 				_set_env_vars2(resp, pack_offset);
@@ -1264,7 +1262,6 @@ extern void create_srun_job(void **p_job, bool *got_alloc,
 		} else {
 			if (!(resp = allocate_nodes(handle_signals, &opt)))
 				exit(error_exit);
-			global_resp = resp;
 			*got_alloc = true;
 			my_job_id = resp->job_id;
 			_print_job_information(resp);
@@ -1294,7 +1291,6 @@ extern void create_srun_job(void **p_job, bool *got_alloc,
 		}
 		xfree(pack_nodelist);
 
-		global_resp = NULL;
 		if (opt_list) {
 			resp_iter = list_iterator_create(job_resp_list);
 			while ((resp = (resource_allocation_response_msg_t *)
@@ -1482,7 +1478,7 @@ static srun_job_t *_job_create_structure(allocation_info_t *ainfo,
  	job->pack_offset = NO_VAL;
 	job->pack_task_offset = NO_VAL;
 
-#if defined HAVE_FRONT_END && !defined HAVE_ALPS_CRAY
+#if defined HAVE_FRONT_END
 	/* Limited job step support */
 	opt_local->overcommit = true;
 	job->nhosts = 1;
diff --git a/src/sstat/sstat.c b/src/sstat/sstat.c
index 1bdb9825168..2ec1e03bb88 100644
--- a/src/sstat/sstat.c
+++ b/src/sstat/sstat.c
@@ -262,11 +262,6 @@ int main(int argc, char **argv)
 	uint32_t stepid = NO_VAL;
 	slurmdb_selected_step_t *selected_step = NULL;
 
-#ifdef HAVE_ALPS_CRAY
-	error("The sstat command is not supported on Cray systems");
-	return 1;
-#endif
-
 	slurm_conf_init(NULL);
 	print_fields_list = list_create(NULL);
 	print_fields_itr = list_iterator_create(print_fields_list);
diff --git a/src/sview/job_info.c b/src/sview/job_info.c
index 1886adbabdf..6f0a41d45ae 100644
--- a/src/sview/job_info.c
+++ b/src/sview/job_info.c
@@ -108,7 +108,6 @@ enum {
 	SORTID_ACTION,
 	SORTID_ALLOC,
 	SORTID_ALLOC_NODE,
-	SORTID_ALPS_RESV_ID,
 	SORTID_ARRAY_JOB_ID,
 	SORTID_ARRAY_TASK_ID,
 	SORTID_BATCH,
@@ -213,8 +212,7 @@ enum {
  * take place.  If you choose EDIT_MODEL (means only display a set of
  * known options) create it in function create_model_*.
  */
-static char *_initial_page_opts = ("JobID,Partition,BG_Block,"
-				   "ALPS_Resv_ID,UserID,Name,"
+static char *_initial_page_opts = ("JobID,Partition,UserID,Name,"
 				   "State,Time_Running,Node_Count,NodeList");
 
 static display_data_t display_data_job[] = {
@@ -242,13 +240,6 @@ static display_data_t display_data_job[] = {
 	 EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_PACK_JOB_OFFSET, "Pack Job Offset", false,
 	 EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
-#ifdef HAVE_ALPS_CRAY
-	{G_TYPE_STRING, SORTID_ALPS_RESV_ID, "ALPS Resv ID", false, EDIT_NONE,
-	 refresh_job, create_model_job, admin_edit_job},
-#else
-	{G_TYPE_STRING, SORTID_ALPS_RESV_ID, NULL, true, EDIT_NONE,
-	 refresh_job, create_model_job, admin_edit_job},
-#endif
 	{G_TYPE_STRING, SORTID_USER_ID, "UserID", false, EDIT_NONE,
 	 refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_GROUP_ID, "GroupID", false, EDIT_NONE,
@@ -1265,16 +1256,6 @@ static void _layout_job_record(GtkTreeView *treeview,
 						 SORTID_ALLOC_NODE),
 				   tmp_char);
 
-	if (cluster_flags & CLUSTER_FLAG_CRAY_A)
-		add_display_treestore_line(update, treestore, &iter,
-					   find_col_name(display_data_job,
-							 SORTID_ALPS_RESV_ID),
-					   select_g_select_jobinfo_sprint(
-						   job_ptr->select_jobinfo,
-						   tmp_char,
-						   sizeof(tmp_char),
-						   SELECT_PRINT_DATA));
-
 	if (job_ptr->array_task_str ||
 	    (job_ptr->array_task_id != NO_VAL)) {
 		snprintf(tmp_char, sizeof(tmp_char), "%u",
@@ -2370,18 +2351,6 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 	gtk_tree_store_set(treestore, iter,
 			   SORTID_NETWORK, job_ptr->network, -1);
 
-	if (cluster_flags & CLUSTER_FLAG_CRAY_A) {
-		char tmp_resv_id[40];
-
-		select_g_select_jobinfo_sprint(job_ptr->select_jobinfo,
-					       tmp_resv_id, sizeof(tmp_resv_id),
-					       SELECT_PRINT_DATA);
-
-		gtk_tree_store_set(treestore, iter,
-				   SORTID_ALPS_RESV_ID,  tmp_resv_id,
-				   -1);
-	}
-
 	if (check_task &&
 	    (job_ptr->array_task_str ||
 	     ((job_ptr->array_task_id != NO_VAL) || job_ptr->pack_job_id))) {
@@ -4827,20 +4796,6 @@ extern void cluster_change_job(void)
 		if (display_data->id == -1)
 			break;
 
-		if (cluster_flags & CLUSTER_FLAG_CRAY_A) {
-			switch(display_data->id) {
-			case SORTID_ALPS_RESV_ID:
-				display_data->name = "ALPS";
-				break;
-			}
-		} else {
-			switch(display_data->id) {
-			case SORTID_ALPS_RESV_ID:
-				display_data->name = NULL;
-				break;
-			}
-		}
-
 		if (cluster_flags & CLUSTER_FLAG_FED) {
 			switch(display_data->id) {
 			case SORTID_CLUSTER_NAME:
-- 
GitLab