diff --git a/doc.zih.tu-dresden.de/docs/software/big_data_frameworks_spark.md b/doc.zih.tu-dresden.de/docs/software/big_data_frameworks_spark.md
index 2f087b8a92cf0ef05e5972f0ffe173a7c3a437fd..3ef9d6f30547ba4954691306d8549cf7483d8478 100644
--- a/doc.zih.tu-dresden.de/docs/software/big_data_frameworks_spark.md
+++ b/doc.zih.tu-dresden.de/docs/software/big_data_frameworks_spark.md
@@ -119,12 +119,13 @@ short test runs, it is **recommended to launch your jobs in the background using
 that, you can conveniently put the parameters directly into the job file and submit it via
 `sbatch [options] <job file>`.
 
-Please use a [batch job](../jobs_and_resources/slurm.md) with a configuration, similar to the example below:
+Please use a [batch job](../jobs_and_resources/slurm.md) with a configuration similar to the
+example below:
 
 ??? example "spark.sbatch"
     ```bash
     #!/bin/bash -l
-    #SBATCH --time=01:00:00
+    #SBATCH --time=00:05:00
     #SBATCH --partition=haswell
     #SBATCH --nodes=2
     #SBATCH --exclusive
diff --git a/doc.zih.tu-dresden.de/docs/software/misc/example-spark.sbatch b/doc.zih.tu-dresden.de/docs/software/misc/example-spark.sbatch
deleted file mode 100644
index 2fcf3aa39b8e66b004fa0fed621475e3200f9d76..0000000000000000000000000000000000000000
--- a/doc.zih.tu-dresden.de/docs/software/misc/example-spark.sbatch
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#SBATCH --time=00:03:00
-#SBATCH --partition=haswell
-#SBATCH --nodes=1
-#SBATCH --exclusive
-#SBATCH --mem=50G
-#SBATCH -J "example-spark"
-
-ml Spark/3.0.1-Hadoop-2.7-Java-1.8-Python-3.7.4-GCCcore-8.3.0
-
-function myExitHandler () {
-	stop-all.sh
-}
-
-#configuration
-. framework-configure.sh spark $SPARK_HOME/conf
-
-#register cleanup hook in case something goes wrong
-trap myExitHandler EXIT
-
-start-all.sh
-
-spark-submit --class org.apache.spark.examples.SparkPi $SPARK_HOME/examples/jars/spark-examples_2.12-3.0.1.jar 1000
-
-stop-all.sh
-
-exit 0