diff --git a/doc.zih.tu-dresden.de/docs/software/big_data_frameworks_spark.md b/doc.zih.tu-dresden.de/docs/software/big_data_frameworks_spark.md
index 8e8a87f47ace289a65169a5e85c5e4a72ca714a9..2f087b8a92cf0ef05e5972f0ffe173a7c3a437fd 100644
--- a/doc.zih.tu-dresden.de/docs/software/big_data_frameworks_spark.md
+++ b/doc.zih.tu-dresden.de/docs/software/big_data_frameworks_spark.md
@@ -119,8 +119,51 @@ short test runs, it is **recommended to launch your jobs in the background using
 that, you can conveniently put the parameters directly into the job file and submit it via
 `sbatch [options] <job file>`.
 
-Please use a [batch job](../jobs_and_resources/slurm.md) similar to
-[example-spark.sbatch](misc/example-spark.sbatch).
+Please use a [batch job](../jobs_and_resources/slurm.md) with a configuration similar to the example below:
+
+??? example "spark.sbatch"
+    ```bash
+    #!/bin/bash -l
+    #SBATCH --time=01:00:00
+    #SBATCH --partition=haswell
+    #SBATCH --nodes=2
+    #SBATCH --exclusive
+    #SBATCH --mem=60G
+    #SBATCH --job-name="example-spark"
+    
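+    # Load the Spark module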
+    ml Spark/3.0.1-Hadoop-2.7-Java-1.8-Python-3.7.4-GCCcore-8.3.0
+    
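+    # Exit handler: stop the Spark cluster if the job exits unexpectedly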
+    function myExitHandler () {
+        stop-all.sh
+    }
+    
+    # Set up the Spark configuration for this job
+    . framework-configure.sh spark $SPARK_HOME/conf
+    
+    # Register cleanup hook in case something goes wrong
+    trap myExitHandler EXIT
+    
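+    # Start the Spark cluster (master and workers) on the allocated nodes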
+    start-all.sh
+    
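+    # Run the SparkPi example that ships with Spark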
+    spark-submit --class org.apache.spark.examples.SparkPi \
+        $SPARK_HOME/examples/jars/spark-examples_2.12-3.0.1.jar 1000
+    
+    # Stop the Spark cluster; clear the trap so cleanup does not run twice
+    stop-all.sh
+    trap - EXIT
+
+    exit 0
+    ```
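+
+Assuming you saved the job file above as `spark.sbatch`, submit it from a login node:
+
+```console
+marie@login$ sbatch spark.sbatch
+```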
 
 ## Jupyter Notebook