diff --git a/.gitignore b/.gitignore
index 04c7fd320b19a3da2344057a2fd78ef420e71499..24174aef30f4d80865755184ff44cc29458bbfaf 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,3 +3,4 @@
 *node_modules
 **venv/
 doc.zih.tu-dresden.de/public/
+*mermaid.min.js
diff --git a/Dockerfile b/Dockerfile
index 4f90282a89dc5b644290dec870f311bfbb753b3e..6c11a4a6f61236cb3db782e43a9f39bad9d2e170 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -12,7 +12,7 @@ RUN pip install mkdocs>=1.1.2 mkdocs-material>=7.1.0
 # Linter #  
 ##########
 
-RUN apt update && apt install -y nodejs npm aspell git
+RUN apt-get update && apt-get install -y nodejs npm aspell git
 
 RUN npm install -g markdownlint-cli markdown-link-check
 
@@ -32,6 +32,14 @@ gitlab.hrz.tu-chemnitz.de ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJ/cSNsKRPrfXCMjl+
 RUN git clone https://gitlab.hrz.tu-chemnitz.de/mago411c--tu-dresden.de/mkdocs_table_plugin.git ~/mkdocs_table_plugin
 RUN cd ~/mkdocs_table_plugin && python setup.py install
 
+# Make sure that mermaid.min.js is available (download it at container start if it is missing)
+RUN echo '#!/bin/bash' > /entrypoint.sh
+RUN echo 'test \! -e /docs/tud_theme/javascripts/mermaid.min.js && test -x /docs/util/download-newest-mermaid.js.sh && /docs/util/download-newest-mermaid.js.sh' >> /entrypoint.sh
+RUN echo 'exec "$@"' >> /entrypoint.sh
+RUN chmod u+x /entrypoint.sh
+
 WORKDIR /docs
 
 CMD ["mkdocs", "build", "--verbose", "--strict"]
+
+ENTRYPOINT ["/entrypoint.sh"]
diff --git a/doc.zih.tu-dresden.de/docs/contrib/content_rules.md b/doc.zih.tu-dresden.de/docs/contrib/content_rules.md
index 1b171e10daf2035ba3ac8d7a2a975515afc34761..b4c84ff840b04540022f16626d18edef0b3efa8f 100644
--- a/doc.zih.tu-dresden.de/docs/contrib/content_rules.md
+++ b/doc.zih.tu-dresden.de/docs/contrib/content_rules.md
@@ -105,6 +105,7 @@ We follow this rules regarding prompts:
 | `alpha` partition      | `marie@alpha$`   |
 | `romeo` partition      | `marie@romeo$`   |
 | `julia` partition      | `marie@julia$`   |
+| `dcv` partition        | `marie@dcv$`     |
 | Localhost              | `marie@local$`   |
 
 *Remarks:*
@@ -197,9 +198,9 @@ Line numbers can be added via
 ```bash linenums="1"
 #!/bin/bash
 
-#SBATCH -N 1
-#SBATCH -n 23
-#SBATCH -t 02:10:00
+#SBATCH --nodes=1
+#SBATCH --ntasks=23
+#SBATCH --time=02:10:00
 
 srun a.out
 ```
@@ -215,9 +216,9 @@ Specific Lines can be highlighted by using
 ```bash hl_lines="2 3"
 #!/bin/bash
 
-#SBATCH -N 1
-#SBATCH -n 23
-#SBATCH -t 02:10:00
+#SBATCH --nodes=1
+#SBATCH --ntasks=23
+#SBATCH --time=02:10:00
 
 srun a.out
 ```
diff --git a/doc.zih.tu-dresden.de/docs/contrib/contribute_container.md b/doc.zih.tu-dresden.de/docs/contrib/contribute_container.md
index 568724e6537962f9e227b58542f91d18ef72a44f..15382289e3b0b7abcfb9621fcbb33cde302dc1fc 100644
--- a/doc.zih.tu-dresden.de/docs/contrib/contribute_container.md
+++ b/doc.zih.tu-dresden.de/docs/contrib/contribute_container.md
@@ -31,13 +31,15 @@ Add the original repository as a so-called remote:
     1. `git pull origin preview`
     1. `git pull upstream-zih preview` (only required when you forked the project)
 1. Create a new feature branch for you to work in. Ideally, name it like the file you want to
-modify or the issue you want to work on, e.g.: `git checkout -b issue-174`. (If you are uncertain
-about the name of a file, please look into `mkdocs.yaml`.)
+modify or the issue you want to work on, e.g.:
+`git checkout -b 174-check-contribution-documentation` for issue 174 with title "Check contribution
+documentation". (If you are uncertain about the name of a file, please look into `mkdocs.yaml`.)
 1. Improve the documentation with your preferred editor, i.e. add new files and correct mistakes.
 1. Use `git add <FILE>` to select your improvements for the next commit.
 1. Commit the changes with `git commit -m "<DESCRIPTION>"`. The description should be a meaningful
 description of your changes. If you work on an issue, please also add "Closes 174" (for issue 174).
-1. Push the local changes to the GitLab server, e.g. with `git push origin issue-174`.
+1. Push the local changes to the GitLab server, e.g. with
+`git push origin 174-check-contribution-documentation`.
 1. As an output you get a link to create a merge request against the preview branch.
 1. When the merge request is created, a continuous integration (CI) pipeline automatically checks
 your contributions.
@@ -57,6 +59,7 @@ Building a container could be done with the following steps:
 
 ```bash
 cd hpc-wiki
+doc.zih.tu-dresden.de/util/download-newest-mermaid.js.sh
 docker build -t hpc-compendium .
 ```
 
@@ -64,7 +67,7 @@ To avoid a lot of retyping, use the following in your shell:
 
 ```bash
 alias wikiscript="docker run --name=hpc-compendium --rm -w /docs --mount src=$PWD,target=/docs,type=bind hpc-compendium"
-alias wiki="docker run --name=hpc-compendium -p 8000:8000 --rm -w /docs --mount src=$PWD/doc.zih.tu-dresden.de,target=/docs,type=bind hpc-compendium bash -c"
+alias wiki="docker run --name=hpc-compendium -p 8000:8000 --rm -w /docs --mount src=$PWD/doc.zih.tu-dresden.de,target=/docs,type=bind hpc-compendium"
 ```
 
 ## Working with the Docker Container
@@ -76,7 +79,7 @@ Here is a suggestion of a workflow which might be suitable for you.
 The command(s) to start the dockerized web server is this:
 
 ```bash
-wiki "mkdocs serve -a 0.0.0.0:8000"
+wiki mkdocs serve -a 0.0.0.0:8000
 ```
 
 You can view the documentation via `http://localhost:8000` in your browser, now.
@@ -137,7 +140,7 @@ Read on if you want to run a specific check.
 If you want to check whether the markdown files are formatted properly, use the following command:
 
 ```bash
-wiki 'markdownlint docs'
+wiki markdownlint docs
 ```
 
 #### Spell Checker
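With `bash -c` removed from the `wiki` alias above, the arguments are passed to the container as the command itself and no longer need to be wrapped in quotes as a single string. A short sketch of the resulting usage (both invocations also appear in this file):

```bash
wiki mkdocs serve -a 0.0.0.0:8000
wiki markdownlint docs
```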
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm.md
index 9fa70fef2a9db246f0d04dc92a33533589099673..98e37b5acedbe228def7a3094d3167bf98cce62b 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm.md
@@ -6,6 +6,28 @@ setup experiments, and
 edit and prepare jobs. The login nodes are not suited for computational work! From the login nodes,
 you can interact with the batch system, e.g., submit and monitor your jobs.
 
+A typical workflow would look like this:
+
+```mermaid
+sequenceDiagram
+    user ->>+ login node: run program
+    login node ->> login node: kill after 5 min
+    login node ->>- user: Killed!
+    user ->> login node: salloc [...]
+    login node ->> Slurm: Request resources
+    Slurm ->> user: resources
+    user ->>+ allocated resources: srun [options] [command]
+    allocated resources ->> allocated resources: run command (on allocated nodes)
+    allocated resources ->>- user: program finished
+    user ->>+ allocated resources: srun [options] [further_command]
+    allocated resources ->> allocated resources: run further command
+    allocated resources ->>- user: program finished
+    user ->>+ allocated resources: srun [options] [further_command]
+    allocated resources ->> allocated resources: run further command
+    Slurm ->> allocated resources: Job limit reached/exceeded
+    allocated resources ->>- user: Job limit reached
+```
+
 ??? note "Batch System"
 
     The batch system is the central organ of every HPC system users interact with its compute
@@ -376,13 +398,13 @@ If you want to use your reservation, you have to add the parameter
 
 ## Node Features for Selective Job Submission
 
-The nodes in our HPC system are becoming more diverse in multiple aspects: hardware, mounted
-storage, software. The system administrators can describe the set of properties and it is up to the
-user to specify her/his requirements. These features should be thought of as changing over time
+The nodes in our HPC system are becoming more diverse in multiple aspects, e.g., hardware, mounted
+storage, software. The system administrators can describe the set of properties and it is up to you
+as a user to specify your requirements. These features should be thought of as changing over time
 (e.g., a filesystem get stuck on a certain node).
 
-A feature can be used with the Slurm option `--constrain` or `-C` like
-`srun -C fs_lustre_scratch2 ...` with `srun` or `sbatch`. Combinations like
+A feature can be used with the Slurm option `-C, --constraint=<ARG>` like
+`srun --constraint=fs_lustre_scratch2 ...` with `srun` or `sbatch`. Combinations like
 `--constraint="fs_beegfs_global0`are allowed. For a detailed description of the possible
 constraints, please refer to the [Slurm documentation](https://slurm.schedmd.com/srun.html).
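A rough command-line counterpart of the interactive part of the sequence diagram added above (resource options are placeholders, `[...]` stands for the usual `salloc` output):

```console
marie@login$ salloc --ntasks=1 --time=00:30:00
[...]
marie@login$ srun [options] ./my_program          # run a command on the allocated resources
marie@login$ srun [options] ./my_further_command  # further srun calls reuse the same allocation
marie@login$ exit                                 # free the allocation before the time limit is reached
```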
 
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md
index fecea7ad7a9db2d5395bad6963baba73b4314248..b6ec206cf1950f416e81318daab0c9e0e88ba45a 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md
@@ -7,12 +7,12 @@ depend on the type of parallelization and architecture.
 
 ### OpenMP Jobs
 
-An SMP-parallel job can only run within a node, so it is necessary to include the options `-N 1` and
-`-n 1`. The maximum number of processors for an SMP-parallel program is 896 and 56 on partition
-`taurussmp8` and  `smp2`, respectively.  Please refer to the
+An SMP-parallel job can only run within a node, so it is necessary to include the options `--nodes=1`
+and `--ntasks=1`. The maximum number of processors for an SMP-parallel program is 896 and 56 on
+partition `taurussmp8` and `smp2`, respectively. Please refer to the
 [partitions section](partitions_and_limits.md#memory-limits) for up-to-date information. Using the
 option `--cpus-per-task=<N>` Slurm will start one task and you will have `N` CPUs available for your
-job.  An example job file would look like:
+job. An example job file would look like:
 
 !!! example "Job file for OpenMP application"
 
@@ -22,9 +22,9 @@ job.  An example job file would look like:
     #SBATCH --tasks-per-node=1
     #SBATCH --cpus-per-task=8
     #SBATCH --time=08:00:00
-    #SBATCH -J Science1
+    #SBATCH --job-name=Science1
     #SBATCH --mail-type=end
-    #SBATCH --mail-user=your.name@tu-dresden.de
+    #SBATCH --mail-user=<your.email>@tu-dresden.de
 
     export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
     ./path/to/binary
@@ -48,9 +48,9 @@ For MPI-parallel jobs one typically allocates one core per task that has to be s
     #!/bin/bash
     #SBATCH --ntasks=864
     #SBATCH --time=08:00:00
-    #SBATCH -J Science1
+    #SBATCH --job-name=Science1
     #SBATCH --mail-type=end
-    #SBATCH --mail-user=your.name@tu-dresden.de
+    #SBATCH --mail-user=<your.email>@tu-dresden.de
 
     srun ./path/to/binary
     ```
@@ -70,9 +70,9 @@ below.
     #SBATCH --ntasks=4
     #SBATCH --cpus-per-task=1
     #SBATCH --time=01:00:00
-    #SBATCH -J PseudoParallelJobs
+    #SBATCH --job-name=PseudoParallelJobs
     #SBATCH --mail-type=end
-    #SBATCH --mail-user=your.name@tu-dresden.de
+    #SBATCH --mail-user=<your.email>@tu-dresden.de
 
     # The following sleep command was reported to fix warnings/errors with srun by users (feel free to uncomment).
     #sleep 5
@@ -109,7 +109,7 @@ for `sbatch/srun` in this case is `--gres=gpu:[NUM_PER_NODE]` (where `NUM_PER_NO
     #SBATCH --cpus-per-task=6      # use 6 threads per task
     #SBATCH --gres=gpu:1           # use 1 GPU per node (i.e. use one GPU per task)
     #SBATCH --time=01:00:00        # run for 1 hour
-    #SBATCH -A Project1            # account CPU time to Project1
+    #SBATCH --account=p_marie      # account CPU time to project p_marie
 
     srun ./your/cuda/application   # start you application (probably requires MPI to use both nodes)
     ```
@@ -247,7 +247,7 @@ two you might want to use. Since we use cgroups for separation of jobs, your job
 use more resources than requested.*
 
 If you just want to use all available cores in a node, you have to specify how Slurm should organize
-them, like with `-p haswell -c 24` or `-p haswell --ntasks-per-node=24`.
+them, like with `--partition=haswell --cpus-per-task=24` or `--partition=haswell --ntasks-per-node=24`.
 
 Here is a short example to ensure that a benchmark is not spoiled by other jobs, even if it doesn't
 use up all resources in the nodes:
@@ -256,13 +256,13 @@ use up all resources in the nodes:
 
     ```Bash
     #!/bin/bash
-    #SBATCH -p haswell
+    #SBATCH --partition=haswell
     #SBATCH --nodes=2
     #SBATCH --ntasks-per-node=2
     #SBATCH --cpus-per-task=8
     #SBATCH --exclusive    # ensure that nobody spoils my measurement on 2 x 2 x 8 cores
     #SBATCH --time=00:10:00
-    #SBATCH -J Benchmark
+    #SBATCH --job-name=Benchmark
     #SBATCH --mail-user=your.name@tu-dresden.de
 
     srun ./my_benchmark
@@ -299,11 +299,11 @@ name specific to the job:
     ```Bash
     #!/bin/bash
     #SBATCH --array 0-9
-    #SBATCH -o arraytest-%A_%a.out
-    #SBATCH -e arraytest-%A_%a.err
+    #SBATCH --output=arraytest-%A_%a.out
+    #SBATCH --error=arraytest-%A_%a.err
     #SBATCH --ntasks=864
     #SBATCH --time=08:00:00
-    #SBATCH -J Science1
+    #SBATCH --job-name=Science1
     #SBATCH --mail-type=end
     #SBATCH --mail-user=your.name@tu-dresden.de
 
diff --git a/doc.zih.tu-dresden.de/docs/software/data_analytics_with_r.md b/doc.zih.tu-dresden.de/docs/software/data_analytics_with_r.md
index afead82a8abfb5ca3826a0110940ea5574a8c1dd..1f6be0614fc728f88212a5192fb1b11277ac8454 100644
--- a/doc.zih.tu-dresden.de/docs/software/data_analytics_with_r.md
+++ b/doc.zih.tu-dresden.de/docs/software/data_analytics_with_r.md
@@ -269,8 +269,8 @@ since both are running multicore jobs on a **single** node. Below is an example:
 #SBATCH --tasks-per-node=1
 #SBATCH --cpus-per-task=16
 #SBATCH --time=00:10:00
-#SBATCH -o test_Rmpi.out
-#SBATCH -e test_Rmpi.err
+#SBATCH --output=test_Rmpi.out
+#SBATCH --error=test_Rmpi.err
 
 module purge
 module load modenv/scs5
diff --git a/doc.zih.tu-dresden.de/docs/software/distributed_training.md b/doc.zih.tu-dresden.de/docs/software/distributed_training.md
index 41cd1dab3a3ab547efbdf63a60e359b06a8c7611..4e8fc427e71bd28ad1a3b663aba82d11bad088e6 100644
--- a/doc.zih.tu-dresden.de/docs/software/distributed_training.md
+++ b/doc.zih.tu-dresden.de/docs/software/distributed_training.md
@@ -128,10 +128,10 @@ Each worker runs the training loop independently.
     module load modenv/hiera GCC/10.2.0 CUDA/11.1.1 OpenMPI/4.0.5 TensorFlow/2.4.1
 
     # On the first node
-    TF_CONFIG='{"cluster": {"worker": ["'"${NODE_1}"':33562", "'"${NODE_2}"':33561"]}, "task": {"index": 0, "type": "worker"}}' srun -w ${NODE_1} -N 1 --ntasks=1 --gres=gpu:1 python main_ddl.py &
+    TF_CONFIG='{"cluster": {"worker": ["'"${NODE_1}"':33562", "'"${NODE_2}"':33561"]}, "task": {"index": 0, "type": "worker"}}' srun --nodelist=${NODE_1} --nodes=1 --ntasks=1 --gres=gpu:1 python main_ddl.py &
 
     # On the second node
-    TF_CONFIG='{"cluster": {"worker": ["'"${NODE_1}"':33562", "'"${NODE_2}"':33561"]}, "task": {"index": 1, "type": "worker"}}' srun -w ${NODE_2} -N 1 --ntasks=1 --gres=gpu:1 python main_ddl.py &
+    TF_CONFIG='{"cluster": {"worker": ["'"${NODE_1}"':33562", "'"${NODE_2}"':33561"]}, "task": {"index": 1, "type": "worker"}}' srun --nodelist=${NODE_2} --nodes=1 --ntasks=1 --gres=gpu:1 python main_ddl.py &
 
     wait
     ```
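The `TF_CONFIG` lines above assume that `NODE_1` and `NODE_2` already hold the hostnames of the two allocated nodes. A minimal sketch of how they could be set earlier in the job script (variable names follow the snippet; the derivation itself is not part of the original example):

```bash
# Expand the Slurm nodelist of the current job into one hostname per line
# and pick the first and the second node for the two TensorFlow workers.
NODELIST=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
NODE_1=$(echo "$NODELIST" | sed -n 1p)
NODE_2=$(echo "$NODELIST" | sed -n 2p)
```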
diff --git a/doc.zih.tu-dresden.de/docs/software/fem_software.md b/doc.zih.tu-dresden.de/docs/software/fem_software.md
index d8bffb0a75eb7a13649e68baa3bda9407f65a9c4..3f9bf79d54d36711560054101536c82dfbbfe000 100644
--- a/doc.zih.tu-dresden.de/docs/software/fem_software.md
+++ b/doc.zih.tu-dresden.de/docs/software/fem_software.md
@@ -59,7 +59,7 @@ Slurm or [writing job files](../jobs_and_resources/slurm.md#job-files).
     #SBATCH --job-name=yyyy         # give a name, what ever you want
     #SBATCH --mail-type=END,FAIL    # send email when the job finished or failed
     #SBATCH --mail-user=<name>@mailbox.tu-dresden.de  # set your email
-    #SBATCH -A p_xxxxxxx            # charge compute time to your project
+    #SBATCH --account=p_marie       # charge compute time to project p_marie
 
 
     # Abaqus has its own MPI
@@ -75,7 +75,7 @@ Slurm or [writing job files](../jobs_and_resources/slurm.md#job-files).
     ```
     4. Control the status of the job
     ```
-    marie@login squeue -u your_login     # in column "ST" (Status) you will find a R=Running or P=Pending (waiting for resources)
+    marie@login$ squeue --me     # in column "ST" (Status) you will find R=Running or P=Pending (waiting for resources)
     ```
 
 ## Ansys
@@ -114,7 +114,7 @@ If more time is needed, a CPU has to be allocated like this (see
 
 ```console
 marie@login$ module load ANSYS/<version>
-marie@login$ srun -t 00:30:00 --x11=first [SLURM_OPTIONS] --pty bash
+marie@login$ srun --time=00:30:00 --x11=first [SLURM_OPTIONS] --pty bash
 [...]
 marie@login$ runwb2
 ```
@@ -208,7 +208,7 @@ firewall of ZIH. For further information, please refer to the COMSOL manual.
 
     ```console
     marie@login$ module load COMSOL
-    marie@login$ srun -n 1 -c 4 --mem-per-cpu=2500 -t 8:00 comsol -np 4 server
+    marie@login$ srun --ntasks=1 --cpus-per-task=4 --mem-per-cpu=2500 --time=8:00 comsol -np 4 server
     ```
 
 ??? example "Interactive Job"
@@ -218,7 +218,7 @@ firewall of ZIH. For further information, please refer to the COMSOL manual.
 
     ```console
     marie@login$ module load COMSOL
-    marie@login$ srun -n 1 -c 4 --mem-per-cpu=2500 -t 8:00 --pty --x11=first comsol -np 4
+    marie@login$ srun --ntasks=1 --cpus-per-task=4 --mem-per-cpu=2500 --time=8:00 --pty --x11=first comsol -np 4
     ```
 
     Please make sure, that the option *Preferences* --> Graphics --> *Renedering* is set to *software
diff --git a/doc.zih.tu-dresden.de/docs/software/nanoscale_simulations.md b/doc.zih.tu-dresden.de/docs/software/nanoscale_simulations.md
index 14018d374df59375fc8ef78f9309f9b9d0ba003a..9727d2d35f03fa334a79385ffd960625fc958348 100644
--- a/doc.zih.tu-dresden.de/docs/software/nanoscale_simulations.md
+++ b/doc.zih.tu-dresden.de/docs/software/nanoscale_simulations.md
@@ -81,8 +81,8 @@ For runs with [Slurm](../jobs_and_resources/slurm.md), please use a script like
 
 ```Bash
 #!/bin/bash
-#SBATCH -t 120
-#SBATCH -n 8
+#SBATCH --time=120
+#SBATCH --ntasks=8
 #SBATCH --ntasks-per-node=2
 ## you have to make sure that an even number of tasks runs on each node !!
 #SBATCH --mem-per-cpu=1900
diff --git a/doc.zih.tu-dresden.de/docs/software/python_virtual_environments.md b/doc.zih.tu-dresden.de/docs/software/python_virtual_environments.md
index d6ae27186bef6ff2538a8bc9043f0450540ba979..13b623174f21016084917fb2cd424b500727e5f3 100644
--- a/doc.zih.tu-dresden.de/docs/software/python_virtual_environments.md
+++ b/doc.zih.tu-dresden.de/docs/software/python_virtual_environments.md
@@ -139,7 +139,7 @@ can deactivate the conda environment as follows:
     This is an example on partition Alpha. The example creates a virtual environment, and installs
     the package `torchvision` with pip.
     ```console
-    marie@login$ srun --partition=alpha-interactive -N=1 --gres=gpu:1 --time=01:00:00 --pty bash
+    marie@login$ srun --partition=alpha-interactive --nodes=1 --gres=gpu:1 --time=01:00:00 --pty bash
     marie@alpha$ mkdir python-environments                               # please use workspaces
     marie@alpha$ module load modenv/hiera GCC/10.2.0 CUDA/11.1.1 OpenMPI/4.0.5 PyTorch
     Module GCC/10.2.0, CUDA/11.1.1, OpenMPI/4.0.5, PyTorch/1.9.0 and 54 dependencies loaded.
diff --git a/doc.zih.tu-dresden.de/docs/software/visualization.md b/doc.zih.tu-dresden.de/docs/software/visualization.md
index 8116af22e79073237c10dfb113cd0910af824455..b1a103a0c1ab1c999a002c2584eefa1a1813916b 100644
--- a/doc.zih.tu-dresden.de/docs/software/visualization.md
+++ b/doc.zih.tu-dresden.de/docs/software/visualization.md
@@ -38,9 +38,10 @@ parallel, if it was built using MPI.
     ```
 
 The resources for the MPI processes have to be allocated via the
-[batch system](../jobs_and_resources/slurm.md) option `-c NUM` (not `-n`, as it would be usually for
-MPI processes). It might be valuable in terms of runtime to bind/pin the MPI processes to hardware.
-A convenient option is `-bind-to core`. All other options can be obtained by
+[batch system](../jobs_and_resources/slurm.md) option `--cpus-per-task=<NUM>` (not `--ntasks=<NUM>`,
+as would be usual for MPI processes). It might be valuable in terms of runtime to bind/pin the
+MPI processes to hardware. A convenient option is `-bind-to core`. All other options can be
+obtained by
 
 ```console
 marie@login$ mpiexec -bind-to -help`
@@ -57,8 +58,8 @@ interactive allocation.
     ```Bash
     #!/bin/bash
 
-    #SBATCH -N 1
-    #SBATCH -c 12
+    #SBATCH --nodes=1
+    #SBATCH --cpus-per-task=12
     #SBATCH --time=01:00:00
 
     # Make sure to only use ParaView
@@ -71,7 +72,7 @@ interactive allocation.
 ??? example "Example of interactive allocation using `salloc`"
 
     ```console
-    marie@login$ salloc -N 1 -c 16 --time=01:00:00 bash
+    marie@login$ salloc --nodes=1 --cpus-per-task=16 --time=01:00:00 bash
     salloc: Pending job allocation 336202
     salloc: job 336202 queued and waiting for resources
     salloc: job 336202 has been allocated resources
@@ -102,8 +103,8 @@ cards (GPUs) specified by the device index. For that, make sure to use the modul
     ```Bash
     #!/bin/bash
 
-    #SBATCH -N 1
-    #SBATCH -c 12
+    #SBATCH --nodes=1
+    #SBATCH --cpus-per-task=12
     #SBATCH --gres=gpu:2
     #SBATCH --partition=gpu2
     #SBATCH --time=01:00:00
@@ -133,7 +134,7 @@ handling. First, you need to open a DCV session, so please follow the instructio
 virtual desktop session, then load the ParaView module as usual and start the GUI:
 
 ```console
-marie@dcv module load ParaView/5.7.0
+marie@dcv$ module load ParaView/5.7.0
 paraview
 ```
 
@@ -156,7 +157,7 @@ processes.
 
     ```console
     marie@login$ module ParaView/5.7.0-osmesa
-    marie@login$ srun -N1 -n8 --mem-per-cpu=2500 -p interactive --pty pvserver --force-offscreen-rendering
+    marie@login$ srun --nodes=1 --ntasks=8 --mem-per-cpu=2500 --partition=interactive --pty pvserver --force-offscreen-rendering
     srun: job 2744818 queued and waiting for resources
     srun: job 2744818 has been allocated resources
     Waiting for client...
@@ -188,8 +189,8 @@ marie@local$ ssh -L 22222:172.24.140.229:11111 taurus
 
 !!! important "SSH command"
 
-    The previous SSH command requires that you have already set up your [SSH configuration
-    ](../access/ssh_login.md#configuring-default-parameters-for-ssh).
+    The previous SSH command requires that you have already set up your
+    [SSH configuration](../access/ssh_login.md#configuring-default-parameters-for-ssh).
 
 The final step is to start ParaView locally on your own machine and add the connection
 
@@ -239,8 +240,8 @@ it into thinking your provided GL rendering version is higher than what it actua
 
 ??? example
 
-    The following lines requires that you have already set up your [SSH configuration
-    ](../access/ssh_login.md#configuring-default-parameters-for-ssh).
+    The following lines require that you have already set up your
+    [SSH configuration](../access/ssh_login.md#configuring-default-parameters-for-ssh).
 
     ```console
     # 1st, connect to ZIH systems using X forwarding (-X).
@@ -252,5 +253,5 @@ it into thinking your provided GL rendering version is higher than what it actua
     marie@login$ export MESA_GL_VERSION_OVERRIDE=3.2
 
     # 3rd, start the ParaView GUI inside an interactive job. Don't forget the --x11 parameter for X forwarding:
-    marie@login$ srun -n1 -c1 -p interactive --mem-per-cpu=2500 --pty --x11=first paraview
+    marie@login$ srun --ntasks=1 --cpus-per-task=1 --partition=interactive --mem-per-cpu=2500 --pty --x11=first paraview
     ```
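Tying the `--cpus-per-task`/`-bind-to core` passage earlier in this file together, a server launch inside such a job could look like this (a sketch only; `pvbatch` and the script name are assumptions, the binding flag is the one named in the text):

```bash
# Start as many MPI processes as CPUs were allocated per task and pin them to cores.
mpiexec -bind-to core -np "$SLURM_CPUS_PER_TASK" pvbatch my_visualization.py
```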
diff --git a/doc.zih.tu-dresden.de/mkdocs.yml b/doc.zih.tu-dresden.de/mkdocs.yml
index 4c23d89235ed25d8abdad348883f95a0a97f9e45..6f02a2cb683ac353e88a2bb42025d1342b463f58 100644
--- a/doc.zih.tu-dresden.de/mkdocs.yml
+++ b/doc.zih.tu-dresden.de/mkdocs.yml
@@ -180,11 +180,16 @@ extra_css:
 
 extra_javascript:
   - javascripts/extra.js
+  - javascripts/mermaid.min.js
 
 markdown_extensions:
     - admonition
     - pymdownx.details
-    - pymdownx.superfences
+    - pymdownx.superfences:
+        custom_fences:
+          - name: mermaid
+            class: mermaid
+            format: !!python/name:pymdownx.superfences.fence_code_format
     - pymdownx.highlight
     - toc:
         permalink: True
@@ -193,7 +198,6 @@ markdown_extensions:
     - pymdownx.tabbed:
         alternate_style: True
 
-          #plugins:
           #  - mkdocs-video
 
 extra:
diff --git a/doc.zih.tu-dresden.de/util/download-newest-mermaid.js.sh b/doc.zih.tu-dresden.de/util/download-newest-mermaid.js.sh
new file mode 100755
index 0000000000000000000000000000000000000000..9986ad6f49e2e739f8a53d7911f4e346196d21a4
--- /dev/null
+++ b/doc.zih.tu-dresden.de/util/download-newest-mermaid.js.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+set -euo pipefail
+
+scriptpath="${BASH_SOURCE[0]}"
+basedir=$(dirname "$scriptpath")
+basedir=$(dirname "$basedir")
+cd "$basedir/tud_theme/javascripts"
+wget https://unpkg.com/mermaid/dist/mermaid.min.js
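A brief usage sketch for this helper (the manual invocation matches the build instructions added to `contribute_container.md` above; the `ls` only shows where the file ends up):

```bash
# Run once from the repository root before building the Docker image ...
cd hpc-wiki
doc.zih.tu-dresden.de/util/download-newest-mermaid.js.sh
# ... which places the file where the Dockerfile's entrypoint check expects it:
ls doc.zih.tu-dresden.de/tud_theme/javascripts/mermaid.min.js
```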
diff --git a/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh b/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh
index fe4138f970cf68fff0c54f034bed92033ad11f4b..b2f8b3478d7d8aaa2247b392c97dc09d09348743 100755
--- a/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh
+++ b/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh
@@ -46,6 +46,9 @@ i	^[ |]*|$
 Avoid spaces at end of lines.
 doc.zih.tu-dresden.de/docs/accessibility.md
 i	[[:space:]]$
+When referencing projects, please use p_marie for consistency.
+
+i	\<p_	p_marie
 Avoid \`home\`. Use home without backticks instead.
 
 i	\`home\`
@@ -83,7 +86,8 @@ function checkFile(){
     fi
     IFS=$'\t' read -r flags pattern exceptionPatterns
     while IFS=$'\t' read -r -a exceptionPatternsArray; do
-      if [ $silent = false ]; then
+      # Print the checked pattern only in verbose mode (not when the script is invoked with default arguments).
+      if [ $verbose = true ]; then
         echo "  Pattern: $pattern$skipping"
       fi
       if [ -z "$skipping" ]; then
@@ -96,6 +100,7 @@ function checkFile(){
         if grep -n $grepflag $color "$pattern" "$f" | grepExceptions "${exceptionPatternsArray[@]}" ; then
           number_of_matches=`grep -n $grepflag $color "$pattern" "$f" | grepExceptions "${exceptionPatternsArray[@]}" | wc -l`
           ((cnt=cnt+$number_of_matches))
+          # Print this message only if not in silent mode; in silent mode only files, pattern matches and the summary are printed.
           if [ $silent = false ]; then
             echo "    $message"
           fi
@@ -106,23 +111,29 @@ function checkFile(){
 }
 
 function usage () {
-  echo "$0 [options]"
-  echo "Search forbidden patterns in markdown files."
-  echo ""
-  echo "Options:"
-  echo "  -a     Search in all markdown files (default: git-changed files)" 
-  echo "  -f     Search in a specific markdown file" 
-  echo "  -s     Silent mode"
-  echo "  -h     Show help message"
-  echo "  -c     Show git matches in color"
+cat <<EOF
+$0 [options]
+Search forbidden patterns in markdown files.
+
+Options:
+  -a    Search in all markdown files (default: git-changed files)
+  -f    Search in a specific markdown file
+  -s    Silent mode
+  -h    Show help message
+  -c    Show git matches in color
+  -v    Verbose mode
+EOF
 }
 
 # Options
 all_files=false
+# If silent=true: suppress the per-match messages
 silent=false
+# If verbose=true: print the list of checked files and each pattern that is checked
+verbose=false
 file=""
 color=""
-while getopts ":ahsf:c" option; do
+while getopts ":ahsf:cv" option; do
  case $option in
    a)
      all_files=true
@@ -137,6 +148,9 @@ while getopts ":ahsf:c" option; do
    c)
      color=" --color=always "
      ;;
+   v)
+     verbose=true
+     ;;
    h)
      usage
      exit;;
@@ -159,7 +173,10 @@ else
   files=`git diff --name-only "$(git merge-base HEAD "$branch")"`
 fi
 
+# Print the list of files only in verbose mode (not when the script is invoked with default arguments).
+if [ $verbose = true ]; then
 echo "... $files ..."
+fi
 cnt=0
 if [[ ! -z $file ]]; then
   checkFile $file